593 files changed, 13272 insertions, 9738 deletions
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index de79efdad46c..c3313d45f4d6 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl @@ -128,16 +128,48 @@ X!Edrivers/base/interface.c !Edrivers/base/platform.c !Edrivers/base/bus.c </sect1> - <sect1><title>Device Drivers DMA Management</title> + <sect1> + <title>Buffer Sharing and Synchronization</title> + <para> + The dma-buf subsystem provides the framework for sharing buffers + for hardware (DMA) access across multiple device drivers and + subsystems, and for synchronizing asynchronous hardware access. + </para> + <para> + This is used, for example, by drm "prime" multi-GPU support, but + is of course not limited to GPU use cases. + </para> + <para> + The three main components of this are: (1) dma-buf, representing + a sg_table and exposed to userspace as a file descriptor to allow + passing between devices, (2) fence, which provides a mechanism + to signal when one device has finished access, and (3) reservation, + which manages the shared or exclusive fence(s) associated with + the buffer. + </para> + <sect2><title>dma-buf</title> !Edrivers/dma-buf/dma-buf.c +!Iinclude/linux/dma-buf.h + </sect2> + <sect2><title>reservation</title> +!Pdrivers/dma-buf/reservation.c Reservation Object Overview +!Edrivers/dma-buf/reservation.c +!Iinclude/linux/reservation.h + </sect2> + <sect2><title>fence</title> !Edrivers/dma-buf/fence.c -!Edrivers/dma-buf/seqno-fence.c !Iinclude/linux/fence.h +!Edrivers/dma-buf/seqno-fence.c !Iinclude/linux/seqno-fence.h +!Edrivers/dma-buf/fence-array.c +!Iinclude/linux/fence-array.h !Edrivers/dma-buf/reservation.c !Iinclude/linux/reservation.h !Edrivers/dma-buf/sync_file.c !Iinclude/linux/sync_file.h + </sect2> + </sect1> + <sect1><title>Device Drivers DMA Management</title> !Edrivers/base/dma-coherent.c !Edrivers/base/dma-mapping.c </sect1>
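As a hedged illustration of the buffer-sharing flow described in the new DocBook text above, here is a minimal kernel C sketch of the importer side, assuming a driver that has received a dma-buf file descriptor from userspace; the helper name import_buffer is invented, error handling is trimmed, and the exporter, fence and reservation sides are omitted:

#include <linux/dma-buf.h>
#include <linux/err.h>

static struct sg_table *import_buffer(struct device *dev, int fd,
				      struct dma_buf_attachment **att_out)
{
	struct dma_buf *dmabuf = dma_buf_get(fd);	/* takes a file reference */
	struct dma_buf_attachment *att;
	struct sg_table *sgt;

	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	att = dma_buf_attach(dmabuf, dev);		/* attach this device */
	if (IS_ERR(att)) {
		dma_buf_put(dmabuf);
		return ERR_CAST(att);
	}

	/* map into this device's DMA address space; yields the shared sg_table */
	sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, att);
		dma_buf_put(dmabuf);
		return sgt;
	}

	*att_out = att;		/* kept for the later unmap/detach/put */
	return sgt;
}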
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl index 7586bf75f62e..d09536c91717 100644 --- a/Documentation/DocBook/gpu.tmpl +++ b/Documentation/DocBook/gpu.tmpl @@ -1018,6 +1018,11 @@ int max_width, max_height;</synopsis> </para> </sect2> <sect2> + <title>DRM Format Handling</title> +!Iinclude/drm/drm_fourcc.h +!Edrivers/gpu/drm/drm_fourcc.c + </sect2> + <sect2> <title>Dumb Buffer Objects</title> <para> The KMS API doesn't standardize backing storage object creation and @@ -1092,22 +1097,6 @@ int max_width, max_height;</synopsis> operation. </para> </sect2> - <sect2> - <title>Locking</title> - <para> - Beside some lookup structures with their own locking (which is hidden - behind the interface functions) most of the modeset state is protected - by the <code>dev-<mode_config.lock</code> mutex and additionally - per-crtc locks to allow cursor updates, pageflips and similar operations - to occur concurrently with background tasks like output detection. - Operations which cross domains like a full modeset always grab all - locks. Drivers there need to protect resources shared between crtcs with - additional locking. They also need to be careful to always grab the - relevant crtc locks if a modset functions touches crtc state, e.g. for - load detection (which does only grab the <code>mode_config.lock</code> - to allow concurrent screen updates on live crtcs). - </para> - </sect2> </sect1> <!-- Internals: kms initialization and cleanup --> @@ -1586,7 +1575,7 @@ void intel_crt_init(struct drm_device *dev) </sect3> <sect3> <title>Implementing Asynchronous Atomic Commit</title> -!Pdrivers/gpu/drm/drm_atomic_helper.c implementing async commit +!Pdrivers/gpu/drm/drm_atomic_helper.c implementing nonblocking commit </sect3> <sect3> <title>Atomic State Reset and Initialization</title> @@ -1699,6 +1688,12 @@ void intel_crt_init(struct drm_device *dev) !Edrivers/gpu/drm/drm_panel.c !Pdrivers/gpu/drm/drm_panel.c drm panel </sect2> + <sect2> + <title>Simple KMS Helper Reference</title> +!Iinclude/drm/drm_simple_kms_helper.h +!Edrivers/gpu/drm/drm_simple_kms_helper.c +!Pdrivers/gpu/drm/drm_simple_kms_helper.c overview + </sect2> </sect1> <!-- Internals: kms properties --> @@ -2845,14 +2840,7 @@ void (*disable_vblank) (struct drm_device *dev, int crtc);</synopsis> <para> Drivers must initialize the vertical blanking handling core with a call to <function>drm_vblank_init</function> in their - <methodname>load</methodname> operation. The function will set the struct - <structname>drm_device</structname> - <structfield>vblank_disable_allowed</structfield> field to 0. This will - keep vertical blanking interrupts enabled permanently until the first mode - set operation, where <structfield>vblank_disable_allowed</structfield> is - set to 1. The reason behind this is not clear. Drivers can set the field - to 1 after <function>calling drm_vblank_init</function> to make vertical - blanking interrupts dynamically managed from the beginning. + <methodname>load</methodname> operation. </para> <para> Vertical blanking interrupts can be enabled by the DRM core or by drivers diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index c6938e50e71f..4da60b463995 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -56,6 +56,7 @@ stable kernels. | ARM | MMU-500 | #841119,#826419 | N/A | | | | | | | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | +| Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 | | Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 | | Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 | | Cavium | ThunderX SMMUv2 | #27704 | N/A | diff --git a/Documentation/devicetree/bindings/display/imx/ldb.txt b/Documentation/devicetree/bindings/display/imx/ldb.txt index 0a175d991b52..a407462c885e 100644 --- a/Documentation/devicetree/bindings/display/imx/ldb.txt +++ b/Documentation/devicetree/bindings/display/imx/ldb.txt @@ -62,6 +62,7 @@ Required properties: display-timings are used instead. Optional properties (required if display-timings are used): + - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing - display-timings : A node that describes the display timings as defined in Documentation/devicetree/bindings/display/display-timing.txt.
- fsl,data-mapping : should be "spwg" or "jeida" diff --git a/Documentation/devicetree/bindings/display/panel/panel-dpi.txt b/Documentation/devicetree/bindings/display/panel/panel-dpi.txt index 216c894d4f99..b52ac52757df 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-dpi.txt +++ b/Documentation/devicetree/bindings/display/panel/panel-dpi.txt @@ -7,6 +7,8 @@ Required properties: Optional properties: - label: a symbolic name for the panel - enable-gpios: panel enable gpio +- reset-gpios: GPIO to control the RESET pin +- vcc-supply: phandle of regulator that will be used to enable power to the display Required nodes: - "panel-timing" containing video timings diff --git a/Documentation/filesystems/devpts.txt b/Documentation/filesystems/devpts.txt index 30d2fcb32f72..9f94fe276dea 100644 --- a/Documentation/filesystems/devpts.txt +++ b/Documentation/filesystems/devpts.txt @@ -1,141 +1,26 @@ +Each mount of the devpts filesystem is now distinct such that ptys +and their indices allocated in one mount are independent from ptys +and their indices in all other mounts. -To support containers, we now allow multiple instances of devpts filesystem, -such that indices of ptys allocated in one instance are independent of indices -allocated in other instances of devpts. +All mounts of the devpts filesystem now create a /dev/pts/ptmx node +with permissions 0000. -To preserve backward compatibility, this support for multiple instances is -enabled only if: +To retain backwards compatibility, a ptmx device node (aka any node +created with "mknod name c 5 2") will, when opened, look for an instance +of devpts under the name "pts" in the same directory as the ptmx device +node. - - CONFIG_DEVPTS_MULTIPLE_INSTANCES=y, and - - '-o newinstance' mount option is specified while mounting devpts - -IOW, devpts now supports both single-instance and multi-instance semantics. - -If CONFIG_DEVPTS_MULTIPLE_INSTANCES=n, there is no change in behavior and -this referred to as the "legacy" mode. In this mode, the new mount options -(-o newinstance and -o ptmxmode) will be ignored with a 'bogus option' message -on console. - -If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and devpts is mounted without the -'newinstance' option (as in current start-up scripts) the new mount binds -to the initial kernel mount of devpts. This mode is referred to as the -'single-instance' mode and the current, single-instance semantics are -preserved, i.e PTYs are common across the system. - -The only difference between this single-instance mode and the legacy mode -is the presence of new, '/dev/pts/ptmx' node with permissions 0000, which -can safely be ignored. - -If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and 'newinstance' option is specified, -the mount is considered to be in the multi-instance mode and a new instance -of the devpts fs is created. Any ptys created in this instance are independent -of ptys in other instances of devpts. Like in the single-instance mode, the -/dev/pts/ptmx node is present. To effectively use the multi-instance mode, -open of /dev/ptmx must be a redirected to '/dev/pts/ptmx' using a symlink or -bind-mount. - -Eg: A container startup script could do the following: - - $ chmod 0666 /dev/pts/ptmx - $ rm /dev/ptmx - $ ln -s pts/ptmx /dev/ptmx - $ ns_exec -cm /bin/bash - - # We are now in new container - - $ umount /dev/pts - $ mount -t devpts -o newinstance lxcpts /dev/pts - $ sshd -p 1234 - -where 'ns_exec -cm /bin/bash' calls clone() with CLONE_NEWNS flag and execs /bin/bash in the child process.
A pty created by the sshd is not visible in -the original mount of /dev/pts. +As an option, instead of placing a /dev/ptmx device node at /dev/ptmx, +it is possible to place a symlink to /dev/pts/ptmx at /dev/ptmx or +to bind mount /dev/pts/ptmx to /dev/ptmx. If you opt for using +the devpts filesystem in this manner, devpts should be mounted with +ptmxmode=0666, or chmod 0666 /dev/pts/ptmx should be called. Total count of pty pairs in all instances is limited by sysctls: kernel.pty.max = 4096 - global limit -kernel.pty.reserve = 1024 - reserve for initial instance +kernel.pty.reserve = 1024 - reserved for filesystems mounted from the initial mount namespace kernel.pty.nr - current count of ptys Per-instance limit could be set by adding mount option "max=<count>". This feature was added in kernel 3.4 together with sysctl kernel.pty.reserve. In kernels older than 3.4 sysctl kernel.pty.max works as per-instance limit. - -User-space changes ------------------ - -In multi-instance mode (i.e '-o newinstance' mount option is specified at least -once), following user-space issues should be noted. - -1. If -o newinstance mount option is never used, /dev/pts/ptmx can be ignored - and no change is needed to system-startup scripts. - -2. To effectively use multi-instance mode (i.e -o newinstance is specified) - administrators or startup scripts should "redirect" open of /dev/ptmx to - /dev/pts/ptmx using either a bind mount or symlink. - - $ mount -t devpts -o newinstance devpts /dev/pts - - followed by either - - $ rm /dev/ptmx - $ ln -s pts/ptmx /dev/ptmx - $ chmod 666 /dev/pts/ptmx - or - $ mount -o bind /dev/pts/ptmx /dev/ptmx - -3. The '/dev/ptmx -> pts/ptmx' symlink is the preferred method since it - enables better error-reporting and treats both single-instance and - multi-instance mounts similarly. - - But this method requires that system-startup scripts set the mode of - /dev/pts/ptmx correctly (default mode is 0000). The scripts can set the - mode by, either - - - adding ptmxmode mount option to devpts entry in /etc/fstab, or - - using 'chmod 0666 /dev/pts/ptmx' - -4. If multi-instance mode mount is needed for containers, but the system - startup scripts have not yet been updated, container-startup scripts - should bind mount /dev/ptmx to /dev/pts/ptmx to avoid breaking single- - instance mounts. - - Or, in general, container-startup scripts should use: - - mount -t devpts -o newinstance -o ptmxmode=0666 devpts /dev/pts - if [ ! -L /dev/ptmx ]; then - mount -o bind /dev/pts/ptmx /dev/ptmx - fi - - When all devpts mounts are multi-instance, /dev/ptmx can permanently be - a symlink to pts/ptmx and the bind mount can be ignored. - -5. A multi-instance mount that is not accompanied by the /dev/ptmx to - /dev/pts/ptmx redirection would result in an unusable/unreachable pty. - - mount -t devpts -o newinstance lxcpts /dev/pts - - immediately followed by: - - open("/dev/ptmx") - - would create a pty, say /dev/pts/7, in the initial kernel mount. - But /dev/pts/7 would be invisible in the new mount. - -6. The permissions for /dev/pts/ptmx node should be specified when mounting - /dev/pts, using the '-o ptmxmode=%o' mount option (default is 0000). - - mount -t devpts -o newinstance -o ptmxmode=0644 devpts /dev/pts - - The permissions can be later be changed as usual with 'chmod'. - - chmod 666 /dev/pts/ptmx - -7. A mount of devpts without the 'newinstance' option results in binding to - initial kernel mount. This behavior while preserving legacy semantics, - does not provide strict isolation in a container environment. i.e by - mounting devpts without the 'newinstance' option, a container could - get visibility into the 'host' or root container's devpts. - - To workaround this and have strict isolation, all mounts of devpts, - including the mount in the root container, should use the newinstance - option.
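A hedged userspace sketch of the per-mount semantics described in the new devpts text above, assuming the instance was mounted with ptmxmode=0666 (or a prior chmod): opening the /dev/pts/ptmx node allocates a master whose slave index is local to that devpts instance.

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* open the per-mount ptmx node, not a legacy /dev/ptmx char device */
	int master = open("/dev/pts/ptmx", O_RDWR | O_NOCTTY);

	if (master < 0) {
		perror("open /dev/pts/ptmx");	/* e.g. mode is still 0000 */
		return 1;
	}
	if (grantpt(master) < 0 || unlockpt(master) < 0)
		return 1;
	/* the slave lives in the same devpts instance as the ptmx node */
	printf("slave pty: %s\n", ptsname(master));
	return 0;
}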
diff --git a/Documentation/kdump/gdbmacros.txt b/Documentation/kdump/gdbmacros.txt index 35f6a982a0d5..220d0a80ca2c 100644 --- a/Documentation/kdump/gdbmacros.txt +++ b/Documentation/kdump/gdbmacros.txt @@ -170,21 +170,92 @@ document trapinfo address the kernel panicked. end +define dump_log_idx + set $idx = $arg0 + if ($argc > 1) + set $prev_flags = $arg1 + else + set $prev_flags = 0 + end + set $msg = ((struct printk_log *) (log_buf + $idx)) + set $prefix = 1 + set $newline = 1 + set $log = log_buf + $idx + sizeof(*$msg) -define dmesg - set $i = 0 - set $end_idx = (log_end - 1) & (log_buf_len - 1) + # prev & LOG_CONT && !(msg->flags & LOG_PREFIX) + if (($prev_flags & 8) && !($msg->flags & 4)) + set $prefix = 0 + end + + # msg->flags & LOG_CONT + if ($msg->flags & 8) + # (prev & LOG_CONT && !(prev & LOG_NEWLINE)) + if (($prev_flags & 8) && !($prev_flags & 2)) + set $prefix = 0 + end + # (!(msg->flags & LOG_NEWLINE)) + if (!($msg->flags & 2)) + set $newline = 0 + end + end + + if ($prefix) + printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000 + end + if ($msg->text_len != 0) + eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len + end + if ($newline) + printf "\n" + end + if ($msg->dict_len > 0) + set $dict = $log + $msg->text_len + set $idx = 0 + set $line = 1 + while ($idx < $msg->dict_len) + if ($line) + printf " " + set $line = 0 + end + set $c = $dict[$idx] + if ($c == '\0') + printf "\n" + set $line = 1 + else + if ($c < ' ' || $c >= 127 || $c == '\\') + printf "\\x%02x", $c + else + printf "%c", $c + end + end + set $idx = $idx + 1 + end + printf "\n" + end +end +document dump_log_idx + Dump a single log given its index in the log buffer. The first + parameter is the index into log_buf, the second is optional and + specifies the previous record's flags, used for properly + formatting continued lines. end - while ($i < logged_chars) - set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1) +define dmesg + set $i = log_first_idx + set $end_idx = log_first_idx + set $prev_flags = 0 - if ($idx + 100 <= $end_idx) || \ - ($end_idx <= $idx && $idx + 100 < log_buf_len) - printf "%.100s", &log_buf[$idx] - set $i = $i + 100 + while (1) + set $msg = ((struct printk_log *) (log_buf + $i)) + if ($msg->len == 0) + set $i = 0 else - printf "%c", log_buf[$idx] - set $i = $i + 1 + dump_log_idx $i $prev_flags + set $i = $i + $msg->len + set $prev_flags = $msg->flags + end + if ($i == $end_idx) + loop_break end end end
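The macros above hard-code the printk_log record layout and its flag bits. As a hedged C restatement of the fields and flags they dereference (the struct is kernel-internal; kernel/printk/printk.c in this kernel is the authority):

#include <linux/types.h>

/* record layout assumed by dump_log_idx; text, then dictionary, follow
 * the header at log_buf + idx */
struct printk_log {
	u64 ts_nsec;	/* timestamp in nanoseconds */
	u16 len;	/* length of entire record; 0 marks a wrap back to index 0 */
	u16 text_len;	/* length of text buffer */
	u16 dict_len;	/* length of dictionary buffer */
	u8 facility;	/* syslog facility */
	u8 flags:5;	/* LOG_NEWLINE = 2, LOG_PREFIX = 4, LOG_CONT = 8 */
	u8 level:3;	/* syslog level */
};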
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index 631b0f7ae16f..9d05ed7f7da5 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt @@ -369,8 +369,6 @@ does not allocate any driver private context space. Switch configuration -------------------- -- priv_size: additional size needed by the switch driver for its private context - - tag_protocol: this is to indicate what kind of tagging protocol is supported, should be a valid value from the dsa_tag_protocol enum @@ -416,11 +414,6 @@ PHY devices and link management to the switch port MDIO registers. If unavailable return a negative error code. -- poll_link: Function invoked by DSA to query the link state of the switch builtin Ethernet PHYs, per port. This function is responsible for calling netif_carrier_{on,off} when appropriate, and can be used to poll all ports in a single call. Executes from workqueue context. - -- adjust_link: Function invoked by the PHY library when a slave network device is attached to a PHY device. This function is responsible for appropriately configuring the switch port link parameters: speed, duplex, pause based on @@ -542,6 +535,16 @@ Bridge layer Bridge VLAN filtering --------------------- +- port_vlan_filtering: bridge layer function invoked when the bridge gets + configured for turning on or off VLAN filtering. If nothing specific needs to + be done at the hardware level, this callback does not need to be implemented. + When VLAN filtering is turned on, the hardware must be programmed to + reject 802.1Q frames which have VLAN IDs outside of the programmed allowed + VLAN ID map/rules. If there is no PVID programmed into the switch port, + untagged frames must be rejected as well. When turned off, the switch must + accept any 802.1Q frames irrespective of their VLAN ID, and untagged frames are + allowed. + - port_vlan_prepare: bridge layer function invoked when the bridge prepares the configuration of a VLAN on the given port. If the operation is not supported by the hardware, this function should return -EOPNOTSUPP to inform the bridge diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 6c7f365b1515..9ae929395b24 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1036,15 +1036,17 @@ proxy_arp_pvlan - BOOLEAN shared_media - BOOLEAN Send(router) or accept(host) RFC1620 shared media redirects. - Overrides ip_secure_redirects. + Overrides secure_redirects. shared_media for the interface will be enabled if at least one of conf/{all,interface}/shared_media is set to TRUE, it will be disabled otherwise default TRUE secure_redirects - BOOLEAN - Accept ICMP redirect messages only for gateways, - listed in default gateway list. + Accept ICMP redirect messages only to gateways listed in the + interface's current gateway list. Even if disabled, RFC1122 redirect + rules still apply. + Overridden by shared_media. secure_redirects for the interface will be enabled if at least one of conf/{all,interface}/secure_redirects is set to TRUE, it will be disabled otherwise diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt index 20d05719bceb..3849814bfe6d 100644 --- a/Documentation/security/keys.txt +++ b/Documentation/security/keys.txt @@ -826,7 +826,8 @@ The keyctl syscall functions are: (*) Compute a Diffie-Hellman shared secret or public key long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params, - char *buffer, size_t buflen, + char *buffer, size_t buflen, + void *reserved); The params struct contains serial numbers for three keys: @@ -843,6 +844,8 @@ The keyctl syscall functions are: public key. If the base is the remote public key, the result is the shared secret. + The reserved argument must be set to NULL. + The buffer length must be at least the length of the prime, or zero. If the buffer length is nonzero, the length of the result is
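A hedged userspace sketch of the extended call documented in the keys.txt hunk above, assuming the uapi <linux/keyctl.h> shipped with this kernel (which supplies KEYCTL_DH_COMPUTE and struct keyctl_dh_params; the wrapper name dh_compute is invented):

#include <linux/keyctl.h>	/* KEYCTL_DH_COMPUTE, struct keyctl_dh_params */
#include <sys/syscall.h>
#include <unistd.h>

/* params->private, params->prime and params->base hold the three key serials */
static long dh_compute(struct keyctl_dh_params *params, char *buffer, size_t buflen)
{
	/* buflen == 0 queries the required length; reserved must be NULL */
	return syscall(__NR_keyctl, KEYCTL_DH_COMPUTE, params, buffer, buflen, NULL);
}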
diff --git a/MAINTAINERS b/MAINTAINERS index 7304d2e37a98..cb88f724e07c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3854,6 +3854,9 @@ T: git git://people.freedesktop.org/~airlied/linux S: Maintained F: drivers/gpu/drm/ F: drivers/gpu/vga/ +F: Documentation/devicetree/bindings/display/ +F: Documentation/devicetree/bindings/gpu/ +F: Documentation/devicetree/bindings/video/ F: Documentation/DocBook/gpu.* F: include/drm/ F: include/uapi/drm/ @@ -4101,6 +4104,21 @@ F: drivers/gpu/drm/vc4/ F: include/uapi/drm/vc4_drm.h F: Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt +DRM DRIVERS FOR TI OMAP +M: Tomi Valkeinen <tomi.valkeinen@ti.com> +L: dri-devel@lists.freedesktop.org +S: Maintained +F: drivers/gpu/drm/omapdrm/ +F: Documentation/devicetree/bindings/display/ti/ + +DRM DRIVERS FOR TI LCDC +M: Jyri Sarha <jsarha@ti.com> +R: Tomi Valkeinen <tomi.valkeinen@ti.com> +L: dri-devel@lists.freedesktop.org +S: Maintained +F: drivers/gpu/drm/tilcdc/ +F: Documentation/devicetree/bindings/display/tilcdc/ + DSBR100 USB FM RADIO DRIVER M: Alexey Klimov <klimov.linux@gmail.com> L: linux-media@vger.kernel.org @@ -7989,6 +8007,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git S: Odd Fixes +F: Documentation/devicetree/bindings/net/ F: drivers/net/ F: include/linux/if_* F: include/linux/netdevice.h @@ -8944,6 +8963,7 @@ M: Linus Walleij <linus.walleij@linaro.org> L: linux-gpio@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git S: Maintained +F: Documentation/devicetree/bindings/pinctrl/ F: drivers/pinctrl/ F: include/linux/pinctrl/ @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 7 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Psychotic Stoned Sheep # *DOCUMENTATION* diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index ef9119f7462e..4d9375814b53 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target, if (ret) return ret; - vfp_flush_hwstate(thread); thread->vfpstate.hard = new_vfp; + vfp_flush_hwstate(thread); return 0; } diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c index d9c3ffc39329..390795b334c3 100644 --- a/arch/arm/mach-omap2/board-ldp.c +++ b/arch/arm/mach-omap2/board-ldp.c @@ -39,7 +39,7 @@ #include "gpmc.h" #include "gpmc-smsc911x.h" -#include <video/omapdss.h> +#include <linux/platform_data/omapdss.h> #include <video/omap-panel-data.h> #include "board-flash.h" @@ -47,6 +47,7 @@ #include "hsmmc.h" #include "control.h" #include "common-board-devices.h" +#include "display.h" #define LDP_SMSC911X_CS 1 #define LDP_SMSC911X_GPIO 152 diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c index 9cfebc5c7455..180c6aa633bd 100644 --- a/arch/arm/mach-omap2/board-rx51-video.c +++ b/arch/arm/mach-omap2/board-rx51-video.c @@ -15,13 +15,14 @@ #include <linux/spi/spi.h> #include <linux/mm.h> #include <asm/mach-types.h> -#include <video/omapdss.h> +#include <linux/platform_data/omapdss.h> #include <video/omap-panel-data.h> #include <linux/platform_data/spi-omap2-mcspi.h> #include "soc.h" #include "board-rx51.h" +#include "display.h" #include "mux.h" @@ -32,7 +33,6 @@ static struct connector_atv_platform_data rx51_tv_pdata = { .name = "tv", .source = "venc.0", - .connector_type =
OMAP_DSS_VENC_TYPE_COMPOSITE, .invert_polarity = false, }; diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 6ab13d18c636..70b3eaf085e4 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -29,7 +29,7 @@ #include <linux/mfd/syscon.h> #include <linux/regmap.h> -#include <video/omapdss.h> +#include <linux/platform_data/omapdss.h> #include "omap_hwmod.h" #include "omap_device.h" #include "omap-pm.h" diff --git a/arch/arm/mach-omap2/display.h b/arch/arm/mach-omap2/display.h index 7375854b16c7..78f253005279 100644 --- a/arch/arm/mach-omap2/display.h +++ b/arch/arm/mach-omap2/display.h @@ -33,4 +33,9 @@ int omap_init_vout(void); struct device_node * __init omapdss_find_dss_of_node(void); +struct omap_dss_board_info; + +/* Init with the board info */ +int omap_display_init(struct omap_dss_board_info *board_data); + #endif diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c index ea2be0f5953b..1d583bc0b1a9 100644 --- a/arch/arm/mach-omap2/dss-common.c +++ b/arch/arm/mach-omap2/dss-common.c @@ -27,7 +27,7 @@ #include <linux/gpio.h> #include <linux/platform_device.h> -#include <video/omapdss.h> +#include <linux/platform_data/omapdss.h> #include <video/omap-panel-data.h> #include "soc.h" diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 76747d92bc72..5a0a691d4220 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -113,6 +113,18 @@ config ARCH_PHYS_ADDR_T_64BIT config MMU def_bool y +config ARM64_PAGE_SHIFT + int + default 16 if ARM64_64K_PAGES + default 14 if ARM64_16K_PAGES + default 12 + +config ARM64_CONT_SHIFT + int + default 5 if ARM64_64K_PAGES + default 7 if ARM64_16K_PAGES + default 4 + config ARCH_MMAP_RND_BITS_MIN default 14 if ARM64_64K_PAGES default 16 if ARM64_16K_PAGES @@ -426,6 +438,15 @@ config CAVIUM_ERRATUM_22375 If unsure, say Y. +config CAVIUM_ERRATUM_23144 + bool "Cavium erratum 23144: ITS SYNC hang on dual socket system" + depends on NUMA + default y + help + The ITS SYNC command hangs for cross-node I/O and collections/CPU mapping. + + If unsure, say Y. + config CAVIUM_ERRATUM_23154 bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed" default y diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index 710fde4ad0f0..0cc758cdd0dc 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -12,7 +12,8 @@ config ARM64_PTDUMP who are working in architecture specific areas of the kernel. It is probably not a good idea to enable this feature in a production kernel. - If in doubt, say "N" + + If in doubt, say N. config PID_IN_CONTEXTIDR bool "Write the current PID to the CONTEXTIDR register" @@ -38,15 +39,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET value. config DEBUG_SET_MODULE_RONX - bool "Set loadable kernel module data as NX and text as RO" - depends on MODULES - help - This option helps catch unintended modifications to loadable - kernel module's text and read-only data. It also prevents execution - of module data. Such protection may interfere with run-time code - patching and dynamic kernel tracing - and they might also protect - against certain classes of kernel exploits. - If in doubt, say "N". + bool "Set loadable kernel module data as NX and text as RO" + depends on MODULES + default y + help + If this is set, kernel module text and rodata will be made read-only. + This is to help catch accidental or malicious attempts to change the + kernel's executable code. + + If in doubt, say Y.
config DEBUG_RODATA bool "Make kernel text and rodata read-only" @@ -56,7 +57,7 @@ config DEBUG_RODATA is to help catch accidental or malicious attempts to change the kernel's executable code. - If in doubt, say Y + If in doubt, say Y. config DEBUG_ALIGN_RODATA depends on DEBUG_RODATA @@ -69,7 +70,7 @@ config DEBUG_ALIGN_RODATA alignment and potentially wasted space. Turn on this option if performance is more important than memory pressure. - If in doubt, say N + If in doubt, say N. source "drivers/hwtracing/coresight/Kconfig" diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 354d75402ace..7085e322dc42 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -60,7 +60,9 @@ head-y := arch/arm64/kernel/head.o # The byte offset of the kernel image in RAM from the start of RAM. ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y) -TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}') +TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \ + int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \ + rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}") else TEXT_OFFSET := 0x00080000 endif diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 7a09c48c0475..579b6e654f2d 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, #define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) #endif -#ifdef CONFIG_COMPAT - #ifdef __AARCH64EB__ #define COMPAT_ELF_PLATFORM ("v8b") #else #define COMPAT_ELF_PLATFORM ("v8l") #endif +#ifdef CONFIG_COMPAT + #define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) /* AArch32 registers. */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 72a3025bb583..31b73227b41f 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -55,8 +55,9 @@ #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)) /* - * PAGE_OFFSET - the virtual address of the start of the kernel image (top + * PAGE_OFFSET - the virtual address of the start of the linear map (top * (VA_BITS - 1)) + * KIMAGE_VADDR - the virtual address of the start of the kernel image * VA_BITS - the maximum number of bits for virtual addresses. * VA_START - the first kernel virtual address. * TASK_SIZE - the maximum size of a user space task. diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 17b45f7d96d3..8472c6def5ef 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -23,16 +23,8 @@ /* PAGE_SHIFT determines the page size */ /* CONT_SHIFT determines the number of pages which can be tracked together */ -#ifdef CONFIG_ARM64_64K_PAGES -#define PAGE_SHIFT 16 -#define CONT_SHIFT 5 -#elif defined(CONFIG_ARM64_16K_PAGES) -#define PAGE_SHIFT 14 -#define CONT_SHIFT 7 -#else -#define PAGE_SHIFT 12 -#define CONT_SHIFT 4 -#endif +#define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT +#define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 0685d74572af..9e397a542756 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -81,19 +81,6 @@ static inline void set_fs(mm_segment_t fs) #define segment_eq(a, b) ((a) == (b)) /* - * Return 1 if addr < current->addr_limit, 0 otherwise. 
- */ -#define __addr_ok(addr) \ -({ \ - unsigned long flag; \ - asm("cmp %1, %0; cset %0, lo" \ - : "=&r" (flag) \ - : "r" (addr), "0" (current_thread_info()->addr_limit) \ - : "cc"); \ - flag; \ -}) - -/* * Test whether a block of memory is a valid user space address. * Returns 1 if the range is valid, 0 otherwise. * diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 41e58fe3c041..e78ac26324bd 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -44,7 +44,7 @@ #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) -#define __NR_compat_syscalls 390 +#define __NR_compat_syscalls 394 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 5b925b761a2a..b7e8ef16ff0d 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -801,6 +801,14 @@ __SYSCALL(__NR_execveat, compat_sys_execveat) __SYSCALL(__NR_userfaultfd, sys_userfaultfd) #define __NR_membarrier 389 __SYSCALL(__NR_membarrier, sys_membarrier) +#define __NR_mlock2 390 +__SYSCALL(__NR_mlock2, sys_mlock2) +#define __NR_copy_file_range 391 +__SYSCALL(__NR_copy_file_range, sys_copy_file_range) +#define __NR_preadv2 392 +__SYSCALL(__NR_preadv2, compat_sys_preadv2) +#define __NR_pwritev2 393 +__SYSCALL(__NR_pwritev2, compat_sys_pwritev2) /* * Please add new compat syscalls above this comment and update diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 3808470486f3..c173d329397f 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -22,6 +22,8 @@ #include <linux/bitops.h> #include <linux/bug.h> +#include <linux/compat.h> +#include <linux/elf.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/personality.h> @@ -104,6 +106,7 @@ static const char *const compat_hwcap2_str[] = { static int c_show(struct seq_file *m, void *v) { int i, j; + bool compat = personality(current->personality) == PER_LINUX32; for_each_online_cpu(i) { struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); @@ -115,6 +118,9 @@ static int c_show(struct seq_file *m, void *v) * "processor". Give glibc what it expects. */ seq_printf(m, "processor\t: %d\n", i); + if (compat) + seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n", + MIDR_REVISION(midr), COMPAT_ELF_PLATFORM); seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", loops_per_jiffy / (500000UL/HZ), @@ -127,7 +133,7 @@ static int c_show(struct seq_file *m, void *v) * software which does already (at least for 32-bit). 
*/ seq_puts(m, "Features\t:"); - if (personality(current->personality) == PER_LINUX32) { + if (compat) { #ifdef CONFIG_COMPAT for (j = 0; compat_hwcap_str[j]; j++) if (compat_elf_hwcap & (1 << j)) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index c5392081b49b..f7cf463107df 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -477,8 +477,9 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) void __user *pc = (void __user *)instruction_pointer(regs); console_verbose(); - pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n", - handler[reason], esr, esr_get_class_string(esr)); + pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n", + handler[reason], smp_processor_id(), esr, + esr_get_class_string(esr)); __show_regs(regs); info.si_signo = SIGILL; diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index fff7cd42b3a3..5f8f80b4a224 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c @@ -169,7 +169,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) * Make sure stores to the GIC via the memory mapped interface * are now visible to the system register interface. */ - dsb(st); + if (!cpu_if->vgic_sre) + dsb(st); cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); @@ -190,12 +191,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) continue; - if (cpu_if->vgic_elrsr & (1 << i)) { + if (cpu_if->vgic_elrsr & (1 << i)) cpu_if->vgic_lr[i] &= ~ICH_LR_STATE; - continue; - } + else + cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); - cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); __gic_v3_set_lr(0, i); } @@ -236,8 +236,12 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) val = read_gicreg(ICC_SRE_EL2); write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2); - isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */ - write_gicreg(1, ICC_SRE_EL1); + + if (!cpu_if->vgic_sre) { + /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */ + isb(); + write_gicreg(1, ICC_SRE_EL1); + } } void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) @@ -256,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) * been actually programmed with the value we want before * starting to mess with the rest of the GIC. */ - write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1); - isb(); + if (!cpu_if->vgic_sre) { + write_gicreg(0, ICC_SRE_EL1); + isb(); + } val = read_gicreg(ICH_VTR_EL2); max_lr_idx = vtr_to_max_lr_idx(val); @@ -306,18 +312,18 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) * (re)distributors. This ensure the guest will read the * correct values from the memory-mapped interface. */ - isb(); - dsb(sy); + if (!cpu_if->vgic_sre) { + isb(); + dsb(sy); + } vcpu->arch.vgic_cpu.live_lrs = live_lrs; /* * Prevent the guest from touching the GIC system registers if * SRE isn't enabled for GICv3 emulation. 
*/ - if (!cpu_if->vgic_sre) { - write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, - ICC_SRE_EL2); - } + write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, + ICC_SRE_EL2); } void __hyp_text __vgic_v3_init_lrs(void) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 7bbe3ff02602..a57d650f552c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu, return true; } +static bool access_gic_sre(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + if (p->is_write) + return ignore_write(vcpu, p); + + p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre; + return true; +} + static bool trap_raz_wi(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) @@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { access_gic_sgi }, /* ICC_SRE_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), - trap_raz_wi }, + access_gic_sre }, /* CONTEXTIDR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index 8404190fe2bd..ccfde237d6e6 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -150,6 +150,7 @@ static const struct prot_bits pte_bits[] = { struct pg_level { const struct prot_bits *bits; + const char *name; size_t num; u64 mask; }; @@ -157,15 +158,19 @@ struct pg_level { static struct pg_level pg_level[] = { { }, { /* pgd */ + .name = "PGD", .bits = pte_bits, .num = ARRAY_SIZE(pte_bits), }, { /* pud */ + .name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD", .bits = pte_bits, .num = ARRAY_SIZE(pte_bits), }, { /* pmd */ + .name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD", .bits = pte_bits, .num = ARRAY_SIZE(pte_bits), }, { /* pte */ + .name = "PTE", .bits = pte_bits, .num = ARRAY_SIZE(pte_bits), }, @@ -214,7 +219,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, delta >>= 10; unit++; } - seq_printf(st->seq, "%9lu%c", delta, *unit); + seq_printf(st->seq, "%9lu%c %s", delta, *unit, + pg_level[st->level].name); if (pg_level[st->level].bits) dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num); diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index aa8aee7d6929..2e49bd252fe7 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -306,6 +306,10 @@ static __init int setup_hugepagesz(char *opt) hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); } else if (ps == PUD_SIZE) { hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); + } else if (ps == (PAGE_SIZE * CONT_PTES)) { + hugetlb_add_hstate(CONT_PTE_SHIFT); + } else if (ps == (PMD_SIZE * CONT_PMDS)) { + hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT); } else { hugetlb_bad_size(); pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10); @@ -314,3 +318,13 @@ static __init int setup_hugepagesz(char *opt) return 1; } __setup("hugepagesz=", setup_hugepagesz); + +#ifdef CONFIG_ARM64_64K_PAGES +static __init int add_default_hugepagesz(void) +{ + if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL) + hugetlb_add_hstate(CONT_PMD_SHIFT); + return 0; +} +arch_initcall(add_default_hugepagesz); +#endif diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h index 4736020ba5ea..5e953ab4530d 100644 --- a/arch/parisc/include/asm/traps.h +++ b/arch/parisc/include/asm/traps.h @@ -8,6 +8,8 @@ struct pt_regs; void parisc_terminate(char *msg, struct pt_regs *regs, int code, 
unsigned long offset) __noreturn __cold; +void die_if_kernel(char *str, struct pt_regs *regs, long err); + /* mm/fault.c */ void do_page_fault(struct pt_regs *regs, unsigned long code, unsigned long address); diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index e81ccf1716e9..5adc339eb7c8 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -324,8 +324,9 @@ int init_per_cpu(int cpunum) per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; - printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n", - cpunum, coproc_cfg.revision, coproc_cfg.model); + if (cpunum == 0) + printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n", + cpunum, coproc_cfg.revision, coproc_cfg.model); /* ** store status register to stack (hopefully aligned) diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 58dd6801f5be..31ec99a5f119 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -309,11 +309,6 @@ void __init time_init(void) clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz, NSEC_PER_MSEC, 0); -#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT) - /* At bootup only one 64bit CPU is online and cr16 is "stable" */ - set_sched_clock_stable(); -#endif - start_cpu_itimer(); /* get CPU 0 started */ /* register at clocksource framework */ diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index d7c0acb35ec2..2b65c0177778 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -28,6 +28,7 @@ #include <linux/ratelimit.h> #include <asm/uaccess.h> #include <asm/hardirq.h> +#include <asm/traps.h> /* #define DEBUG_UNALIGNED 1 */ @@ -130,8 +131,6 @@ int unaligned_enabled __read_mostly = 1; -void die_if_kernel (char *str, struct pt_regs *regs, long err); - static int emulate_ldh(struct pt_regs *regs, int toreg) { unsigned long saddr = regs->ior; @@ -666,7 +665,7 @@ void handle_unaligned(struct pt_regs *regs) break; } - if (modify && R1(regs->iir)) + if (ret == 0 && modify && R1(regs->iir)) regs->gr[R1(regs->iir)] = newbase; @@ -677,6 +676,14 @@ void handle_unaligned(struct pt_regs *regs) if (ret) { + /* + * The unaligned handler failed. + * If we were called by __get_user() or __put_user() jump + * to its exception fixup handler instead of crashing.
+ */ + if (!user_mode(regs) && fixup_exception(regs)) + return; + printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret); die_if_kernel("Unaligned data reference", regs, 28); diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index ddd988b267a9..e278a87f43cc 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c @@ -75,7 +75,10 @@ find_unwind_entry(unsigned long addr) if (addr >= kernel_unwind_table.start && addr <= kernel_unwind_table.end) e = find_unwind_entry_in_table(&kernel_unwind_table, addr); - else + else { + unsigned long flags; + + spin_lock_irqsave(&unwind_lock, flags); list_for_each_entry(table, &unwind_tables, list) { if (addr >= table->start && addr <= table->end) @@ -86,6 +89,8 @@ find_unwind_entry(unsigned long addr) break; } } + spin_unlock_irqrestore(&unwind_lock, flags); + } return e; } @@ -303,18 +308,16 @@ static void unwind_frame_regs(struct unwind_frame_info *info) insn = *(unsigned int *)npc; - if ((insn & 0xffffc000) == 0x37de0000 || - (insn & 0xffe00000) == 0x6fc00000) { + if ((insn & 0xffffc001) == 0x37de0000 || + (insn & 0xffe00001) == 0x6fc00000) { /* ldo X(sp), sp, or stwm X,D(sp) */ - frame_size += (insn & 0x1 ? -1 << 13 : 0) | - ((insn & 0x3fff) >> 1); + frame_size += (insn & 0x3fff) >> 1; dbg("analyzing func @ %lx, insn=%08x @ " "%lx, frame_size = %ld\n", info->ip, insn, npc, frame_size); - } else if ((insn & 0xffe00008) == 0x73c00008) { + } else if ((insn & 0xffe00009) == 0x73c00008) { /* std,ma X,D(sp) */ - frame_size += (insn & 0x1 ? -1 << 13 : 0) | - (((insn >> 4) & 0x3ff) << 3); + frame_size += ((insn >> 4) & 0x3ff) << 3; dbg("analyzing func @ %lx, insn=%08x @ " "%lx, frame_size = %ld\n", info->ip, insn, npc, frame_size); @@ -333,6 +336,9 @@ static void unwind_frame_regs(struct unwind_frame_info *info) } } + if (frame_size > e->Total_frame_size << 3) + frame_size = e->Total_frame_size << 3; + if (!unwind_special(info, e->region_start, frame_size)) { info->prev_sp = info->sp - frame_size; if (e->Millicode) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index c1e82e968506..a0948f40bc7b 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -717,7 +717,7 @@ #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */ #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ #define SPRN_MMCR1 798 -#define SPRN_MMCR2 769 +#define SPRN_MMCR2 785 #define SPRN_MMCRA 0x312 #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL @@ -754,13 +754,13 @@ #define SPRN_PMC6 792 #define SPRN_PMC7 793 #define SPRN_PMC8 794 -#define SPRN_SIAR 780 -#define SPRN_SDAR 781 #define SPRN_SIER 784 #define SIER_SIPR 0x2000000 /* Sampled MSR_PR */ #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */ #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */ #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */ +#define SPRN_SIAR 796 +#define SPRN_SDAR 797 #define SPRN_TACR 888 #define SPRN_TCSCR 889 #define SPRN_CSIGR 890 diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index da5192590c44..ccd2037c797f 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -656,6 +656,7 @@ unsigned char ibm_architecture_vec[] = { W(0xffff0000), W(0x003e0000), /* POWER6 */ W(0xffff0000), W(0x003f0000), /* POWER7 */ W(0xffff0000), W(0x004b0000), /* POWER8E */ + W(0xffff0000), W(0x004c0000), /* POWER8NVL */ W(0xffff0000), W(0x004d0000), /* POWER8 */ 
W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */ W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 59268969a0bc..b2740c67e172 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -159,6 +159,19 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = { }, }; +/* + * 'R' and 'C' update notes: + * - Under pHyp or KVM, the updatepp path will not set C, thus it *will* + * create writeable HPTEs without C set, because the hcall H_PROTECT + * that we use in that case will not update C + * - The above is however not a problem, because we also don't do that + * fancy "no flush" variant of eviction and we use H_REMOVE which will + * do the right thing and thus we don't have the race I described earlier + * + * - Under bare metal, we do have the race, so we need R and C set + * - We make sure R is always set and never lost + * - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping + */ unsigned long htab_convert_pte_flags(unsigned long pteflags) { unsigned long rflags = 0; @@ -186,9 +199,14 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags) rflags |= 0x1; } /* - * Always add "C" bit for perf. Memory coherence is always enabled + * We can't allow hardware to update hpte bits. Hence always + * set 'R' bit and set 'C' if it is a write fault + * Memory coherence is always enabled */ - rflags |= HPTE_R_C | HPTE_R_M; + rflags |= HPTE_R_R | HPTE_R_M; + + if (pteflags & _PAGE_DIRTY) + rflags |= HPTE_R_C; /* * Add in WIG bits */ diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index eb4451144746..670318766545 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -33,10 +33,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, changed = !pmd_same(*(pmdp), entry); if (changed) { __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry)); - /* - * Since we are not supporting SW TLB systems, we don't - * have any thing similar to flush_tlb_page_nohash() - */ + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); } return changed; } diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 18b2c11604fa..c939e6e57a9e 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -296,11 +296,6 @@ found: void __init radix__early_init_mmu(void) { unsigned long lpcr; - /* - * setup LPCR UPRT based on mmu_features - */ - lpcr = mfspr(SPRN_LPCR); - mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); #ifdef CONFIG_PPC_64K_PAGES /* PAGE_SIZE mappings */ @@ -343,8 +338,11 @@ void __init radix__early_init_mmu(void) __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT; radix_init_page_sizes(); - if (!firmware_has_feature(FW_FEATURE_LPAR)) + if (!firmware_has_feature(FW_FEATURE_LPAR)) { + lpcr = mfspr(SPRN_LPCR); + mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); radix_init_partition_table(); + } radix_init_pgtable(); } @@ -353,16 +351,15 @@ void radix__early_init_mmu_secondary(void) { unsigned long lpcr; /* - * setup LPCR UPRT based on mmu_features + * update partition table control register and UPRT */ - lpcr = mfspr(SPRN_LPCR); - mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); - /* - * update partition table control register, 64 K size. 
- */ - if (!firmware_has_feature(FW_FEATURE_LPAR)) + if (!firmware_has_feature(FW_FEATURE_LPAR)) { + lpcr = mfspr(SPRN_LPCR); + mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); + mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12)); + } } void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index ac3ffd97e059..3998e0f9a03b 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -53,7 +53,6 @@ static int ibm_read_slot_reset_state2; static int ibm_slot_error_detail; static int ibm_get_config_addr_info; static int ibm_get_config_addr_info2; -static int ibm_configure_bridge; static int ibm_configure_pe; /* @@ -81,7 +80,14 @@ static int pseries_eeh_init(void) ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); ibm_configure_pe = rtas_token("ibm,configure-pe"); - ibm_configure_bridge = rtas_token("ibm,configure-bridge"); + + /* + * ibm,configure-pe and ibm,configure-bridge have the same semantics, + * however ibm,configure-pe can be faster. If we can't find + * ibm,configure-pe then fall back to using ibm,configure-bridge. + */ + if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE) + ibm_configure_pe = rtas_token("ibm,configure-bridge"); /* * Necessary sanity check. We needn't check "get-config-addr-info" @@ -93,8 +99,7 @@ static int pseries_eeh_init(void) (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE && ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) || ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE || - (ibm_configure_pe == RTAS_UNKNOWN_SERVICE && - ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) { + ibm_configure_pe == RTAS_UNKNOWN_SERVICE) { pr_info("EEH functionality not supported\n"); return -EINVAL; } @@ -615,29 +620,41 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe) { int config_addr; int ret; + /* Waiting 0.2s maximum before skipping configuration */ + int max_wait = 200; /* Figure out the PE address */ config_addr = pe->config_addr; if (pe->addr) config_addr = pe->addr; - /* Use new configure-pe function, if supported */ - if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) { + while (max_wait > 0) { ret = rtas_call(ibm_configure_pe, 3, 1, NULL, config_addr, BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid)); - } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) { - ret = rtas_call(ibm_configure_bridge, 3, 1, NULL, - config_addr, BUID_HI(pe->phb->buid), - BUID_LO(pe->phb->buid)); - } else { - return -EFAULT; - } - if (ret) - pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n", - __func__, pe->phb->global_number, pe->addr, ret); + if (!ret) + return ret; + + /* + * If RTAS returns a delay value that's above 100ms, cut it + * down to 100ms in case firmware made a mistake. 
For more + * on how these delay values work see rtas_busy_delay_time + */ + if (ret > RTAS_EXTENDED_DELAY_MIN+2 && + ret <= RTAS_EXTENDED_DELAY_MAX) + ret = RTAS_EXTENDED_DELAY_MIN+2; + + max_wait -= rtas_busy_delay_time(ret); + + if (max_wait < 0) + break; + + rtas_busy_delay(ret); + } + pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n", + __func__, pe->phb->global_number, pe->addr, ret); return ret; } diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 0ac42cc4f880..d5ec71b2ed02 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -1,8 +1,7 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_AUDIT=y -CONFIG_NO_HZ=y +CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y @@ -13,19 +12,19 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_NUMA_BALANCING=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_KMEM=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CGROUP_PERF=y +CONFIG_BLK_CGROUP=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y -CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CHECKPOINT_RESTORE=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y @@ -55,7 +54,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_DEFAULT_DEADLINE=y CONFIG_LIVEPATCH=y -CONFIG_MARCH_Z196=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=256 CONFIG_NUMA=y @@ -65,6 +63,15 @@ CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_CMA=y +CONFIG_MEM_SOFT_DIRTY=y +CONFIG_ZPOOL=m +CONFIG_ZBUD=m +CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC_STAT=y +CONFIG_IDLE_PAGE_TRACKING=y CONFIG_PCI=y CONFIG_PCI_DEBUG=y CONFIG_HOTPLUG_PCI=y @@ -452,6 +459,7 @@ CONFIG_HW_RANDOM_VIRTIO=m CONFIG_RAW_DRIVER=m CONFIG_HANGCHECK_TIMER=m CONFIG_TN3270_FS=y +# CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m @@ -537,6 +545,8 @@ CONFIG_DLM=m CONFIG_PRINTK_TIME=y CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_GDB_SCRIPTS=y CONFIG_FRAME_WARN=1024 CONFIG_READABLE_ASM=y CONFIG_UNUSED_SYMBOLS=y @@ -555,13 +565,17 @@ CONFIG_SLUB_DEBUG_ON=y CONFIG_SLUB_STATS=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_DEBUG_VM=y +CONFIG_DEBUG_VM_VMACACHE=y CONFIG_DEBUG_VM_RB=y +CONFIG_DEBUG_VM_PGFLAGS=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m CONFIG_DEBUG_PER_CPU_MAPS=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_HUNG_TASK=y +CONFIG_WQ_WATCHDOG=y CONFIG_PANIC_ON_OOPS=y +CONFIG_DEBUG_TIMEKEEPING=y CONFIG_TIMER_STATS=y CONFIG_DEBUG_RT_MUTEXES=y CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y @@ -596,6 +610,8 @@ CONFIG_FTRACE_SYSCALLS=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_UPROBE_EVENT=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_LKDTM=m CONFIG_TEST_LIST_SORT=y CONFIG_KPROBES_SANITY_TEST=y @@ -607,7 +623,6 @@ CONFIG_TEST_STRING_HELPERS=y CONFIG_TEST_KSTRTOX=y CONFIG_DMA_API_DEBUG=y CONFIG_TEST_BPF=m -# CONFIG_STRICT_DEVMEM is not set CONFIG_S390_PTDUMP=y CONFIG_ENCRYPTED_KEYS=m CONFIG_SECURITY=y @@ -651,7 +666,6 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_ZLIB=y CONFIG_CRYPTO_LZO=m 
CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m @@ -664,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m -CONFIG_ASYMMETRIC_KEY_TYPE=m +CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index a31dcd56f7c0..f46a35115d2d 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -1,8 +1,7 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_AUDIT=y -CONFIG_NO_HZ=y +CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y @@ -13,17 +12,17 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_NUMA_BALANCING=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y -CONFIG_BLK_CGROUP=y +CONFIG_CHECKPOINT_RESTORE=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y @@ -53,7 +52,6 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_DEFAULT_DEADLINE=y -CONFIG_MARCH_Z196=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=256 CONFIG_NUMA=y @@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_CMA=y +CONFIG_ZSWAP=y +CONFIG_ZBUD=m +CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC_STAT=y +CONFIG_IDLE_PAGE_TRACKING=y CONFIG_PCI=y CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_S390=y @@ -530,6 +536,8 @@ CONFIG_NLS_UTF8=m CONFIG_DLM=m CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_GDB_SCRIPTS=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_FRAME_WARN=1024 CONFIG_UNUSED_SYMBOLS=y @@ -547,13 +555,13 @@ CONFIG_LATENCYTOP=y CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y CONFIG_BLK_DEV_IO_TRACE=y # CONFIG_KPROBE_EVENT is not set +CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_LKDTM=m CONFIG_RBTREE_TEST=m CONFIG_INTERVAL_TREE_TEST=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BPF=m -# CONFIG_STRICT_DEVMEM is not set CONFIG_S390_PTDUMP=y CONFIG_ENCRYPTED_KEYS=m CONFIG_SECURITY=y @@ -597,8 +605,6 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_ZLIB=y -CONFIG_CRYPTO_LZO=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m CONFIG_CRYPTO_USER_API_HASH=m @@ -610,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m -CONFIG_ASYMMETRIC_KEY_TYPE=m +CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 7b73bf353345..ba0f2a58b8cd 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -1,8 +1,7 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y CONFIG_AUDIT=y -CONFIG_NO_HZ=y +CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y @@ -14,17 +13,17 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_NUMA_BALANCING=y # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set -CONFIG_CGROUP_FREEZER=y 
-CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y -CONFIG_BLK_CGROUP=y +CONFIG_CHECKPOINT_RESTORE=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y @@ -53,7 +52,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_CFQ_GROUP_IOSCHED=y CONFIG_DEFAULT_DEADLINE=y CONFIG_LIVEPATCH=y -CONFIG_MARCH_Z196=y CONFIG_TUNE_ZEC12=y CONFIG_NR_CPUS=512 CONFIG_NUMA=y @@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_CMA=y +CONFIG_ZSWAP=y +CONFIG_ZBUD=m +CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC_STAT=y +CONFIG_IDLE_PAGE_TRACKING=y CONFIG_PCI=y CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_S390=y @@ -447,6 +453,7 @@ CONFIG_HW_RANDOM_VIRTIO=m CONFIG_RAW_DRIVER=m CONFIG_HANGCHECK_TIMER=m CONFIG_TN3270_FS=y +# CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_WATCHDOG_NOWAYOUT=y CONFIG_SOFT_WATCHDOG=m @@ -530,6 +537,8 @@ CONFIG_NLS_UTF8=m CONFIG_DLM=m CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_GDB_SCRIPTS=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_FRAME_WARN=1024 CONFIG_UNUSED_SYMBOLS=y @@ -546,11 +555,12 @@ CONFIG_FTRACE_SYSCALLS=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_UPROBE_EVENT=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_LKDTM=m CONFIG_PERCPU_TEST=m CONFIG_ATOMIC64_SELFTEST=y CONFIG_TEST_BPF=m -# CONFIG_STRICT_DEVMEM is not set CONFIG_S390_PTDUMP=y CONFIG_ENCRYPTED_KEYS=m CONFIG_SECURITY=y @@ -594,8 +604,6 @@ CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_ZLIB=y -CONFIG_CRYPTO_LZO=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m CONFIG_CRYPTO_USER_API_HASH=m @@ -607,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m -CONFIG_ASYMMETRIC_KEY_TYPE=m +CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_CRC7=m diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 1719843a55a2..4366a3e3e754 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -1,5 +1,5 @@ # CONFIG_SWAP is not set -CONFIG_NO_HZ=y +CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y @@ -7,7 +7,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y CONFIG_DEFAULT_DEADLINE=y -CONFIG_MARCH_Z196=y CONFIG_TUNE_ZEC12=y # CONFIG_COMPAT is not set CONFIG_NR_CPUS=2 @@ -64,7 +63,6 @@ CONFIG_PANIC_ON_OOPS=y # CONFIG_SCHED_DEBUG is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_FTRACE is not set -# CONFIG_STRICT_DEVMEM is not set # CONFIG_PFAULT is not set # CONFIG_S390_HYPFS_FS is not set # CONFIG_VIRTUALIZATION is not set diff --git a/arch/s390/defconfig b/arch/s390/defconfig index e24f2af4c73b..3f571ea89509 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -1,8 +1,8 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_FHANDLE=y +CONFIG_USELIB=y CONFIG_AUDIT=y -CONFIG_NO_HZ=y +CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y @@ -11,19 +11,19 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_CGROUPS=y 
-CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_KMEM=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CGROUP_PERF=y +CONFIG_BLK_CGROUP=y CONFIG_CGROUP_SCHED=y CONFIG_RT_GROUP_SCHED=y -CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CHECKPOINT_RESTORE=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_BLK_DEV_INITRD=y @@ -44,7 +44,6 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y CONFIG_DEFAULT_DEADLINE=y CONFIG_LIVEPATCH=y -CONFIG_MARCH_Z196=y CONFIG_NR_CPUS=256 CONFIG_NUMA=y CONFIG_HZ_100=y @@ -52,6 +51,14 @@ CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CLEANCACHE=y +CONFIG_FRONTSWAP=y +CONFIG_CMA=y +CONFIG_ZSWAP=y +CONFIG_ZBUD=m +CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC_STAT=y +CONFIG_IDLE_PAGE_TRACKING=y CONFIG_CRASH_DUMP=y CONFIG_BINFMT_MISC=m CONFIG_HIBERNATION=y @@ -61,7 +68,6 @@ CONFIG_UNIX=y CONFIG_NET_KEY=y CONFIG_INET=y CONFIG_IP_MULTICAST=y -# CONFIG_INET_LRO is not set CONFIG_L2TP=m CONFIG_L2TP_DEBUGFS=m CONFIG_VLAN_8021Q=y @@ -144,6 +150,9 @@ CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y # CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_GDB_SCRIPTS=y CONFIG_UNUSED_SYMBOLS=y CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y @@ -158,20 +167,21 @@ CONFIG_LOCK_STAT=y CONFIG_DEBUG_LOCKDEP=y CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_DEBUG_LIST=y -CONFIG_DEBUG_PI_LIST=y CONFIG_DEBUG_SG=y CONFIG_DEBUG_NOTIFIERS=y CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_RCU_TRACE=y CONFIG_LATENCYTOP=y CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y -CONFIG_TRACER_SNAPSHOT=y +CONFIG_SCHED_TRACER=y +CONFIG_FTRACE_SYSCALLS=y CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_UPROBE_EVENT=y +CONFIG_FUNCTION_PROFILER=y +CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_KPROBES_SANITY_TEST=y -# CONFIG_STRICT_DEVMEM is not set CONFIG_S390_PTDUMP=y CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_AUTHENC=m @@ -212,8 +222,6 @@ CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_ZLIB=m -CONFIG_CRYPTO_LZO=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m CONFIG_CRYPTO_ANSI_CPRNG=m diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 7a3144017301..19288c1b36d3 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -250,6 +250,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code) report_user_fault(regs, SIGSEGV, 1); si.si_signo = SIGSEGV; + si.si_errno = 0; si.si_code = si_code; si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); force_sig_info(SIGSEGV, &si, current); diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h index f010c93a88b1..fda605dbc1b4 100644 --- a/arch/s390/net/bpf_jit.h +++ b/arch/s390/net/bpf_jit.h @@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; * | | | * +---------------+ | * | 8 byte skbp | | - * R15+170 -> +---------------+ | + * R15+176 -> +---------------+ | * | 8 byte hlen | | * R15+168 -> +---------------+ | * | 4 byte align | | @@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; #define STK_OFF (STK_SPACE - STK_160_UNUSED) #define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ #define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ -#define 
STK_OFF_SKBP 170 /* Offset of SKB pointer on stack */ +#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */ #define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */ #define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */ diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 9133b0ec000b..bee281f3163d 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -45,7 +45,7 @@ struct bpf_jit { int labels[1]; /* Labels for local jumps */ }; -#define BPF_SIZE_MAX 0x7ffff /* Max size for program (20 bit signed displ) */ +#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */ #define SEEN_SKB 1 /* skb access */ #define SEEN_MEM 2 /* use mem[] for temporary storage */ @@ -450,7 +450,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit) emit_load_skb_data_hlen(jit); if (jit->seen & SEEN_SKB_CHANGE) /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ - EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, + EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15, STK_OFF_SKBP); } diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h index 10e9dabc4c41..f0700cfeedd7 100644 --- a/arch/sparc/include/asm/head_64.h +++ b/arch/sparc/include/asm/head_64.h @@ -15,6 +15,10 @@ #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) +#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE) +#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV) +#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) + #define __CHEETAH_ID 0x003e0014 #define __JALAPENO_ID 0x003e0016 #define __SERRANO_ID 0x003e0022 diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h index 71b5a67522ab..781b9f1dbdc2 100644 --- a/arch/sparc/include/asm/ttable.h +++ b/arch/sparc/include/asm/ttable.h @@ -589,8 +589,8 @@ user_rtt_fill_64bit: \ restored; \ nop; nop; nop; nop; nop; nop; \ nop; nop; nop; nop; nop; \ - ba,a,pt %xcc, user_rtt_fill_fixup; \ - ba,a,pt %xcc, user_rtt_fill_fixup; \ + ba,a,pt %xcc, user_rtt_fill_fixup_dax; \ + ba,a,pt %xcc, user_rtt_fill_fixup_mna; \ ba,a,pt %xcc, user_rtt_fill_fixup; @@ -652,8 +652,8 @@ user_rtt_fill_32bit: \ restored; \ nop; nop; nop; nop; nop; \ nop; nop; nop; \ - ba,a,pt %xcc, user_rtt_fill_fixup; \ - ba,a,pt %xcc, user_rtt_fill_fixup; \ + ba,a,pt %xcc, user_rtt_fill_fixup_dax; \ + ba,a,pt %xcc, user_rtt_fill_fixup_mna; \ ba,a,pt %xcc, user_rtt_fill_fixup; diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 7cf9c6ea3f1f..fdb13327fded 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg CFLAGS_REMOVE_pcr.o := -pg endif +obj-$(CONFIG_SPARC64) += urtt_fill.o obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o obj-$(CONFIG_SPARC32) += etrap_32.o obj-$(CONFIG_SPARC32) += rtrap_32.o diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index d08bdaffdbfc..216948ca4382 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -14,10 +14,6 @@ #include <asm/visasm.h> #include <asm/processor.h> -#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE) -#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV) -#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) - #ifdef CONFIG_CONTEXT_TRACKING # define SCHEDULE_USER schedule_user #else @@ -242,52 +238,17 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 wrpr %g1, %cwp ba,a,pt %xcc, user_rtt_fill_64bit -user_rtt_fill_fixup: 
- rdpr %cwp, %g1 - add %g1, 1, %g1 - wrpr %g1, 0x0, %cwp - - rdpr %wstate, %g2 - sll %g2, 3, %g2 - wrpr %g2, 0x0, %wstate - - /* We know %canrestore and %otherwin are both zero. */ - - sethi %hi(sparc64_kern_pri_context), %g2 - ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 - mov PRIMARY_CONTEXT, %g1 - -661: stxa %g2, [%g1] ASI_DMMU - .section .sun4v_1insn_patch, "ax" - .word 661b - stxa %g2, [%g1] ASI_MMU - .previous - - sethi %hi(KERNBASE), %g1 - flush %g1 +user_rtt_fill_fixup_dax: + ba,pt %xcc, user_rtt_fill_fixup_common + mov 1, %g3 - or %g4, FAULT_CODE_WINFIXUP, %g4 - stb %g4, [%g6 + TI_FAULT_CODE] - stx %g5, [%g6 + TI_FAULT_ADDR] +user_rtt_fill_fixup_mna: + ba,pt %xcc, user_rtt_fill_fixup_common + mov 2, %g3 - mov %g6, %l1 - wrpr %g0, 0x0, %tl - -661: nop - .section .sun4v_1insn_patch, "ax" - .word 661b - SET_GL(0) - .previous - - wrpr %g0, RTRAP_PSTATE, %pstate - - mov %l1, %g6 - ldx [%g6 + TI_TASK], %g4 - LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) - call do_sparc64_fault - add %sp, PTREGS_OFF, %o0 - ba,pt %xcc, rtrap - nop +user_rtt_fill_fixup: + ba,pt %xcc, user_rtt_fill_fixup_common + clr %g3 user_rtt_pre_restore: add %g1, 1, %g1 diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 3c25241fa5cb..91cc2f4ae4d9 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) return 0; } +/* Checks if the fp is valid. We always build signal frames which are + * 16-byte aligned, therefore we can always enforce that the restore + * frame has that property as well. + */ +static bool invalid_frame_pointer(void __user *fp, int fplen) +{ + if ((((unsigned long) fp) & 15) || + ((unsigned long)fp) > 0x100000000ULL - fplen) + return true; + return false; +} + void do_sigreturn32(struct pt_regs *regs) { struct signal_frame32 __user *sf; compat_uptr_t fpu_save; compat_uptr_t rwin_save; - unsigned int psr; + unsigned int psr, ufp; unsigned int pc, npc; sigset_t set; compat_sigset_t seta; @@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs) sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP]; /* 1. Make sure we are not getting garbage from the user */ - if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || - (((unsigned long) sf) & 3)) + if (invalid_frame_pointer(sf, sizeof(*sf))) + goto segv; + + if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) + goto segv; + + if (ufp & 0x7) goto segv; - if (get_user(pc, &sf->info.si_regs.pc) || + if (__get_user(pc, &sf->info.si_regs.pc) || __get_user(npc, &sf->info.si_regs.npc)) goto segv; @@ -227,7 +244,7 @@ segv: asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) { struct rt_signal_frame32 __user *sf; - unsigned int psr, pc, npc; + unsigned int psr, pc, npc, ufp; compat_uptr_t fpu_save; compat_uptr_t rwin_save; sigset_t set; @@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP]; /* 1. 
Make sure we are not getting garbage from the user */ - if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || - (((unsigned long) sf) & 3)) + if (invalid_frame_pointer(sf, sizeof(*sf))) goto segv; - if (get_user(pc, &sf->regs.pc) || + if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) + goto segv; + + if (ufp & 0x7) + goto segv; + + if (__get_user(pc, &sf->regs.pc) || __get_user(npc, &sf->regs.npc)) goto segv; @@ -307,14 +329,6 @@ segv: force_sig(SIGSEGV, current); } -/* Checks if the fp is valid */ -static int invalid_frame_pointer(void __user *fp, int fplen) -{ - if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen) - return 1; - return 0; -} - static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) { unsigned long sp; diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 52aa5e4ce5e7..c3c12efe0bc0 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -60,10 +60,22 @@ struct rt_signal_frame { #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) +/* Checks if the fp is valid. We always build signal frames which are + * 16-byte aligned, therefore we can always enforce that the restore + * frame has that property as well. + */ +static inline bool invalid_frame_pointer(void __user *fp, int fplen) +{ + if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen)) + return true; + + return false; +} + asmlinkage void do_sigreturn(struct pt_regs *regs) { + unsigned long up_psr, pc, npc, ufp; struct signal_frame __user *sf; - unsigned long up_psr, pc, npc; sigset_t set; __siginfo_fpu_t __user *fpu_save; __siginfo_rwin_t __user *rwin_save; @@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; /* 1. 
Make sure we are not getting garbage from the user */ - if (!access_ok(VERIFY_READ, sf, sizeof(*sf))) + if (invalid_frame_pointer(sf, sizeof(*sf))) + goto segv_and_exit; + + if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) goto segv_and_exit; - if (((unsigned long) sf) & 3) + if (ufp & 0x7) goto segv_and_exit; err = __get_user(pc, &sf->info.si_regs.pc); @@ -127,7 +142,7 @@ segv_and_exit: asmlinkage void do_rt_sigreturn(struct pt_regs *regs) { struct rt_signal_frame __user *sf; - unsigned int psr, pc, npc; + unsigned int psr, pc, npc, ufp; __siginfo_fpu_t __user *fpu_save; __siginfo_rwin_t __user *rwin_save; sigset_t set; @@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) synchronize_user_stack(); sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; - if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || - (((unsigned long) sf) & 0x03)) + if (invalid_frame_pointer(sf, sizeof(*sf))) + goto segv; + + if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) + goto segv; + + if (ufp & 0x7) + goto segv; err = __get_user(pc, &sf->regs.pc); @@ -178,15 +198,6 @@ segv: force_sig(SIGSEGV, current); } -/* Checks if the fp is valid */ -static inline int invalid_frame_pointer(void __user *fp, int fplen) -{ - if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen)) - return 1; - - return 0; -} - static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) { unsigned long sp = regs->u_regs[UREG_FP]; diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 39aaec173f66..5ee930c48f4c 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -234,6 +234,17 @@ do_sigsegv: goto out; } +/* Checks if the fp is valid. We always build rt signal frames which + * are 16-byte aligned, therefore we can always enforce that the + * restore frame has that property as well. + */ +static bool invalid_frame_pointer(void __user *fp) +{ + if (((unsigned long) fp) & 15) + return true; + return false; +} + struct rt_signal_frame { struct sparc_stackf ss; siginfo_t info; @@ -246,8 +257,8 @@ struct rt_signal_frame { void do_rt_sigreturn(struct pt_regs *regs) { + unsigned long tpc, tnpc, tstate, ufp; struct rt_signal_frame __user *sf; - unsigned long tpc, tnpc, tstate; __siginfo_fpu_t __user *fpu_save; __siginfo_rwin_t __user *rwin_save; sigset_t set; @@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs) (regs->u_regs [UREG_FP] + STACK_BIAS); /* 1.
Make sure we are not getting garbage from the user */ - if (((unsigned long) sf) & 3) + if (invalid_frame_pointer(sf)) + goto segv; + + if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) goto segv; - err = get_user(tpc, &sf->regs.tpc); + if ((ufp + STACK_BIAS) & 0x7) + goto segv; + + err = __get_user(tpc, &sf->regs.tpc); err |= __get_user(tnpc, &sf->regs.tnpc); if (test_thread_flag(TIF_32BIT)) { tpc &= 0xffffffff; @@ -308,14 +325,6 @@ segv: force_sig(SIGSEGV, current); } -/* Checks if the fp is valid */ -static int invalid_frame_pointer(void __user *fp) -{ - if (((unsigned long) fp) & 15) - return 1; - return 0; -} - static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) { unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c index 0f6eebe71e6c..e5fe8cef9a69 100644 --- a/arch/sparc/kernel/sigutil_32.c +++ b/arch/sparc/kernel/sigutil_32.c @@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) { int err; + + if (((unsigned long) fpu) & 3) + return -EFAULT; + #ifdef CONFIG_SMP if (test_tsk_thread_flag(current, TIF_USEDFPU)) regs->psr &= ~PSR_EF; @@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp) struct thread_info *t = current_thread_info(); int i, wsaved, err; - __get_user(wsaved, &rp->wsaved); + if (((unsigned long) rp) & 3) + return -EFAULT; + + get_user(wsaved, &rp->wsaved); if (wsaved > NSWINS) return -EFAULT; diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c index 387834a9c56a..36aadcbeac69 100644 --- a/arch/sparc/kernel/sigutil_64.c +++ b/arch/sparc/kernel/sigutil_64.c @@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) unsigned long fprs; int err; - err = __get_user(fprs, &fpu->si_fprs); + if (((unsigned long) fpu) & 7) + return -EFAULT; + + err = get_user(fprs, &fpu->si_fprs); fprs_write(0); regs->tstate &= ~TSTATE_PEF; if (fprs & FPRS_DL) @@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp) struct thread_info *t = current_thread_info(); int i, wsaved, err; - __get_user(wsaved, &rp->wsaved); + if (((unsigned long) rp) & 7) + return -EFAULT; + + get_user(wsaved, &rp->wsaved); if (wsaved > NSWINS) return -EFAULT; diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S new file mode 100644 index 000000000000..5604a2b051d4 --- /dev/null +++ b/arch/sparc/kernel/urtt_fill.S @@ -0,0 +1,98 @@ +#include <asm/thread_info.h> +#include <asm/trap_block.h> +#include <asm/spitfire.h> +#include <asm/ptrace.h> +#include <asm/head.h> + + .text + .align 8 + .globl user_rtt_fill_fixup_common +user_rtt_fill_fixup_common: + rdpr %cwp, %g1 + add %g1, 1, %g1 + wrpr %g1, 0x0, %cwp + + rdpr %wstate, %g2 + sll %g2, 3, %g2 + wrpr %g2, 0x0, %wstate + + /* We know %canrestore and %otherwin are both zero. 
*/ + + sethi %hi(sparc64_kern_pri_context), %g2 + ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 + mov PRIMARY_CONTEXT, %g1 + +661: stxa %g2, [%g1] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %g2, [%g1] ASI_MMU + .previous + + sethi %hi(KERNBASE), %g1 + flush %g1 + + mov %g4, %l4 + mov %g5, %l5 + brnz,pn %g3, 1f + mov %g3, %l3 + + or %g4, FAULT_CODE_WINFIXUP, %g4 + stb %g4, [%g6 + TI_FAULT_CODE] + stx %g5, [%g6 + TI_FAULT_ADDR] +1: + mov %g6, %l1 + wrpr %g0, 0x0, %tl + +661: nop + .section .sun4v_1insn_patch, "ax" + .word 661b + SET_GL(0) + .previous + + wrpr %g0, RTRAP_PSTATE, %pstate + + mov %l1, %g6 + ldx [%g6 + TI_TASK], %g4 + LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) + + brnz,pn %l3, 1f + nop + + call do_sparc64_fault + add %sp, PTREGS_OFF, %o0 + ba,pt %xcc, rtrap + nop + +1: cmp %g3, 2 + bne,pn %xcc, 2f + nop + + sethi %hi(tlb_type), %g1 + lduw [%g1 + %lo(tlb_type)], %g1 + cmp %g1, 3 + bne,pt %icc, 1f + add %sp, PTREGS_OFF, %o0 + mov %l4, %o2 + call sun4v_do_mna + mov %l5, %o1 + ba,a,pt %xcc, rtrap +1: mov %l4, %o1 + mov %l5, %o2 + call mem_address_unaligned + nop + ba,a,pt %xcc, rtrap + +2: sethi %hi(tlb_type), %g1 + mov %l4, %o1 + lduw [%g1 + %lo(tlb_type)], %g1 + mov %l5, %o2 + cmp %g1, 3 + bne,pt %icc, 1f + add %sp, PTREGS_OFF, %o0 + call sun4v_data_access_exception + nop + ba,a,pt %xcc, rtrap + +1: call spitfire_data_access_exception + nop + ba,a,pt %xcc, rtrap diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 652683cb4b4b..14bb0d5ed3c6 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2824,9 +2824,10 @@ void hugetlb_setup(struct pt_regs *regs) * the Data-TLB for huge pages. */ if (tlb_type == cheetah_plus) { + bool need_context_reload = false; unsigned long ctx; - spin_lock(&ctx_alloc_lock); + spin_lock_irq(&ctx_alloc_lock); ctx = mm->context.sparc64_ctx_val; ctx &= ~CTX_PGSZ_MASK; ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; @@ -2845,9 +2846,12 @@ void hugetlb_setup(struct pt_regs *regs) * also executing in this address space. */ mm->context.sparc64_ctx_val = ctx; - on_each_cpu(context_reload, mm, 0); + need_context_reload = true; } - spin_unlock(&ctx_alloc_lock); + spin_unlock_irq(&ctx_alloc_lock); + + if (need_context_reload) + on_each_cpu(context_reload, mm, 0); } } #endif diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index bca14c899137..757390eb562b 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -223,36 +223,19 @@ static void __init intel_remapping_check(int num, int slot, int func) * despite the efforts of the "RAM buffer" approach, which simply rounds * memory boundaries up to 64M to try to catch space that may decode * as RAM and so is not suitable for MMIO. - * - * And yes, so far on current devices the base addr is always under 4G. */ -static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size) -{ - u32 base; - - /* - * For the PCI IDs in this quirk, the stolen base is always - * in 0x5c, aka the BDSM register (yes that's really what - * it's called). 
- */ - base = read_pci_config(num, slot, func, 0x5c); - base &= ~((1<<20) - 1); - - return base; -} #define KB(x) ((x) * 1024UL) #define MB(x) (KB (KB (x))) -#define GB(x) (MB (KB (x))) static size_t __init i830_tseg_size(void) { - u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC); + u8 esmramc = read_pci_config_byte(0, 0, 0, I830_ESMRAMC); - if (!(tmp & TSEG_ENABLE)) + if (!(esmramc & TSEG_ENABLE)) return 0; - if (tmp & I830_TSEG_SIZE_1M) + if (esmramc & I830_TSEG_SIZE_1M) return MB(1); else return KB(512); @@ -260,27 +243,26 @@ static size_t __init i830_tseg_size(void) static size_t __init i845_tseg_size(void) { - u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC); + u8 esmramc = read_pci_config_byte(0, 0, 0, I845_ESMRAMC); + u8 tseg_size = esmramc & I845_TSEG_SIZE_MASK; - if (!(tmp & TSEG_ENABLE)) + if (!(esmramc & TSEG_ENABLE)) return 0; - switch (tmp & I845_TSEG_SIZE_MASK) { - case I845_TSEG_SIZE_512K: - return KB(512); - case I845_TSEG_SIZE_1M: - return MB(1); + switch (tseg_size) { + case I845_TSEG_SIZE_512K: return KB(512); + case I845_TSEG_SIZE_1M: return MB(1); default: - WARN_ON(1); - return 0; + WARN(1, "Unknown ESMRAMC value: %x!\n", esmramc); } + return 0; } static size_t __init i85x_tseg_size(void) { - u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC); + u8 esmramc = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC); - if (!(tmp & TSEG_ENABLE)) + if (!(esmramc & TSEG_ENABLE)) return 0; return MB(1); @@ -300,285 +282,287 @@ static size_t __init i85x_mem_size(void) * On 830/845/85x the stolen memory base isn't available in any * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size. */ -static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size) +static phys_addr_t __init i830_stolen_base(int num, int slot, int func, + size_t stolen_size) { - return i830_mem_size() - i830_tseg_size() - stolen_size; + return (phys_addr_t)i830_mem_size() - i830_tseg_size() - stolen_size; } -static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size) +static phys_addr_t __init i845_stolen_base(int num, int slot, int func, + size_t stolen_size) { - return i830_mem_size() - i845_tseg_size() - stolen_size; + return (phys_addr_t)i830_mem_size() - i845_tseg_size() - stolen_size; } -static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size) +static phys_addr_t __init i85x_stolen_base(int num, int slot, int func, + size_t stolen_size) { - return i85x_mem_size() - i85x_tseg_size() - stolen_size; + return (phys_addr_t)i85x_mem_size() - i85x_tseg_size() - stolen_size; } -static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size) +static phys_addr_t __init i865_stolen_base(int num, int slot, int func, + size_t stolen_size) { + u16 toud; + /* * FIXME is the graphics stolen memory region * always at TOUD? Ie. is it always the last * one to be allocated by the BIOS? */ - return read_pci_config_16(0, 0, 0, I865_TOUD) << 16; + toud = read_pci_config_16(0, 0, 0, I865_TOUD); + + return (phys_addr_t)toud << 16; +} + +static phys_addr_t __init gen3_stolen_base(int num, int slot, int func, + size_t stolen_size) +{ + u32 bsm; + + /* Almost universally we can find the Graphics Base of Stolen Memory + * at register BSM (0x5c) in the igfx configuration space. On a few + * (desktop) machines this is also mirrored in the bridge device at + * different locations, or in the MCHBAR. 
+ */ + bsm = read_pci_config(num, slot, func, INTEL_BSM); + + return (phys_addr_t)bsm & INTEL_BSM_MASK; } static size_t __init i830_stolen_size(int num, int slot, int func) { - size_t stolen_size; u16 gmch_ctrl; + u16 gms; gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL); - - switch (gmch_ctrl & I830_GMCH_GMS_MASK) { - case I830_GMCH_GMS_STOLEN_512: - stolen_size = KB(512); - break; - case I830_GMCH_GMS_STOLEN_1024: - stolen_size = MB(1); - break; - case I830_GMCH_GMS_STOLEN_8192: - stolen_size = MB(8); - break; - case I830_GMCH_GMS_LOCAL: - /* local memory isn't part of the normal address space */ - stolen_size = 0; - break; + gms = gmch_ctrl & I830_GMCH_GMS_MASK; + + switch (gms) { + case I830_GMCH_GMS_STOLEN_512: return KB(512); + case I830_GMCH_GMS_STOLEN_1024: return MB(1); + case I830_GMCH_GMS_STOLEN_8192: return MB(8); + /* local memory isn't part of the normal address space */ + case I830_GMCH_GMS_LOCAL: return 0; default: - return 0; + WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl); } - return stolen_size; + return 0; } static size_t __init gen3_stolen_size(int num, int slot, int func) { - size_t stolen_size; u16 gmch_ctrl; + u16 gms; gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL); - - switch (gmch_ctrl & I855_GMCH_GMS_MASK) { - case I855_GMCH_GMS_STOLEN_1M: - stolen_size = MB(1); - break; - case I855_GMCH_GMS_STOLEN_4M: - stolen_size = MB(4); - break; - case I855_GMCH_GMS_STOLEN_8M: - stolen_size = MB(8); - break; - case I855_GMCH_GMS_STOLEN_16M: - stolen_size = MB(16); - break; - case I855_GMCH_GMS_STOLEN_32M: - stolen_size = MB(32); - break; - case I915_GMCH_GMS_STOLEN_48M: - stolen_size = MB(48); - break; - case I915_GMCH_GMS_STOLEN_64M: - stolen_size = MB(64); - break; - case G33_GMCH_GMS_STOLEN_128M: - stolen_size = MB(128); - break; - case G33_GMCH_GMS_STOLEN_256M: - stolen_size = MB(256); - break; - case INTEL_GMCH_GMS_STOLEN_96M: - stolen_size = MB(96); - break; - case INTEL_GMCH_GMS_STOLEN_160M: - stolen_size = MB(160); - break; - case INTEL_GMCH_GMS_STOLEN_224M: - stolen_size = MB(224); - break; - case INTEL_GMCH_GMS_STOLEN_352M: - stolen_size = MB(352); - break; + gms = gmch_ctrl & I855_GMCH_GMS_MASK; + + switch (gms) { + case I855_GMCH_GMS_STOLEN_1M: return MB(1); + case I855_GMCH_GMS_STOLEN_4M: return MB(4); + case I855_GMCH_GMS_STOLEN_8M: return MB(8); + case I855_GMCH_GMS_STOLEN_16M: return MB(16); + case I855_GMCH_GMS_STOLEN_32M: return MB(32); + case I915_GMCH_GMS_STOLEN_48M: return MB(48); + case I915_GMCH_GMS_STOLEN_64M: return MB(64); + case G33_GMCH_GMS_STOLEN_128M: return MB(128); + case G33_GMCH_GMS_STOLEN_256M: return MB(256); + case INTEL_GMCH_GMS_STOLEN_96M: return MB(96); + case INTEL_GMCH_GMS_STOLEN_160M:return MB(160); + case INTEL_GMCH_GMS_STOLEN_224M:return MB(224); + case INTEL_GMCH_GMS_STOLEN_352M:return MB(352); default: - stolen_size = 0; - break; + WARN(1, "Unknown GMCH_CTRL value: %x!\n", gmch_ctrl); } - return stolen_size; + return 0; } static size_t __init gen6_stolen_size(int num, int slot, int func) { u16 gmch_ctrl; + u16 gms; gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); - gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; - gmch_ctrl &= SNB_GMCH_GMS_MASK; + gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK; - return gmch_ctrl << 25; /* 32 MB units */ + return (size_t)gms * MB(32); } static size_t __init gen8_stolen_size(int num, int slot, int func) { u16 gmch_ctrl; + u16 gms; gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); - gmch_ctrl >>= BDW_GMCH_GMS_SHIFT; - gmch_ctrl &= 
BDW_GMCH_GMS_MASK; - return gmch_ctrl << 25; /* 32 MB units */ + gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK; + + return (size_t)gms * MB(32); } static size_t __init chv_stolen_size(int num, int slot, int func) { u16 gmch_ctrl; + u16 gms; gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); - gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; - gmch_ctrl &= SNB_GMCH_GMS_MASK; + gms = (gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK; /* * 0x0 to 0x10: 32MB increments starting at 0MB * 0x11 to 0x16: 4MB increments starting at 8MB * 0x17 to 0x1d: 4MB increments start at 36MB */ - if (gmch_ctrl < 0x11) - return gmch_ctrl << 25; - else if (gmch_ctrl < 0x17) - return (gmch_ctrl - 0x11 + 2) << 22; + if (gms < 0x11) + return (size_t)gms * MB(32); + else if (gms < 0x17) + return (size_t)(gms - 0x11 + 2) * MB(4); else - return (gmch_ctrl - 0x17 + 9) << 22; + return (size_t)(gms - 0x17 + 9) * MB(4); } -struct intel_stolen_funcs { - size_t (*size)(int num, int slot, int func); - u32 (*base)(int num, int slot, int func, size_t size); -}; - static size_t __init gen9_stolen_size(int num, int slot, int func) { u16 gmch_ctrl; + u16 gms; gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); - gmch_ctrl >>= BDW_GMCH_GMS_SHIFT; - gmch_ctrl &= BDW_GMCH_GMS_MASK; + gms = (gmch_ctrl >> BDW_GMCH_GMS_SHIFT) & BDW_GMCH_GMS_MASK; - if (gmch_ctrl < 0xf0) - return gmch_ctrl << 25; /* 32 MB units */ + /* 0x0 to 0xef: 32MB increments starting at 0MB */ + /* 0xf0 to 0xfe: 4MB increments starting at 4MB */ + if (gms < 0xf0) + return (size_t)gms * MB(32); else - /* 4MB increments starting at 0xf0 for 4MB */ - return (gmch_ctrl - 0xf0 + 1) << 22; + return (size_t)(gms - 0xf0 + 1) * MB(4); } -typedef size_t (*stolen_size_fn)(int num, int slot, int func); +struct intel_early_ops { + size_t (*stolen_size)(int num, int slot, int func); + phys_addr_t (*stolen_base)(int num, int slot, int func, size_t size); +}; -static const struct intel_stolen_funcs i830_stolen_funcs __initconst = { - .base = i830_stolen_base, - .size = i830_stolen_size, +static const struct intel_early_ops i830_early_ops __initconst = { + .stolen_base = i830_stolen_base, + .stolen_size = i830_stolen_size, }; -static const struct intel_stolen_funcs i845_stolen_funcs __initconst = { - .base = i845_stolen_base, - .size = i830_stolen_size, +static const struct intel_early_ops i845_early_ops __initconst = { + .stolen_base = i845_stolen_base, + .stolen_size = i830_stolen_size, }; -static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = { - .base = i85x_stolen_base, - .size = gen3_stolen_size, +static const struct intel_early_ops i85x_early_ops __initconst = { + .stolen_base = i85x_stolen_base, + .stolen_size = gen3_stolen_size, }; -static const struct intel_stolen_funcs i865_stolen_funcs __initconst = { - .base = i865_stolen_base, - .size = gen3_stolen_size, +static const struct intel_early_ops i865_early_ops __initconst = { + .stolen_base = i865_stolen_base, + .stolen_size = gen3_stolen_size, }; -static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = { - .base = intel_stolen_base, - .size = gen3_stolen_size, +static const struct intel_early_ops gen3_early_ops __initconst = { + .stolen_base = gen3_stolen_base, + .stolen_size = gen3_stolen_size, }; -static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = { - .base = intel_stolen_base, - .size = gen6_stolen_size, +static const struct intel_early_ops gen6_early_ops __initconst = { + .stolen_base = gen3_stolen_base, + .stolen_size = gen6_stolen_size, 
}; -static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = { - .base = intel_stolen_base, - .size = gen8_stolen_size, +static const struct intel_early_ops gen8_early_ops __initconst = { + .stolen_base = gen3_stolen_base, + .stolen_size = gen8_stolen_size, }; -static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = { - .base = intel_stolen_base, - .size = gen9_stolen_size, +static const struct intel_early_ops gen9_early_ops __initconst = { + .stolen_base = gen3_stolen_base, + .stolen_size = gen9_stolen_size, }; -static const struct intel_stolen_funcs chv_stolen_funcs __initconst = { - .base = intel_stolen_base, - .size = chv_stolen_size, +static const struct intel_early_ops chv_early_ops __initconst = { + .stolen_base = gen3_stolen_base, + .stolen_size = chv_stolen_size, }; -static const struct pci_device_id intel_stolen_ids[] __initconst = { - INTEL_I830_IDS(&i830_stolen_funcs), - INTEL_I845G_IDS(&i845_stolen_funcs), - INTEL_I85X_IDS(&i85x_stolen_funcs), - INTEL_I865G_IDS(&i865_stolen_funcs), - INTEL_I915G_IDS(&gen3_stolen_funcs), - INTEL_I915GM_IDS(&gen3_stolen_funcs), - INTEL_I945G_IDS(&gen3_stolen_funcs), - INTEL_I945GM_IDS(&gen3_stolen_funcs), - INTEL_VLV_M_IDS(&gen6_stolen_funcs), - INTEL_VLV_D_IDS(&gen6_stolen_funcs), - INTEL_PINEVIEW_IDS(&gen3_stolen_funcs), - INTEL_I965G_IDS(&gen3_stolen_funcs), - INTEL_G33_IDS(&gen3_stolen_funcs), - INTEL_I965GM_IDS(&gen3_stolen_funcs), - INTEL_GM45_IDS(&gen3_stolen_funcs), - INTEL_G45_IDS(&gen3_stolen_funcs), - INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs), - INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs), - INTEL_SNB_D_IDS(&gen6_stolen_funcs), - INTEL_SNB_M_IDS(&gen6_stolen_funcs), - INTEL_IVB_M_IDS(&gen6_stolen_funcs), - INTEL_IVB_D_IDS(&gen6_stolen_funcs), - INTEL_HSW_D_IDS(&gen6_stolen_funcs), - INTEL_HSW_M_IDS(&gen6_stolen_funcs), - INTEL_BDW_M_IDS(&gen8_stolen_funcs), - INTEL_BDW_D_IDS(&gen8_stolen_funcs), - INTEL_CHV_IDS(&chv_stolen_funcs), - INTEL_SKL_IDS(&gen9_stolen_funcs), - INTEL_BXT_IDS(&gen9_stolen_funcs), - INTEL_KBL_IDS(&gen9_stolen_funcs), +static const struct pci_device_id intel_early_ids[] __initconst = { + INTEL_I830_IDS(&i830_early_ops), + INTEL_I845G_IDS(&i845_early_ops), + INTEL_I85X_IDS(&i85x_early_ops), + INTEL_I865G_IDS(&i865_early_ops), + INTEL_I915G_IDS(&gen3_early_ops), + INTEL_I915GM_IDS(&gen3_early_ops), + INTEL_I945G_IDS(&gen3_early_ops), + INTEL_I945GM_IDS(&gen3_early_ops), + INTEL_VLV_M_IDS(&gen6_early_ops), + INTEL_VLV_D_IDS(&gen6_early_ops), + INTEL_PINEVIEW_IDS(&gen3_early_ops), + INTEL_I965G_IDS(&gen3_early_ops), + INTEL_G33_IDS(&gen3_early_ops), + INTEL_I965GM_IDS(&gen3_early_ops), + INTEL_GM45_IDS(&gen3_early_ops), + INTEL_G45_IDS(&gen3_early_ops), + INTEL_IRONLAKE_D_IDS(&gen3_early_ops), + INTEL_IRONLAKE_M_IDS(&gen3_early_ops), + INTEL_SNB_D_IDS(&gen6_early_ops), + INTEL_SNB_M_IDS(&gen6_early_ops), + INTEL_IVB_M_IDS(&gen6_early_ops), + INTEL_IVB_D_IDS(&gen6_early_ops), + INTEL_HSW_D_IDS(&gen6_early_ops), + INTEL_HSW_M_IDS(&gen6_early_ops), + INTEL_BDW_M_IDS(&gen8_early_ops), + INTEL_BDW_D_IDS(&gen8_early_ops), + INTEL_CHV_IDS(&chv_early_ops), + INTEL_SKL_IDS(&gen9_early_ops), + INTEL_BXT_IDS(&gen9_early_ops), + INTEL_KBL_IDS(&gen9_early_ops), }; -static void __init intel_graphics_stolen(int num, int slot, int func) +static void __init +intel_graphics_stolen(int num, int slot, int func, + const struct intel_early_ops *early_ops) { + phys_addr_t base, end; size_t size; + + size = early_ops->stolen_size(num, slot, func); + base = early_ops->stolen_base(num, slot, func, size); + + if (!size 
|| !base) + return; + + end = base + size - 1; + printk(KERN_INFO "Reserving Intel graphics memory at %pa-%pa\n", + &base, &end); + + /* Mark this space as reserved */ + e820_add_region(base, size, E820_RESERVED); + sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); +} + +static void __init intel_graphics_quirks(int num, int slot, int func) +{ + const struct intel_early_ops *early_ops; + u16 device; int i; - u32 start; - u16 device, subvendor, subdevice; device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID); - subvendor = read_pci_config_16(num, slot, func, - PCI_SUBSYSTEM_VENDOR_ID); - subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID); - - for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) { - if (intel_stolen_ids[i].device == device) { - const struct intel_stolen_funcs *stolen_funcs = - (const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data; - size = stolen_funcs->size(num, slot, func); - start = stolen_funcs->base(num, slot, func, size); - if (size && start) { - printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n", - start, start + (u32)size - 1); - /* Mark this space as reserved */ - e820_add_region(start, size, E820_RESERVED); - sanitize_e820_map(e820.map, - ARRAY_SIZE(e820.map), - &e820.nr_map); - } - return; - } + + for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) { + kernel_ulong_t driver_data = intel_early_ids[i].driver_data; + + if (intel_early_ids[i].device != device) + continue; + + early_ops = (typeof(early_ops))driver_data; + + intel_graphics_stolen(num, slot, func, early_ops); + + return; } } @@ -627,7 +611,7 @@ static struct chipset early_qrk[] __initdata = { { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID, - QFLAG_APPLY_ONCE, intel_graphics_stolen }, + QFLAG_APPLY_ONCE, intel_graphics_quirks }, /* * HPET on the current version of the Baytrail platform has accuracy * problems: it will halt in deep idle state - so we disable it. 
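The stolen-memory helpers above all reduce to the same pattern: read the GMCH control word from PCI config space, extract the graphics mode select (GMS) field, and scale it to a size in bytes. The following standalone C sketch replays only that scaling arithmetic outside the kernel; gen6_stolen_bytes(), chv_stolen_bytes() and the worked values in main() are illustrative names invented here, with the math taken from gen6_stolen_size() and chv_stolen_size() in the hunks above.

#include <stdio.h>
#include <stddef.h>

#define MB(x) ((size_t)(x) * 1024 * 1024)

/* gen6/gen8 style: the GMS field counts 32 MB units of stolen memory. */
static size_t gen6_stolen_bytes(unsigned int gms)
{
	return (size_t)gms * MB(32);
}

/* CHV style: three ranges, mirroring the comment in chv_stolen_size(). */
static size_t chv_stolen_bytes(unsigned int gms)
{
	if (gms < 0x11)		/* 0x00-0x10: 32 MB units starting at 0 MB */
		return (size_t)gms * MB(32);
	if (gms < 0x17)		/* 0x11-0x16: 4 MB units starting at 8 MB */
		return (size_t)(gms - 0x11 + 2) * MB(4);
	/* 0x17-0x1d: 4 MB units starting at 36 MB */
	return (size_t)(gms - 0x17 + 9) * MB(4);
}

int main(void)
{
	/* gms 0x02 -> 2 * 32 MB = 64 MB; CHV gms 0x12 -> (0x12 - 0x11 + 2) * 4 MB = 12 MB */
	printf("%zu MB, %zu MB\n",
	       gen6_stolen_bytes(0x02) / MB(1),
	       chv_stolen_bytes(0x12) / MB(1));
	return 0;
}

The explicit size_t multiplications are the substance of the cleanup: decoding the field into a local gms and widening it before scaling keeps the arithmetic readable and avoids shifting a 16-bit register value left by 25 bits in plain int arithmetic.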
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 769af907f824..7597b42a8a88 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -181,19 +181,22 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry __user *entries) { int r, i; - struct kvm_cpuid_entry *cpuid_entries; + struct kvm_cpuid_entry *cpuid_entries = NULL; r = -E2BIG; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) goto out; r = -ENOMEM; - cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent); - if (!cpuid_entries) - goto out; - r = -EFAULT; - if (copy_from_user(cpuid_entries, entries, - cpuid->nent * sizeof(struct kvm_cpuid_entry))) - goto out_free; + if (cpuid->nent) { + cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * + cpuid->nent); + if (!cpuid_entries) + goto out; + r = -EFAULT; + if (copy_from_user(cpuid_entries, entries, + cpuid->nent * sizeof(struct kvm_cpuid_entry))) + goto out; + } for (i = 0; i < cpuid->nent; i++) { vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; @@ -212,9 +215,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, kvm_x86_ops->cpuid_update(vcpu); r = kvm_update_cpuid(vcpu); -out_free: - vfree(cpuid_entries); out: + vfree(cpuid_entries); return r; } diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 24e800116ab4..def97b3a392b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -336,12 +336,12 @@ static gfn_t pse36_gfn_delta(u32 gpte) #ifdef CONFIG_X86_64 static void __set_spte(u64 *sptep, u64 spte) { - *sptep = spte; + WRITE_ONCE(*sptep, spte); } static void __update_clear_spte_fast(u64 *sptep, u64 spte) { - *sptep = spte; + WRITE_ONCE(*sptep, spte); } static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) @@ -390,7 +390,7 @@ static void __set_spte(u64 *sptep, u64 spte) */ smp_wmb(); - ssptep->spte_low = sspte.spte_low; + WRITE_ONCE(ssptep->spte_low, sspte.spte_low); } static void __update_clear_spte_fast(u64 *sptep, u64 spte) @@ -400,7 +400,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte) ssptep = (union split_spte *)sptep; sspte = (union split_spte)spte; - ssptep->spte_low = sspte.spte_low; + WRITE_ONCE(ssptep->spte_low, sspte.spte_low); /* * If we map the spte from present to nonpresent, we should clear diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c805cf494154..902d9da12392 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2314,6 +2314,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_NB_CFG: case MSR_FAM10H_MMIO_CONF_BASE: case MSR_AMD64_BU_CFG2: + case MSR_IA32_PERF_CTL: msr_info->data = 0; break; case MSR_K7_EVNTSEL0 ... 
MSR_K7_EVNTSEL3: @@ -2972,6 +2973,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, | KVM_VCPUEVENT_VALID_SMM)) return -EINVAL; + if (events->exception.injected && + (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) + return -EINVAL; + process_nmi(vcpu); vcpu->arch.exception.pending = events->exception.injected; vcpu->arch.exception.nr = events->exception.nr; @@ -3036,6 +3041,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, if (dbgregs->flags) return -EINVAL; + if (dbgregs->dr6 & ~0xffffffffull) + return -EINVAL; + if (dbgregs->dr7 & ~0xffffffffull) + return -EINVAL; + memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); kvm_update_dr0123(vcpu); vcpu->arch.dr6 = dbgregs->dr6; @@ -7815,7 +7825,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) slot = id_to_memslot(slots, id); if (size) { - if (WARN_ON(slot->npages)) + if (slot->npages) return -EEXIST; /* diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index e28e912000a7..331f6baf2df8 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig @@ -13,6 +13,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE tristate "Asymmetric public-key crypto algorithm subtype" select MPILIB select CRYPTO_HASH_INFO + select CRYPTO_AKCIPHER help This option provides support for asymmetric public key type handling. If signature generation and/or verification are to be used, diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 0d92d0f915e9..c7ba948d253c 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device) pr->throttling.duty_width = acpi_gbl_FADT.duty_width; pr->pblk = object.processor.pblk_address; - - /* - * We don't care about error returns - we just try to mark - * these reserved so that nobody else is confused into thinking - * that this region might be unused.. 
- * - * (In particular, allocating the IO range for Cardbus) - */ - request_region(pr->throttling.address, 6, "ACPI CPU throttle"); } /* diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 3d5b8a099351..c1d138e128cb 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -754,7 +754,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device, } int acpi_video_get_levels(struct acpi_device *device, - struct acpi_video_device_brightness **dev_br) + struct acpi_video_device_brightness **dev_br, + int *pmax_level) { union acpi_object *obj = NULL; int i, max_level = 0, count = 0, level_ac_battery = 0; @@ -841,6 +842,8 @@ int acpi_video_get_levels(struct acpi_device *device, br->count = count; *dev_br = br; + if (pmax_level) + *pmax_level = max_level; out: kfree(obj); @@ -869,7 +872,7 @@ acpi_video_init_brightness(struct acpi_video_device *device) struct acpi_video_device_brightness *br = NULL; int result = -EINVAL; - result = acpi_video_get_levels(device->dev, &br); + result = acpi_video_get_levels(device->dev, &br, &max_level); if (result) return result; device->brightness = br; @@ -1737,7 +1740,7 @@ static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video) mutex_lock(&video->device_list_lock); list_for_each_entry(dev, &video->video_device_list, entry) { - if (!acpi_video_device_lcd_query_levels(dev, &levels)) + if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels)) kfree(levels); } mutex_unlock(&video->device_list_lock); diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 0f18dbc9a37f..daceb80022b0 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c @@ -83,27 +83,22 @@ acpi_hw_write_multiple(u32 value, static u8 acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width) { - u64 address; - if (!reg->access_width) { + if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + max_bit_width = 32; + } + /* * Detect old register descriptors where only the bit_width field - * makes senses. The target address is copied to handle possible - * alignment issues. + * makes senses. */ - ACPI_MOVE_64_TO_64(&address, ®->address); - if (!reg->bit_offset && reg->bit_width && + if (reg->bit_width < max_bit_width && + !reg->bit_offset && reg->bit_width && ACPI_IS_POWER_OF_TWO(reg->bit_width) && - ACPI_IS_ALIGNED(reg->bit_width, 8) && - ACPI_IS_ALIGNED(address, reg->bit_width)) { + ACPI_IS_ALIGNED(reg->bit_width, 8)) { return (reg->bit_width); - } else { - if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { - return (32); - } else { - return (max_bit_width); - } } + return (max_bit_width); } else { return (1 << (reg->access_width + 2)); } diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index f170d746336d..c72e64893d03 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr) if (!pr->flags.throttling) return -ENODEV; + /* + * We don't care about error returns - we just try to mark + * these reserved so that nobody else is confused into thinking + * that this region might be unused.. 
+ * + * (In particular, allocating the IO range for Cardbus) + */ + request_region(pr->throttling.address, 6, "ACPI CPU throttle"); + pr->throttling.state = 0; duty_mask = pr->throttling.state_count - 1; diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index a969a7e443be..85aaf2222587 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -181,13 +181,17 @@ static char *res_strings[] = { "reserved 27", "reserved 28", "reserved 29", - "reserved 30", + "reserved 30", /* FIXME: The strings between 30-40 might be wrong. */ "reassembly abort: no buffers", "receive buffer overflow", "change in GFC", "receive buffer full", "low priority discard - no receive descriptor", "low priority discard - missing end of packet", + "reserved 37", + "reserved 38", + "reserved 39", + "reserved 40", "reserved 41", "reserved 42", "reserved 43", diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 7d00f2994738..809dd1e02091 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev) /* make the ptr point to the corresponding buffer desc entry */ buf_desc_ptr += desc; if (!desc || (desc > iadev->num_rx_desc) || - ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { + ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) { free_desc(dev, desc); IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);) return -1; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 36bc11a106aa..9009295f5134 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1832,7 +1832,7 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier); unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, unsigned int target_freq) { - clamp_val(target_freq, policy->min, policy->max); + target_freq = clamp_val(target_freq, policy->min, policy->max); return cpufreq_driver->fast_switch(policy, target_freq); } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 3a9c4325d6e2..0d159b513469 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -449,7 +449,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) cpu->acpi_perf_data.states[0].core_frequency = policy->cpuinfo.max_freq / 1000; cpu->valid_pss_table = true; - pr_info("_PPC limits will be enforced\n"); + pr_debug("_PPC limits will be enforced\n"); return; diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index 52c7395cb8d8..0d0d4529ee36 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c @@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); unsigned int unit; + u32 unit_size; int ret; if (!ctx->u.aes.key_len) @@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, if (!req->info) return -EINVAL; - for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) - if (!(req->nbytes & (unit_size_map[unit].size - 1))) - break; + unit_size = CCP_XTS_AES_UNIT_SIZE__LAST; + if (req->nbytes <= unit_size_map[0].size) { + for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) { + if (!(req->nbytes & (unit_size_map[unit].size - 1))) { + unit_size = unit_size_map[unit].value; + break; + } + } + } - if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) || + if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
(ctx->u.aes.key_len != AES_KEYSIZE_128)) { /* Use the fallback to process the request for any * unsupported unit sizes or key sizes @@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, rctx->cmd.engine = CCP_ENGINE_XTS_AES_128; rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT; - rctx->cmd.u.xts.unit_size = unit_size_map[unit].value; + rctx->cmd.u.xts.unit_size = unit_size; rctx->cmd.u.xts.key = &ctx->u.aes.key_sg; rctx->cmd.u.xts.key_len = ctx->u.aes.key_len; rctx->cmd.u.xts.iv = &rctx->iv_sg; diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 6eefaa2fe58f..63464e86f2b1 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -1986,7 +1986,7 @@ err_algs: &dd->pdata->algs_info[i].algs_list[j]); err_pm: pm_runtime_disable(dev); - if (dd->polling_mode) + if (!dd->polling_mode) dma_release_channel(dd->dma_lch); data_err: dev_err(dev, "initialization failed.\n"); diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 4a424eca75ed..f353db213a81 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,2 +1,2 @@ -obj-y := dma-buf.o fence.o reservation.o seqno-fence.o +obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o obj-$(CONFIG_SYNC_FILE) += sync_file.o diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 4a2c07ee6677..6355ab38d630 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -33,6 +33,7 @@ #include <linux/seq_file.h> #include <linux/poll.h> #include <linux/reservation.h> +#include <linux/mm.h> #include <uapi/linux/dma-buf.h> @@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) dmabuf = file->private_data; /* check for overflowing the buffer's size */ - if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > + if (vma->vm_pgoff + vma_pages(vma) > dmabuf->size >> PAGE_SHIFT) return -EINVAL; @@ -723,11 +724,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, return -EINVAL; /* check for offset overflow */ - if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff) + if (pgoff + vma_pages(vma) < pgoff) return -EOVERFLOW; /* check for overflowing the buffer's size */ - if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > + if (pgoff + vma_pages(vma) > dmabuf->size >> PAGE_SHIFT) return -EINVAL; diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c new file mode 100644 index 000000000000..a8731c853da6 --- /dev/null +++ b/drivers/dma-buf/fence-array.c @@ -0,0 +1,144 @@ +/* + * fence-array: aggregate fences to be waited together + * + * Copyright (C) 2016 Collabora Ltd + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Authors: + * Gustavo Padovan <gustavo@padovan.org> + * Christian König <christian.koenig@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
*/ + +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/fence-array.h> + +static void fence_array_cb_func(struct fence *f, struct fence_cb *cb); + +static const char *fence_array_get_driver_name(struct fence *fence) +{ + return "fence_array"; +} + +static const char *fence_array_get_timeline_name(struct fence *fence) +{ + return "unbound"; +} + +static void fence_array_cb_func(struct fence *f, struct fence_cb *cb) +{ + struct fence_array_cb *array_cb = + container_of(cb, struct fence_array_cb, cb); + struct fence_array *array = array_cb->array; + + if (atomic_dec_and_test(&array->num_pending)) + fence_signal(&array->base); + fence_put(&array->base); +} + +static bool fence_array_enable_signaling(struct fence *fence) +{ + struct fence_array *array = to_fence_array(fence); + struct fence_array_cb *cb = (void *)(&array[1]); + unsigned i; + + for (i = 0; i < array->num_fences; ++i) { + cb[i].array = array; + /* + * As we may report that the fence is signaled before all + * callbacks are complete, we need to take an additional + * reference count on the array so that we do not free it too + * early. The core fence handling will only hold the reference + * until we signal the array as complete (but that is now + * insufficient). + */ + fence_get(&array->base); + if (fence_add_callback(array->fences[i], &cb[i].cb, + fence_array_cb_func)) { + fence_put(&array->base); + if (atomic_dec_and_test(&array->num_pending)) + return false; + } + } + + return true; +} + +static bool fence_array_signaled(struct fence *fence) +{ + struct fence_array *array = to_fence_array(fence); + + return atomic_read(&array->num_pending) <= 0; +} + +static void fence_array_release(struct fence *fence) +{ + struct fence_array *array = to_fence_array(fence); + unsigned i; + + for (i = 0; i < array->num_fences; ++i) + fence_put(array->fences[i]); + + kfree(array->fences); + fence_free(fence); +} + +const struct fence_ops fence_array_ops = { + .get_driver_name = fence_array_get_driver_name, + .get_timeline_name = fence_array_get_timeline_name, + .enable_signaling = fence_array_enable_signaling, + .signaled = fence_array_signaled, + .wait = fence_default_wait, + .release = fence_array_release, +}; + +/** + * fence_array_create - Create a custom fence array + * @num_fences: [in] number of fences to add in the array + * @fences: [in] array containing the fences + * @context: [in] fence context to use + * @seqno: [in] sequence number to use + * @signal_on_any: [in] signal on any fence in the array + * + * Allocate a fence_array object and initialize the base fence with fence_init(). + * In case of error it returns NULL. + * + * The caller should allocate the fences array with num_fences size + * and fill it with the fences it wants to add to the object. Ownership of this + * array is taken and fence_put() is used on each fence on release. + * + * If @signal_on_any is true the fence array signals if any fence in the array + * signals, otherwise it signals when all fences in the array signal. + */ +struct fence_array *fence_array_create(int num_fences, struct fence **fences, + u64 context, unsigned seqno, + bool signal_on_any) +{ + struct fence_array *array; + size_t size = sizeof(*array); + + /* Allocate the callback structures behind the array.
+	 */
+	size += num_fences * sizeof(struct fence_array_cb);
+	array = kzalloc(size, GFP_KERNEL);
+	if (!array)
+		return NULL;
+
+	spin_lock_init(&array->lock);
+	fence_init(&array->base, &fence_array_ops, &array->lock,
+		   context, seqno);
+
+	array->num_fences = num_fences;
+	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
+	array->fences = fences;
+
+	return array;
+}
+EXPORT_SYMBOL(fence_array_create);
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 7b05dbe9b296..4d51f9e83fa8 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -35,7 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
  * context or not. One device can have multiple separate contexts,
  * and they're used if some engine can run independently of another.
  */
-static atomic_t fence_context_counter = ATOMIC_INIT(0);
+static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
 
 /**
  * fence_context_alloc - allocate an array of fence contexts
@@ -44,10 +44,10 @@ static atomic_t fence_context_counter = ATOMIC_INIT(0);
  * This function will return the first index of the number of fences allocated.
  * The fence context is used for setting fence->context to a unique number.
  */
-unsigned fence_context_alloc(unsigned num)
+u64 fence_context_alloc(unsigned num)
 {
 	BUG_ON(!num);
-	return atomic_add_return(num, &fence_context_counter) - num;
+	return atomic64_add_return(num, &fence_context_counter) - num;
 }
 EXPORT_SYMBOL(fence_context_alloc);
@@ -513,7 +513,7 @@ EXPORT_SYMBOL(fence_wait_any_timeout);
  */
 void
 fence_init(struct fence *fence, const struct fence_ops *ops,
-	   spinlock_t *lock, unsigned context, unsigned seqno)
+	   spinlock_t *lock, u64 context, unsigned seqno)
 {
 	BUG_ON(!lock);
 	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index c0bd5722c997..9566a62ad8e3 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -35,6 +35,17 @@
 #include <linux/reservation.h>
 #include <linux/export.h>
 
+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage shared and
+ * exclusive fences associated with a buffer. A reservation object
+ * can have one exclusive fence attached (normally associated with
+ * write operations) or N shared fences (read operations). The RCU
+ * mechanism is used to protect read access to fences from locked
+ * write-side updates.
+ */
+
 DEFINE_WW_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
@@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class);
 const char reservation_seqcount_string[] = "reservation_seqcount";
 EXPORT_SYMBOL(reservation_seqcount_string);
 
-/*
- * Reserve space to add a shared fence to a reservation_object,
- * must be called with obj->lock held.
+
+/**
+ * reservation_object_reserve_shared - Reserve space to add a shared
+ * fence to a reservation_object.
+ * @obj: reservation object
+ *
+ * Should be called before reservation_object_add_shared_fence(). Must
+ * be called with obj->lock held.
+ *
+ * RETURNS
+ * Zero for success, or -errno
  */
 int reservation_object_reserve_shared(struct reservation_object *obj)
 {
@@ -180,7 +199,11 @@ done:
 		fence_put(old_fence);
 }
 
-/*
+/**
+ * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
  * Add a fence to a shared slot, obj->lock must be held, and
  * reservation_object_reserve_shared() has been called.
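+ *
+ * A minimal usage sketch (illustrative only; "obj" and "fence" are assumed
+ * to have been set up by the caller, and error handling is trimmed):
+ *
+ *	ww_mutex_lock(&obj->lock, NULL);
+ *	if (!reservation_object_reserve_shared(obj))
+ *		reservation_object_add_shared_fence(obj, fence);
+ *	ww_mutex_unlock(&obj->lock);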
  */
@@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
 }
 EXPORT_SYMBOL(reservation_object_add_shared_fence);
 
+/**
+ * reservation_object_add_excl_fence - Add an exclusive fence.
+ * @obj: the reservation object
+ * @fence: the exclusive fence to add
+ *
+ * Add a fence to the exclusive slot. The obj->lock must be held.
+ */
 void reservation_object_add_excl_fence(struct reservation_object *obj,
 				       struct fence *fence)
 {
@@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 }
 EXPORT_SYMBOL(reservation_object_add_excl_fence);
 
+/**
+ * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * fences without the update-side lock held
+ * @obj: the reservation object
+ * @pfence_excl: the returned exclusive fence (or NULL)
+ * @pshared_count: the number of shared fences returned
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by caller)
+ *
+ * RETURNS
+ * Zero or -errno
+ */
 int reservation_object_get_fences_rcu(struct reservation_object *obj,
 				      struct fence **pfence_excl,
 				      unsigned *pshared_count,
@@ -319,6 +361,18 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
 
+/**
+ * reservation_object_wait_timeout_rcu - Wait on a reservation object's
+ * shared and/or exclusive fences.
+ * @obj: the reservation object
+ * @wait_all: if true, wait on all fences, else wait on just the exclusive fence
+ * @intr: if true, do interruptible wait
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * RETURNS
+ * -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zero on success.
+ */
 long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
 					 bool wait_all, bool intr,
 					 unsigned long timeout)
 {
@@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
 	return ret;
 }
 
+/**
+ * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * fences have been signaled.
+ * @obj: the reservation object + * @test_all: if true, test all fences, otherwise only test the exclusive + * fence + * + * RETURNS + * true if all fences signaled, else false + */ bool reservation_object_test_signaled_rcu(struct reservation_object *obj, bool test_all) { diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index f08cf2d8309e..9aaa608dfe01 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -82,7 +82,7 @@ struct sync_file *sync_file_create(struct fence *fence) sync_file->num_fences = 1; atomic_set(&sync_file->status, 1); - snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%d-%d", + snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d", fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), fence->context, fence->seqno); diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c index d39014daeef9..fc5f197906ac 100644 --- a/drivers/gpio/gpio-lpc32xx.c +++ b/drivers/gpio/gpio-lpc32xx.c @@ -29,7 +29,6 @@ #include <mach/hardware.h> #include <mach/platform.h> -#include <mach/irqs.h> #define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000) #define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004) @@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin) static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset) { - return IRQ_LPC32XX_P0_P1_IRQ; + return -ENXIO; } -static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = { - IRQ_LPC32XX_GPIO_00, - IRQ_LPC32XX_GPIO_01, - IRQ_LPC32XX_GPIO_02, - IRQ_LPC32XX_GPIO_03, - IRQ_LPC32XX_GPIO_04, - IRQ_LPC32XX_GPIO_05, -}; - static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset) { - if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table)) - return lpc32xx_gpio_to_irq_gpio_p3_table[offset]; return -ENXIO; } -static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = { - IRQ_LPC32XX_GPI_00, - IRQ_LPC32XX_GPI_01, - IRQ_LPC32XX_GPI_02, - IRQ_LPC32XX_GPI_03, - IRQ_LPC32XX_GPI_04, - IRQ_LPC32XX_GPI_05, - IRQ_LPC32XX_GPI_06, - IRQ_LPC32XX_GPI_07, - IRQ_LPC32XX_GPI_08, - IRQ_LPC32XX_GPI_09, - -ENXIO, /* 10 */ - -ENXIO, /* 11 */ - -ENXIO, /* 12 */ - -ENXIO, /* 13 */ - -ENXIO, /* 14 */ - -ENXIO, /* 15 */ - -ENXIO, /* 16 */ - -ENXIO, /* 17 */ - -ENXIO, /* 18 */ - IRQ_LPC32XX_GPI_19, - -ENXIO, /* 20 */ - -ENXIO, /* 21 */ - -ENXIO, /* 22 */ - -ENXIO, /* 23 */ - -ENXIO, /* 24 */ - -ENXIO, /* 25 */ - -ENXIO, /* 26 */ - -ENXIO, /* 27 */ - IRQ_LPC32XX_GPI_28, -}; - static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset) { - if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table)) - return lpc32xx_gpio_to_irq_gpi_p3_table[offset]; return -ENXIO; } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index d407f904a31c..24f60d28f0c0 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -20,6 +20,7 @@ #include <linux/cdev.h> #include <linux/fs.h> #include <linux/uaccess.h> +#include <linux/compat.h> #include <uapi/linux/gpio.h> #include "gpiolib.h" @@ -316,7 +317,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct gpio_device *gdev = filp->private_data; struct gpio_chip *chip = gdev->chip; - int __user *ip = (int __user *)arg; + void __user *ip = (void __user *)arg; /* We fail any subsequent ioctl():s when the chip is gone */ if (!chip) @@ -388,6 +389,14 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -EINVAL; } +#ifdef CONFIG_COMPAT +static long gpio_ioctl_compat(struct file *filp, 
unsigned int cmd, + unsigned long arg) +{ + return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + /** * gpio_chrdev_open() - open the chardev for ioctl operations * @inode: inode for this chardev @@ -431,7 +440,9 @@ static const struct file_operations gpio_fileops = { .owner = THIS_MODULE, .llseek = noop_llseek, .unlocked_ioctl = gpio_ioctl, - .compat_ioctl = gpio_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = gpio_ioctl_compat, +#endif }; static void gpiodevice_release(struct device *dev) @@ -618,6 +629,8 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) goto err_free_label; } + spin_unlock_irqrestore(&gpio_lock, flags); + for (i = 0; i < chip->ngpio; i++) { struct gpio_desc *desc = &gdev->descs[i]; @@ -649,8 +662,6 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) } } - spin_unlock_irqrestore(&gpio_lock, flags); - #ifdef CONFIG_PINCTRL INIT_LIST_HEAD(&gdev->pin_ranges); #endif @@ -1356,10 +1367,13 @@ done: /* * This descriptor validation needs to be inserted verbatim into each * function taking a descriptor, so we need to use a preprocessor - * macro to avoid endless duplication. + * macro to avoid endless duplication. If the desc is NULL it is an + * optional GPIO and calls should just bail out. */ #define VALIDATE_DESC(desc) do { \ - if (!desc || !desc->gdev) { \ + if (!desc) \ + return 0; \ + if (!desc->gdev) { \ pr_warn("%s: invalid GPIO\n", __func__); \ return -EINVAL; \ } \ @@ -1370,7 +1384,9 @@ done: } } while (0) #define VALIDATE_DESC_VOID(desc) do { \ - if (!desc || !desc->gdev) { \ + if (!desc) \ + return; \ + if (!desc->gdev) { \ pr_warn("%s: invalid GPIO\n", __func__); \ return; \ } \ @@ -2066,17 +2082,30 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq); */ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) { - if (offset >= chip->ngpio) - return -EINVAL; + struct gpio_desc *desc; + + desc = gpiochip_get_desc(chip, offset); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + /* Flush direction if something changed behind our back */ + if (chip->get_direction) { + int dir = chip->get_direction(chip, offset); + + if (dir) + clear_bit(FLAG_IS_OUT, &desc->flags); + else + set_bit(FLAG_IS_OUT, &desc->flags); + } - if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) { + if (test_bit(FLAG_IS_OUT, &desc->flags)) { chip_err(chip, "%s: tried to flag a GPIO set as output for IRQ\n", __func__); return -EIO; } - set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags); + set_bit(FLAG_USED_AS_IRQ, &desc->flags); return 0; } EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq); diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index be43afb08c69..e3dba6f44a79 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_lock.o drm_memory.o drm_drv.o drm_vm.o \ drm_scatter.o drm_pci.o \ drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ - drm_crtc.o drm_modes.o drm_edid.o \ + drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \ drm_info.o drm_debugfs.o drm_encoder_slave.o \ drm_trace_points.o drm_global.o drm_prime.o \ drm_rect.o drm_vma_manager.o drm_flip_work.o \ @@ -23,7 +23,8 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ - drm_kms_helper_common.o drm_dp_dual_mode_helper.o + drm_kms_helper_common.o drm_dp_dual_mode_helper.o \ + drm_simple_kms_helper.o drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o 
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 992f00b65be4..da3d02154fa6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -2032,7 +2032,7 @@ struct amdgpu_device { struct amdgpu_irq_src hpd_irq; /* rings */ - unsigned fence_context; + u64 fence_context; unsigned num_rings; struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; bool ib_pool_ready; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index b0832da2ef7e..a6eecf6f9065 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -240,7 +240,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, work->base = base; - r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id); + r = drm_crtc_vblank_get(crtc); if (r) { DRM_ERROR("failed to get vblank before flip\n"); goto pflip_cleanup; @@ -268,7 +268,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, return 0; vblank_cleanup: - drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(crtc); pflip_cleanup: if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 8bf84efafb04..b16366c2b4a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -427,7 +427,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, soffset, eoffset, eoffset - soffset); if (i->fence) - seq_printf(m, " protected by 0x%08x on context %d", + seq_printf(m, " protected by 0x%08x on context %llu", i->fence->seqno, i->fence->context); seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 8227344d2ff6..c1b04e9aab57 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2667,19 +2667,21 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v10_0_crtc_load_lut(crtc); + + return 0; } static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc) @@ -2717,13 +2719,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v10_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v10_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -3372,7 +3374,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index af26ec0bc59d..c90408bc0fde 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2678,19 +2678,21 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v11_0_crtc_load_lut(crtc); + + return 0; } static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) @@ -2728,13 +2730,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v11_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v11_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -3433,7 +3435,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 3fb65e41a6ef..300ff4aab0fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2574,19 +2574,21 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc) } } -static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { amdgpu_crtc->lut_r[i] = red[i] >> 6; amdgpu_crtc->lut_g[i] = green[i] >> 6; amdgpu_crtc->lut_b[i] = blue[i] >> 6; } dce_v8_0_crtc_load_lut(crtc); + + return 0; } static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc) @@ -2624,13 +2626,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); amdgpu_irq_update(adev, &adev->pageflip_irq, type); - drm_vblank_on(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_on(crtc); dce_v8_0_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (amdgpu_crtc->enabled) { dce_v8_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); @@ -3376,7 +3378,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); + drm_crtc_vblank_put(&amdgpu_crtc->base); schedule_work(&works->unpin_work); return 0; diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h index 86574b698a78..8c01a25d279a 100644 --- a/drivers/gpu/drm/arc/arcpgu.h +++ b/drivers/gpu/drm/arc/arcpgu.h @@ -22,7 +22,6 @@ struct arcpgu_drm_private { struct clk *clk; struct drm_fbdev_cma *fbdev; struct drm_framebuffer *fb; - struct list_head event_list; struct drm_crtc crtc; struct drm_plane *plane; }; diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index 92f8beff8e60..ee0a61c2861b 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -145,20 +145,14 @@ static int arc_pgu_crtc_atomic_check(struct drm_crtc *crtc, static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *state) { - struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); - unsigned long flags; - - if (crtc->state->event) { - struct drm_pending_vblank_event *event = crtc->state->event; + struct drm_pending_vblank_event *event = crtc->state->event; + if (event) { crtc->state->event = NULL; - event->pipe = drm_crtc_index(crtc); - - WARN_ON(drm_crtc_vblank_get(crtc) != 0); - spin_lock_irqsave(&crtc->dev->event_lock, flags); - list_add_tail(&event->base.link, &arcpgu->event_list); - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); } } diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c index 76e187a5bde0..a92e533531c3 100644 --- a/drivers/gpu/drm/arc/arcpgu_drv.c +++ b/drivers/gpu/drm/arc/arcpgu_drv.c @@ -32,17 +32,11 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev) drm_fbdev_cma_hotplug_event(arcpgu->fbdev); } -static int arcpgu_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, bool async) -{ - return drm_atomic_helper_commit(dev, state, false); -} - static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = { .fb_create = drm_fb_cma_create, .output_poll_changed = arcpgu_fb_output_poll_changed, .atomic_check = drm_atomic_helper_check, - .atomic_commit = arcpgu_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; static void arcpgu_setup_mode_config(struct drm_device *drm) @@ -81,22 +75,6 @@ 
static const struct file_operations arcpgu_drm_ops = { .mmap = arcpgu_gem_mmap, }; -static void arcpgu_preclose(struct drm_device *drm, struct drm_file *file) -{ - struct arcpgu_drm_private *arcpgu = drm->dev_private; - struct drm_pending_vblank_event *e, *t; - unsigned long flags; - - spin_lock_irqsave(&drm->event_lock, flags); - list_for_each_entry_safe(e, t, &arcpgu->event_list, base.link) { - if (e->base.file_priv != file) - continue; - list_del(&e->base.link); - e->base.destroy(&e->base); - } - spin_unlock_irqrestore(&drm->event_lock, flags); -} - static void arcpgu_lastclose(struct drm_device *drm) { struct arcpgu_drm_private *arcpgu = drm->dev_private; @@ -122,8 +100,6 @@ static int arcpgu_load(struct drm_device *drm) if (IS_ERR(arcpgu->clk)) return PTR_ERR(arcpgu->clk); - INIT_LIST_HEAD(&arcpgu->event_list); - arcpgu_setup_mode_config(drm); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -192,7 +168,6 @@ int arcpgu_unload(struct drm_device *drm) static struct drm_driver arcpgu_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, - .preclose = arcpgu_preclose, .lastclose = arcpgu_lastclose, .name = "drm-arcpgu", .desc = "ARC PGU Controller", @@ -207,7 +182,7 @@ static struct drm_driver arcpgu_drm_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c index 08b6baeb320d..b7a8b2ac4055 100644 --- a/drivers/gpu/drm/arc/arcpgu_hdmi.c +++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c @@ -46,23 +46,6 @@ static int arcpgu_drm_connector_get_modes(struct drm_connector *connector) return sfuncs->get_modes(&slave->base, connector); } -struct drm_encoder * -arcpgu_drm_connector_best_encoder(struct drm_connector *connector) -{ - struct drm_encoder_slave *slave; - struct arcpgu_drm_connector *con = - container_of(connector, struct arcpgu_drm_connector, connector); - - slave = con->encoder_slave; - if (slave == NULL) { - dev_err(connector->dev->dev, - "connector_best_encoder: cannot find slave encoder for connector\n"); - return NULL; - } - - return &slave->base; -} - static enum drm_connector_status arcpgu_drm_connector_detect(struct drm_connector *connector, bool force) { @@ -97,7 +80,6 @@ static void arcpgu_drm_connector_destroy(struct drm_connector *connector) static const struct drm_connector_helper_funcs arcpgu_drm_connector_helper_funcs = { .get_modes = arcpgu_drm_connector_get_modes, - .best_encoder = arcpgu_drm_connector_best_encoder, }; static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index fef1b04c2aab..48019ae22ddb 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -33,8 +33,17 @@ * */ +static void hdlcd_crtc_cleanup(struct drm_crtc *crtc) +{ + struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + + /* stop the controller on cleanup */ + hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); + drm_crtc_cleanup(crtc); +} + static const struct drm_crtc_funcs hdlcd_crtc_funcs = { - .destroy = drm_crtc_cleanup, + .destroy = hdlcd_crtc_cleanup, .set_config = drm_atomic_helper_set_config, .page_flip = 
drm_atomic_helper_page_flip, .reset = drm_atomic_helper_crtc_reset, @@ -97,7 +106,7 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc) struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); struct drm_display_mode *m = &crtc->state->adjusted_mode; struct videomode vm; - unsigned int polarities, line_length, err; + unsigned int polarities, err; vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay; vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end; @@ -113,23 +122,18 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc) if (m->flags & DRM_MODE_FLAG_PVSYNC) polarities |= HDLCD_POLARITY_VSYNC; - line_length = crtc->primary->state->fb->pitches[0]; - /* Allow max number of outstanding requests and largest burst size */ hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS, HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16); - hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length); - hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length); - hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1); hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1); hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1); hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1); hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1); + hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1); hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1); hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1); hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1); - hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1); hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities); err = hdlcd_set_pxl_fmt(crtc); @@ -144,20 +148,19 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc) struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); clk_prepare_enable(hdlcd->clk); + hdlcd_crtc_mode_set_nofb(crtc); hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); - drm_crtc_vblank_on(crtc); } static void hdlcd_crtc_disable(struct drm_crtc *crtc) { struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); - if (!crtc->primary->fb) + if (!crtc->state->active) return; - clk_disable_unprepare(hdlcd->clk); hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); - drm_crtc_vblank_off(crtc); + clk_disable_unprepare(hdlcd->clk); } static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, @@ -179,52 +182,39 @@ static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *state) { - struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); - unsigned long flags; - - if (crtc->state->event) { - struct drm_pending_vblank_event *event = crtc->state->event; + struct drm_pending_vblank_event *event = crtc->state->event; + if (event) { crtc->state->event = NULL; - event->pipe = drm_crtc_index(crtc); - - WARN_ON(drm_crtc_vblank_get(crtc) != 0); - spin_lock_irqsave(&crtc->dev->event_lock, flags); - list_add_tail(&event->base.link, &hdlcd->event_list); - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + spin_lock_irq(&crtc->dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); } } -static void hdlcd_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ -} - -static bool hdlcd_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - return true; -} - static const struct drm_crtc_helper_funcs 
hdlcd_crtc_helper_funcs = { - .mode_fixup = hdlcd_crtc_mode_fixup, - .mode_set = drm_helper_crtc_mode_set, - .mode_set_base = drm_helper_crtc_mode_set_base, - .mode_set_nofb = hdlcd_crtc_mode_set_nofb, .enable = hdlcd_crtc_enable, .disable = hdlcd_crtc_disable, - .prepare = hdlcd_crtc_disable, - .commit = hdlcd_crtc_enable, .atomic_check = hdlcd_crtc_atomic_check, .atomic_begin = hdlcd_crtc_atomic_begin, - .atomic_flush = hdlcd_crtc_atomic_flush, }; static int hdlcd_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { + u32 src_w, src_h; + + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + + /* we can't do any scaling of the plane source */ + if ((src_w != state->crtc_w) || (src_h != state->crtc_h)) + return -EINVAL; + return 0; } @@ -233,20 +223,31 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane, { struct hdlcd_drm_private *hdlcd; struct drm_gem_cma_object *gem; + unsigned int depth, bpp; + u32 src_w, src_h, dest_w, dest_h; dma_addr_t scanout_start; - if (!plane->state->crtc || !plane->state->fb) + if (!plane->state->fb) return; - hdlcd = crtc_to_hdlcd_priv(plane->state->crtc); + drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp); + src_w = plane->state->src_w >> 16; + src_h = plane->state->src_h >> 16; + dest_w = plane->state->crtc_w; + dest_h = plane->state->crtc_h; gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0); - scanout_start = gem->paddr; + scanout_start = gem->paddr + plane->state->fb->offsets[0] + + plane->state->crtc_y * plane->state->fb->pitches[0] + + plane->state->crtc_x * bpp / 8; + + hdlcd = plane->dev->dev_private; + hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]); + hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, plane->state->fb->pitches[0]); + hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1); hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start); } static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = { - .prepare_fb = NULL, - .cleanup_fb = NULL, .atomic_check = hdlcd_plane_atomic_check, .atomic_update = hdlcd_plane_atomic_update, }; @@ -294,16 +295,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm) return plane; } -void hdlcd_crtc_suspend(struct drm_crtc *crtc) -{ - hdlcd_crtc_disable(crtc); -} - -void hdlcd_crtc_resume(struct drm_crtc *crtc) -{ - hdlcd_crtc_enable(crtc); -} - int hdlcd_setup_crtc(struct drm_device *drm) { struct hdlcd_drm_private *hdlcd = drm->dev_private; diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index b987c63ba8d6..74279be20b75 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -49,8 +49,6 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags) atomic_set(&hdlcd->dma_end_count, 0); #endif - INIT_LIST_HEAD(&hdlcd->event_list); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hdlcd->mmio = devm_ioremap_resource(drm->dev, res); if (IS_ERR(hdlcd->mmio)) { @@ -84,11 +82,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags) goto setup_fail; } - pm_runtime_enable(drm->dev); - - pm_runtime_get_sync(drm->dev); ret = drm_irq_install(drm, platform_get_irq(pdev, 0)); - pm_runtime_put_sync(drm->dev); if (ret < 0) { DRM_ERROR("failed to install IRQ handler\n"); goto irq_fail; @@ -112,17 +106,11 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm) drm_fbdev_cma_hotplug_event(hdlcd->fbdev); } -static int hdlcd_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, bool nonblock) -{ - return 
drm_atomic_helper_commit(dev, state, false); -} - static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = { .fb_create = drm_fb_cma_create, .output_poll_changed = hdlcd_fb_output_poll_changed, .atomic_check = drm_atomic_helper_check, - .atomic_commit = hdlcd_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; static void hdlcd_setup_mode_config(struct drm_device *drm) @@ -164,24 +152,9 @@ static irqreturn_t hdlcd_irq(int irq, void *arg) atomic_inc(&hdlcd->vsync_count); #endif - if (irq_status & HDLCD_INTERRUPT_VSYNC) { - bool events_sent = false; - unsigned long flags; - struct drm_pending_vblank_event *e, *t; - + if (irq_status & HDLCD_INTERRUPT_VSYNC) drm_crtc_handle_vblank(&hdlcd->crtc); - spin_lock_irqsave(&drm->event_lock, flags); - list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) { - list_del(&e->base.link); - drm_crtc_send_vblank_event(&hdlcd->crtc, e); - events_sent = true; - } - if (events_sent) - drm_crtc_vblank_put(&hdlcd->crtc); - spin_unlock_irqrestore(&drm->event_lock, flags); - } - /* acknowledge interrupt(s) */ hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status); @@ -275,6 +248,7 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg) static struct drm_info_list hdlcd_debugfs_list[] = { { "interrupt_count", hdlcd_show_underrun_count, 0 }, { "clocks", hdlcd_show_pxlclock, 0 }, + { "fb", drm_fb_cma_debugfs_show, 0 }, }; static int hdlcd_debugfs_init(struct drm_minor *minor) @@ -316,7 +290,7 @@ static struct drm_driver hdlcd_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = hdlcd_enable_vblank, .disable_vblank = hdlcd_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, @@ -357,6 +331,8 @@ static int hdlcd_drm_bind(struct device *dev) return -ENOMEM; drm->dev_private = hdlcd; + dev_set_drvdata(dev, drm); + hdlcd_setup_mode_config(drm); ret = hdlcd_load(drm, 0); if (ret) @@ -366,14 +342,18 @@ static int hdlcd_drm_bind(struct device *dev) if (ret) goto err_unload; - dev_set_drvdata(dev, drm); - ret = component_bind_all(dev, drm); if (ret) { DRM_ERROR("Failed to bind all components\n"); goto err_unregister; } + ret = pm_runtime_set_active(dev); + if (ret) + goto err_pm_active; + + pm_runtime_enable(dev); + ret = drm_vblank_init(drm, drm->mode_config.num_crtc); if (ret < 0) { DRM_ERROR("failed to initialise vblank\n"); @@ -399,16 +379,16 @@ err_fbdev: drm_mode_config_cleanup(drm); drm_vblank_cleanup(drm); err_vblank: + pm_runtime_disable(drm->dev); +err_pm_active: component_unbind_all(dev, drm); err_unregister: drm_dev_unregister(drm); err_unload: - pm_runtime_get_sync(drm->dev); drm_irq_uninstall(drm); - pm_runtime_put_sync(drm->dev); - pm_runtime_disable(drm->dev); of_reserved_mem_device_release(drm->dev); err_free: + dev_set_drvdata(dev, NULL); drm_dev_unref(drm); return ret; @@ -495,30 +475,34 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match); static int __maybe_unused hdlcd_pm_suspend(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct drm_crtc *crtc; + struct hdlcd_drm_private *hdlcd = drm ? 
drm->dev_private : NULL; - if (pm_runtime_suspended(dev)) + if (!hdlcd) return 0; - drm_modeset_lock_all(drm); - list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) - hdlcd_crtc_suspend(crtc); - drm_modeset_unlock_all(drm); + drm_kms_helper_poll_disable(drm); + + hdlcd->state = drm_atomic_helper_suspend(drm); + if (IS_ERR(hdlcd->state)) { + drm_kms_helper_poll_enable(drm); + return PTR_ERR(hdlcd->state); + } + return 0; } static int __maybe_unused hdlcd_pm_resume(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct drm_crtc *crtc; + struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL; - if (!pm_runtime_suspended(dev)) + if (!hdlcd) return 0; - drm_modeset_lock_all(drm); - list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) - hdlcd_crtc_resume(crtc); - drm_modeset_unlock_all(drm); + drm_atomic_helper_resume(drm, hdlcd->state); + drm_kms_helper_poll_enable(drm); + pm_runtime_set_active(dev); + return 0; } diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h index aa234784f053..e3950a071152 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.h +++ b/drivers/gpu/drm/arm/hdlcd_drv.h @@ -9,10 +9,9 @@ struct hdlcd_drm_private { void __iomem *mmio; struct clk *clk; struct drm_fbdev_cma *fbdev; - struct drm_framebuffer *fb; - struct list_head event_list; struct drm_crtc crtc; struct drm_plane *plane; + struct drm_atomic_state *state; #ifdef CONFIG_DEBUG_FS atomic_t buffer_underrun_count; atomic_t bus_error_count; @@ -36,7 +35,5 @@ static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg) int hdlcd_setup_crtc(struct drm_device *dev); void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd); -void hdlcd_crtc_suspend(struct drm_crtc *crtc); -void hdlcd_crtc_resume(struct drm_crtc *crtc); #endif /* __HDLCD_DRV_H__ */ diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 3130aa8bcdd0..34405e4a5d36 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -199,7 +199,7 @@ static void armada_drm_plane_work_run(struct armada_crtc *dcrtc, /* Handle any pending frame work. */ if (work) { work->fn(dcrtc, plane, work); - drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_put(&dcrtc->crtc); } wake_up(&plane->frame_wait); @@ -210,7 +210,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc, { int ret; - ret = drm_vblank_get(dcrtc->crtc.dev, dcrtc->num); + ret = drm_crtc_vblank_get(&dcrtc->crtc); if (ret) { DRM_ERROR("failed to acquire vblank counter\n"); return ret; @@ -218,7 +218,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc, ret = cmpxchg(&plane->work, NULL, work) ? 
-EBUSY : 0; if (ret) - drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_put(&dcrtc->crtc); return ret; } @@ -234,7 +234,7 @@ struct armada_plane_work *armada_drm_plane_work_cancel( struct armada_plane_work *work = xchg(&plane->work, NULL); if (work) - drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_put(&dcrtc->crtc); return work; } @@ -260,7 +260,7 @@ static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc, if (fwork->event) { spin_lock_irqsave(&dev->event_lock, flags); - drm_send_vblank_event(dev, dcrtc->num, fwork->event); + drm_crtc_send_vblank_event(&dcrtc->crtc, fwork->event); spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -592,9 +592,9 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc, if (interlaced ^ dcrtc->interlaced) { if (adj->flags & DRM_MODE_FLAG_INTERLACE) - drm_vblank_get(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_get(&dcrtc->crtc); else - drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_vblank_put(&dcrtc->crtc); dcrtc->interlaced = interlaced; } diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 439824a61aa5..cb21c0b6374a 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -197,7 +197,7 @@ static struct drm_driver armada_drm_driver = { .debugfs_init = armada_drm_debugfs_init, .debugfs_cleanup = armada_drm_debugfs_cleanup, #endif - .gem_free_object = armada_gem_free_object, + .gem_free_object_unlocked = armada_gem_free_object, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = armada_gem_prime_export, diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index fcd9c0714836..f54afd2113a9 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -209,7 +209,7 @@ static struct drm_driver driver = { .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - .gem_free_object = ast_gem_free_object, + .gem_free_object_unlocked = ast_gem_free_object, .dumb_create = ast_dumb_create, .dumb_map_offset = ast_dumb_mmap_offset, .dumb_destroy = drm_gem_dumb_destroy, diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index 5320f8c57884..c017a9330a18 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -167,12 +167,9 @@ static int astfb_create_object(struct ast_fbdev *afbdev, struct drm_gem_object **gobj_p) { struct drm_device *dev = afbdev->helper.dev; - u32 bpp, depth; u32 size; struct drm_gem_object *gobj; - int ret = 0; - drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp); size = mode_cmd->pitches[0] * mode_cmd->height; ret = ast_gem_create(dev, size, true, &gobj); diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index c337922606e3..5957c3e659fe 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -624,19 +624,21 @@ static void ast_crtc_reset(struct drm_crtc *crtc) } -static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct ast_crtc *ast_crtc = to_ast_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { ast_crtc->lut_r[i] = red[i] >> 8; ast_crtc->lut_g[i] = green[i] >> 8; ast_crtc->lut_b[i] = blue[i] >> 8; } ast_crtc_load_lut(crtc); + + return 0; } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index cf23a755f777..613f6c99b76a 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -374,8 +374,8 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc) spin_lock_irqsave(&dev->event_lock, flags); if (crtc->event) { - drm_send_vblank_event(dev, crtc->id, crtc->event); - drm_vblank_put(dev, crtc->id); + drm_crtc_send_vblank_event(&crtc->base, crtc->event); + drm_crtc_vblank_put(&crtc->base); crtc->event = NULL; } spin_unlock_irqrestore(&dev->event_lock, flags); @@ -391,12 +391,11 @@ void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc) { struct atmel_hlcdc_crtc_state *state; - if (crtc->state && crtc->state->mode_blob) - drm_property_unreference_blob(crtc->state->mode_blob); - if (crtc->state) { + __drm_atomic_helper_crtc_destroy_state(crtc->state); state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state); kfree(state); + crtc->state = NULL; } state = kzalloc(sizeof(*state), GFP_KERNEL); @@ -415,8 +414,9 @@ atmel_hlcdc_crtc_duplicate_state(struct drm_crtc *crtc) return NULL; state = kmalloc(sizeof(*state), GFP_KERNEL); - if (state) - __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); + if (!state) + return NULL; + __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state); state->output_mode = cur->output_mode; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 8ded7645747e..9ecf16c7911d 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -519,7 +519,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev, } /* Swap the state, this is the point of no return. 
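	 * The second argument of drm_atomic_helper_swap_state() below is the
	 * new "stall" flag; passing true makes the swap wait for any preceding
	 * commit that may still be reading the state being swapped out.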
*/ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); if (async) queue_work(dc->wq, &commit->work); @@ -776,7 +776,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = atmel_hlcdc_dc_enable_vblank, .disable_vblank = atmel_hlcdc_dc_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 39802c0539b6..473a475f27b1 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -113,21 +113,9 @@ static int atmel_hlcdc_rgb_mode_valid(struct drm_connector *connector, return atmel_hlcdc_dc_mode_valid(rgb->dc, mode); } - - -static struct drm_encoder * -atmel_hlcdc_rgb_best_encoder(struct drm_connector *connector) -{ - struct atmel_hlcdc_rgb_output *rgb = - drm_connector_to_atmel_hlcdc_rgb_output(connector); - - return &rgb->encoder; -} - static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = { .get_modes = atmel_hlcdc_panel_get_modes, .mode_valid = atmel_hlcdc_rgb_mode_valid, - .best_encoder = atmel_hlcdc_rgb_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index b332b4d3b0e2..abace82de6ea 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -89,7 +89,7 @@ static struct drm_driver bochs_driver = { .date = "20130925", .major = 1, .minor = 0, - .gem_free_object = bochs_gem_free_object, + .gem_free_object_unlocked = bochs_gem_free_object, .dumb_create = bochs_dumb_create, .dumb_map_offset = bochs_dumb_mmap_offset, .dumb_destroy = drm_gem_dumb_destroy, diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c index d087b054c360..f9f03bcba0af 100644 --- a/drivers/gpu/drm/bridge/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c @@ -986,16 +986,8 @@ unlock: return num_modes; } -static struct drm_encoder *anx78xx_best_encoder(struct drm_connector *connector) -{ - struct anx78xx *anx78xx = connector_to_anx78xx(connector); - - return anx78xx->bridge.encoder; -} - static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = { .get_modes = anx78xx_get_modes, - .best_encoder = anx78xx_best_encoder, }; static enum drm_connector_status anx78xx_detect(struct drm_connector *connector, diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c index c9d941283d30..70b1f7d4270b 100644 --- a/drivers/gpu/drm/bridge/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/dw-hdmi.c @@ -1476,15 +1476,6 @@ dw_hdmi_connector_mode_valid(struct drm_connector *connector, return mode_status; } -static struct drm_encoder *dw_hdmi_connector_best_encoder(struct drm_connector - *connector) -{ - struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, - connector); - - return hdmi->encoder; -} - static void dw_hdmi_connector_destroy(struct drm_connector *connector) { drm_connector_unregister(connector); @@ -1525,7 +1516,7 @@ static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = { static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = { .get_modes = dw_hdmi_connector_get_modes, 
.mode_valid = dw_hdmi_connector_mode_valid, - .best_encoder = dw_hdmi_connector_best_encoder, + .best_encoder = drm_atomic_helper_best_encoder, }; static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = { diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c index 7ecd59f70b8e..93f3dacf9e27 100644 --- a/drivers/gpu/drm/bridge/nxp-ptn3460.c +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c @@ -235,16 +235,8 @@ out: return num_modes; } -static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector) -{ - struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector); - - return ptn_bridge->bridge.encoder; -} - static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = { .get_modes = ptn3460_get_modes, - .best_encoder = ptn3460_best_encoder, }; static enum drm_connector_status ptn3460_detect(struct drm_connector *connector, diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c index be881e9fef8f..5cd8dd7e5904 100644 --- a/drivers/gpu/drm/bridge/parade-ps8622.c +++ b/drivers/gpu/drm/bridge/parade-ps8622.c @@ -474,18 +474,8 @@ static int ps8622_get_modes(struct drm_connector *connector) return drm_panel_get_modes(ps8622->panel); } -static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector) -{ - struct ps8622_bridge *ps8622; - - ps8622 = connector_to_ps8622(connector); - - return ps8622->bridge.encoder; -} - static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = { .get_modes = ps8622_get_modes, - .best_encoder = ps8622_best_encoder, }; static enum drm_connector_status ps8622_detect(struct drm_connector *connector, diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index dc83f69da6f1..b05f7eae32ce 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -142,7 +142,7 @@ static struct drm_driver driver = { .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - .gem_free_object = cirrus_gem_free_object, + .gem_free_object_unlocked = cirrus_gem_free_object, .dumb_create = cirrus_dumb_create, .dumb_map_offset = cirrus_dumb_mmap_offset, .dumb_destroy = drm_gem_dumb_destroy, diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index d3d8d7bfcc57..17c915d9a03e 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -325,21 +325,20 @@ static void cirrus_crtc_commit(struct drm_crtc *crtc) * use this for 8-bit mode so can't perform smooth fades on deeper modes, * but it's a requirement that we provide the function */ -static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc); int i; - if (size != CIRRUS_LUT_SIZE) - return; - - for (i = 0; i < CIRRUS_LUT_SIZE; i++) { + for (i = 0; i < size; i++) { cirrus_crtc->lut_r[i] = red[i]; cirrus_crtc->lut_g[i] = green[i]; cirrus_crtc->lut_b[i] = blue[i]; } cirrus_crtc_load_lut(crtc); + + return 0; } /* Simple cleanup function */ diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 3ff1ed7b33db..d99ab2f6663f 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -33,6 +33,20 @@ #include "drm_crtc_internal.h" +static void crtc_commit_free(struct kref *kref) +{ + 
struct drm_crtc_commit *commit = + container_of(kref, struct drm_crtc_commit, ref); + + kfree(commit); +} + +void drm_crtc_commit_put(struct drm_crtc_commit *commit) +{ + kref_put(&commit->ref, crtc_commit_free); +} +EXPORT_SYMBOL(drm_crtc_commit_put); + /** * drm_atomic_state_default_release - * release memory initialized by drm_atomic_state_init @@ -44,11 +58,8 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state) { kfree(state->connectors); - kfree(state->connector_states); kfree(state->crtcs); - kfree(state->crtc_states); kfree(state->planes); - kfree(state->plane_states); } EXPORT_SYMBOL(drm_atomic_state_default_release); @@ -72,18 +83,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state) sizeof(*state->crtcs), GFP_KERNEL); if (!state->crtcs) goto fail; - state->crtc_states = kcalloc(dev->mode_config.num_crtc, - sizeof(*state->crtc_states), GFP_KERNEL); - if (!state->crtc_states) - goto fail; state->planes = kcalloc(dev->mode_config.num_total_plane, sizeof(*state->planes), GFP_KERNEL); if (!state->planes) goto fail; - state->plane_states = kcalloc(dev->mode_config.num_total_plane, - sizeof(*state->plane_states), GFP_KERNEL); - if (!state->plane_states) - goto fail; state->dev = dev; @@ -139,40 +142,48 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state) DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state); for (i = 0; i < state->num_connector; i++) { - struct drm_connector *connector = state->connectors[i]; + struct drm_connector *connector = state->connectors[i].ptr; if (!connector) continue; connector->funcs->atomic_destroy_state(connector, - state->connector_states[i]); - state->connectors[i] = NULL; - state->connector_states[i] = NULL; + state->connectors[i].state); + state->connectors[i].ptr = NULL; + state->connectors[i].state = NULL; drm_connector_unreference(connector); } for (i = 0; i < config->num_crtc; i++) { - struct drm_crtc *crtc = state->crtcs[i]; + struct drm_crtc *crtc = state->crtcs[i].ptr; if (!crtc) continue; crtc->funcs->atomic_destroy_state(crtc, - state->crtc_states[i]); - state->crtcs[i] = NULL; - state->crtc_states[i] = NULL; + state->crtcs[i].state); + + if (state->crtcs[i].commit) { + kfree(state->crtcs[i].commit->event); + state->crtcs[i].commit->event = NULL; + drm_crtc_commit_put(state->crtcs[i].commit); + } + + state->crtcs[i].commit = NULL; + state->crtcs[i].ptr = NULL; + state->crtcs[i].state = NULL; } for (i = 0; i < config->num_total_plane; i++) { - struct drm_plane *plane = state->planes[i]; + struct drm_plane *plane = state->planes[i].ptr; if (!plane) continue; plane->funcs->atomic_destroy_state(plane, - state->plane_states[i]); - state->planes[i] = NULL; - state->plane_states[i] = NULL; + state->planes[i].state); + state->planes[i].ptr = NULL; + state->planes[i].state = NULL; } } EXPORT_SYMBOL(drm_atomic_state_default_clear); @@ -270,8 +281,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, if (!crtc_state) return ERR_PTR(-ENOMEM); - state->crtc_states[index] = crtc_state; - state->crtcs[index] = crtc; + state->crtcs[index].state = crtc_state; + state->crtcs[index].ptr = crtc; crtc_state->state = state; DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n", @@ -351,6 +362,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, drm_property_unreference_blob(state->mode_blob); state->mode_blob = NULL; + memset(&state->mode, 0, sizeof(state->mode)); + if (blob) { if (blob->length != sizeof(struct drm_mode_modeinfo) || drm_mode_convert_umode(&state->mode, 
@@ -363,7 +376,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n", state->mode.name, state); } else { - memset(&state->mode, 0, sizeof(state->mode)); state->enable = false; DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n", state); @@ -631,8 +643,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, if (!plane_state) return ERR_PTR(-ENOMEM); - state->plane_states[index] = plane_state; - state->planes[index] = plane; + state->planes[index].state = plane_state; + state->planes[index].ptr = plane; plane_state->state = state; DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n", @@ -896,8 +908,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, index = drm_connector_index(connector); if (index >= state->num_connector) { - struct drm_connector **c; - struct drm_connector_state **cs; + struct __drm_connnectors_state *c; int alloc = max(index + 1, config->num_connector); c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL); @@ -908,26 +919,19 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, memset(&state->connectors[state->num_connector], 0, sizeof(*state->connectors) * (alloc - state->num_connector)); - cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL); - if (!cs) - return ERR_PTR(-ENOMEM); - - state->connector_states = cs; - memset(&state->connector_states[state->num_connector], 0, - sizeof(*state->connector_states) * (alloc - state->num_connector)); state->num_connector = alloc; } - if (state->connector_states[index]) - return state->connector_states[index]; + if (state->connectors[index].state) + return state->connectors[index].state; connector_state = connector->funcs->atomic_duplicate_state(connector); if (!connector_state) return ERR_PTR(-ENOMEM); drm_connector_reference(connector); - state->connector_states[index] = connector_state; - state->connectors[index] = connector; + state->connectors[index].state = connector_state; + state->connectors[index].ptr = connector; connector_state->state = state; DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n", @@ -1431,7 +1435,8 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit); */ static struct drm_pending_vblank_event *create_vblank_event( - struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data) + struct drm_device *dev, struct drm_file *file_priv, + struct fence *fence, uint64_t user_data) { struct drm_pending_vblank_event *e = NULL; int ret; @@ -1444,12 +1449,17 @@ static struct drm_pending_vblank_event *create_vblank_event( e->event.base.length = sizeof(e->event); e->event.user_data = user_data; - ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base); - if (ret) { - kfree(e); - return NULL; + if (file_priv) { + ret = drm_event_reserve_init(dev, file_priv, &e->base, + &e->event.base); + if (ret) { + kfree(e); + return NULL; + } } + e->base.fence = fence; + return e; } @@ -1689,7 +1699,8 @@ retry: for_each_crtc_in_state(state, crtc, crtc_state, i) { struct drm_pending_vblank_event *e; - e = create_vblank_event(dev, file_priv, arg->user_data); + e = create_vblank_event(dev, file_priv, NULL, + arg->user_data); if (!e) { ret = -ENOMEM; goto out; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index ddfa0d120e39..6a13df8691d4 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -110,8 +110,10 @@ static int handle_conflicting_encoders(struct 
drm_atomic_state *state, if (funcs->atomic_best_encoder) new_encoder = funcs->atomic_best_encoder(connector, conn_state); - else + else if (funcs->best_encoder) new_encoder = funcs->best_encoder(connector); + else + new_encoder = drm_atomic_helper_best_encoder(connector); if (new_encoder) { if (encoder_mask & (1 << drm_encoder_index(new_encoder))) { @@ -298,8 +300,10 @@ update_connector_routing(struct drm_atomic_state *state, if (funcs->atomic_best_encoder) new_encoder = funcs->atomic_best_encoder(connector, connector_state); - else + else if (funcs->best_encoder) new_encoder = funcs->best_encoder(connector); + else + new_encoder = drm_atomic_helper_best_encoder(connector); if (!new_encoder) { DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", @@ -414,6 +418,9 @@ mode_fixup(struct drm_atomic_state *state) for_each_crtc_in_state(state, crtc, crtc_state, i) { const struct drm_crtc_helper_funcs *funcs; + if (!crtc_state->enable) + continue; + if (!crtc_state->mode_changed && !crtc_state->connectors_changed) continue; @@ -458,7 +465,7 @@ mode_fixup(struct drm_atomic_state *state) * times for the same update, e.g. when the ->atomic_check functions depend upon * the adjusted dotclock for fifo space allocation and watermark computation. * - * RETURNS + * RETURNS: * Zero for success or -errno */ int @@ -572,7 +579,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset); * It also sets crtc_state->planes_changed to indicate that a crtc has * updated planes. * - * RETURNS + * RETURNS: * Zero for success or -errno */ int @@ -611,7 +618,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev, if (!funcs || !funcs->atomic_check) continue; - ret = funcs->atomic_check(crtc, state->crtc_states[i]); + ret = funcs->atomic_check(crtc, crtc_state); if (ret) { DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", crtc->base.id, crtc->name); @@ -640,7 +647,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes); * ->atomic_check functions depend upon an updated adjusted_mode.clock to * e.g. properly compute watermarks. * - * RETURNS + * RETURNS: * Zero for success or -errno */ int drm_atomic_helper_check(struct drm_device *dev, @@ -1113,22 +1120,17 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); /** - * drm_atomic_helper_commit - commit validated state object - * @dev: DRM device - * @state: the driver state object - * @nonblocking: whether nonblocking behavior is requested. + * drm_atomic_helper_commit_tail - commit atomic update to hardware + * @state: new modeset state to be committed * - * This function commits a with drm_atomic_helper_check() pre-validated state - * object. This can still fail when e.g. the framebuffer reservation fails. For - * now this doesn't implement nonblocking commits. + * This is the default implementation for the ->atomic_commit_tail() hook of the + * &drm_mode_config_helper_funcs vtable. * - * Note that right now this function does not support nonblocking commits, hence - * driver writers must implement their own version for now. Also note that the - * default ordering of how the various stages are called is to match the legacy - * modeset helper library closest. One peculiarity of that is that it doesn't - * mesh well with runtime PM at all. + * Note that the default ordering of how the various stages are called is to + * match the legacy modeset helper library closest. One peculiarity of that is + * that it doesn't mesh well with runtime PM at all.
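+ *
+ * Drivers that need a different ordering can provide their own
+ * ->atomic_commit_tail() instead. A minimal wiring sketch (hypothetical
+ * driver code; my_commit_tail() is an assumed driver function, not part of
+ * the helpers)::
+ *
+ *     static struct drm_mode_config_helper_funcs my_mode_config_helpers = {
+ *         .atomic_commit_tail = my_commit_tail,
+ *     };
+ *
+ *     dev->mode_config.helper_private = &my_mode_config_helpers;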
* - * For drivers supporting runtime PM the recommended sequence is + * For drivers supporting runtime PM the recommended sequence is instead :: * * drm_atomic_helper_commit_modeset_disables(dev, state); * @@ -1136,9 +1138,75 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); * * drm_atomic_helper_commit_planes(dev, state, true); * - * See the kerneldoc entries for these three functions for more details. + * for committing the atomic update to hardware. See the kerneldoc entries for + * these three functions for more details. + */ +void drm_atomic_helper_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + + drm_atomic_helper_commit_modeset_disables(dev, state); + + drm_atomic_helper_commit_planes(dev, state, false); + + drm_atomic_helper_commit_modeset_enables(dev, state); + + drm_atomic_helper_commit_hw_done(state); + + drm_atomic_helper_wait_for_vblanks(dev, state); + + drm_atomic_helper_cleanup_planes(dev, state); +} +EXPORT_SYMBOL(drm_atomic_helper_commit_tail); + +static void commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct drm_mode_config_helper_funcs *funcs; + + funcs = dev->mode_config.helper_private; + + drm_atomic_helper_wait_for_fences(dev, state); + + drm_atomic_helper_wait_for_dependencies(state); + + if (funcs && funcs->atomic_commit_tail) + funcs->atomic_commit_tail(state); + else + drm_atomic_helper_commit_tail(state); + + drm_atomic_helper_commit_cleanup_done(state); + + drm_atomic_state_free(state); +} + +static void commit_work(struct work_struct *work) +{ + struct drm_atomic_state *state = container_of(work, + struct drm_atomic_state, + commit_work); + commit_tail(state); +} + +/** + * drm_atomic_helper_commit - commit validated state object + * @dev: DRM device + * @state: the driver state object + * @nonblock: whether nonblocking behavior is requested. + * + * This function commits a state object that has been pre-validated with + * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer + * reservation fails. This function implements nonblocking commits, using + * drm_atomic_helper_setup_commit() and related functions. + * + * Committing the actual hardware state is done through the + * ->atomic_commit_tail() callback of the &drm_mode_config_helper_funcs vtable, + * or its default implementation drm_atomic_helper_commit_tail(). * - * RETURNS + * RETURNS: * Zero for success or -errno. */ int drm_atomic_helper_commit(struct drm_device *dev, @@ -1147,8 +1215,11 @@ int drm_atomic_helper_commit(struct drm_device *dev, { int ret; - if (nonblock) - return -EBUSY; + ret = drm_atomic_helper_setup_commit(state, nonblock); + if (ret) + return ret; + + INIT_WORK(&state->commit_work, commit_work); ret = drm_atomic_helper_prepare_planes(dev, state); if (ret) @@ -1160,7 +1231,7 @@ int drm_atomic_helper_commit(struct drm_device *dev, * the software side now. */ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); /* * Everything below can be run asynchronously without the need to grab @@ -1176,21 +1247,16 @@ int drm_atomic_helper_commit(struct drm_device *dev, * update. Which is important since compositors need to figure out the * composition of the next frame right after having submitted the * current layout. + * + * NOTE: Commit work has multiple phases, first hardware commit, then + * cleanup.
We want them to overlap, hence need system_unbound_wq to + * make sure work items don't artificially stall on each other. */ - drm_atomic_helper_wait_for_fences(dev, state); - - drm_atomic_helper_commit_modeset_disables(dev, state); - - drm_atomic_helper_commit_planes(dev, state, false); - - drm_atomic_helper_commit_modeset_enables(dev, state); - - drm_atomic_helper_wait_for_vblanks(dev, state); - - drm_atomic_helper_cleanup_planes(dev, state); - - drm_atomic_state_free(state); + if (nonblock) + queue_work(system_unbound_wq, &state->commit_work); + else + commit_tail(state); return 0; } @@ -1199,12 +1265,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); /** * DOC: implementing nonblocking commit * - * For now the atomic helpers don't support nonblocking commit directly. If - * there is real need it could be added though, using the dma-buf fence - * infrastructure for generic synchronization with outstanding rendering. - * - * For now drivers have to implement nonblocking commit themselves, with the - * following sequence being the recommended one: + * Nonblocking atomic commits have to be implemented in the following sequence: * * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function * which commit needs to call which can fail, so we want to run it first and @@ -1216,10 +1277,14 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); * cancelled updates. Note that it is important to ensure that the framebuffer * cleanup is still done when cancelling. * - * For sufficient parallelism it is recommended to have a work item per crtc - * (for updates which don't touch global state) and a global one. Then we only - * need to synchronize with the crtc work items for changed crtcs and the global - * work item, which allows nice concurrent updates on disjoint sets of crtcs. + * Asynchronous workers need to have sufficient parallelism to be able to run + * different atomic commits on different CRTCs in parallel. The simplest way to + * achieve this is by running them on the &system_unbound_wq work queue. Note + * that drivers are not required to split up atomic commits and run individual + * commits in parallel - userspace is supposed to do that if it cares. + * But it might be beneficial to do that for modesets, since those necessarily + * must be done as one global operation, and enabling or disabling a CRTC can + * take a long time. But even that is not required. * * 3. The software state is updated synchronously with * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset @@ -1232,7 +1297,311 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and * then cleaning up the framebuffers after the old framebuffer is no longer * being displayed. + * + * The above scheme is implemented in the atomic helper libraries in + * drm_atomic_helper_commit() using a bunch of helper functions. See + * drm_atomic_helper_setup_commit() for a starting point. + */ + +static int stall_checks(struct drm_crtc *crtc, bool nonblock) +{ + struct drm_crtc_commit *commit, *stall_commit = NULL; + bool completed = true; + int i; + long ret = 0; + + spin_lock(&crtc->commit_lock); + i = 0; + list_for_each_entry(commit, &crtc->commit_list, commit_entry) { + if (i == 0) { + completed = try_wait_for_completion(&commit->flip_done); + /* Userspace is not allowed to get ahead of the previous + * commit with nonblocking ones.
*/ + if (!completed && nonblock) { + spin_unlock(&crtc->commit_lock); + return -EBUSY; + } + } else if (i == 1) { + stall_commit = commit; + drm_crtc_commit_get(stall_commit); + } else + break; + + i++; + } + spin_unlock(&crtc->commit_lock); + + if (!stall_commit) + return 0; + + /* We don't want to let commits get ahead of cleanup work too much, + * stalling on 2nd previous commit means triple-buffer won't ever stall. + */ + ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n", + crtc->base.id, crtc->name); + + drm_crtc_commit_put(stall_commit); + + return ret < 0 ? ret : 0; +} + +/** + * drm_atomic_helper_setup_commit - setup possibly nonblocking commit + * @state: new modeset state to be committed + * @nonblock: whether nonblocking behavior is requested. + * + * This function prepares @state to be used by the atomic helper's support for + * nonblocking commits. Drivers using the nonblocking commit infrastructure + * should always call this function from their ->atomic_commit hook. + * + * To be able to use this support drivers need to use a few more helper + * functions. drm_atomic_helper_wait_for_dependencies() must be called before + * actually committing the hardware state, and for nonblocking commits this call + * must be placed in the async worker. See also drm_atomic_helper_swap_state() + * and its stall parameter, for when a driver's commit hooks look at the + * ->state pointers of struct &drm_crtc, &drm_plane or &drm_connector directly. + * + * Completion of the hardware commit step must be signalled using + * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed + * to read or change any permanent software or hardware modeset state. The only + * exception is state protected by other means than &drm_modeset_lock locks. + * Only the free standing @state with pointers to the old state structures can + * be inspected, e.g. to clean up old buffers using + * drm_atomic_helper_cleanup_planes(). + * + * At the very end, before cleaning up @state drivers must call + * drm_atomic_helper_commit_cleanup_done(). + * + * This is all implemented in drm_atomic_helper_commit(), giving drivers a + * complete and easy-to-use default implementation of the atomic_commit() hook. + * + * The tracking of asynchronously executed and still pending commits is done + * using the core structure &drm_crtc_commit. + * + * By default there's no need to clean up resources allocated by this function + * explicitly: drm_atomic_state_default_clear() will take care of that + * automatically. + * + * Returns: + * + * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast, + * -ENOMEM on allocation failures and -EINTR when a signal is pending.
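+ *
+ * As a rough usage sketch, a driver-specific ->atomic_commit() built on this
+ * infrastructure follows the shape of drm_atomic_helper_commit() above
+ * (error handling trimmed; my_run_commit_tail() is a hypothetical driver
+ * helper that executes the commit tail, directly or from an async worker)::
+ *
+ *     ret = drm_atomic_helper_setup_commit(state, nonblock);
+ *     if (ret)
+ *         return ret;
+ *
+ *     ret = drm_atomic_helper_prepare_planes(dev, state);
+ *     if (ret)
+ *         return ret;
+ *
+ *     drm_atomic_helper_swap_state(state, true);
+ *
+ *     my_run_commit_tail(state, nonblock);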
+ */ +int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, + bool nonblock) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_crtc_commit *commit; + int i, ret; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + commit = kzalloc(sizeof(*commit), GFP_KERNEL); + if (!commit) + return -ENOMEM; + + init_completion(&commit->flip_done); + init_completion(&commit->hw_done); + init_completion(&commit->cleanup_done); + INIT_LIST_HEAD(&commit->commit_entry); + kref_init(&commit->ref); + commit->crtc = crtc; + + state->crtcs[i].commit = commit; + + ret = stall_checks(crtc, nonblock); + if (ret) + return ret; + + /* Drivers only send out events when at least either current or + * new CRTC state is active. Complete right away if everything + * stays off. */ + if (!crtc->state->active && !crtc_state->active) { + complete_all(&commit->flip_done); + continue; + } + + /* Legacy cursor updates are fully unsynced. */ + if (state->legacy_cursor_update) { + complete_all(&commit->flip_done); + continue; + } + + if (!crtc_state->event) { + commit->event = kzalloc(sizeof(*commit->event), + GFP_KERNEL); + if (!commit->event) + return -ENOMEM; + + crtc_state->event = commit->event; + } + + crtc_state->event->base.completion = &commit->flip_done; + } + + return 0; +} +EXPORT_SYMBOL(drm_atomic_helper_setup_commit); + + +static struct drm_crtc_commit *preceding_commit(struct drm_crtc *crtc) +{ + struct drm_crtc_commit *commit; + int i = 0; + + list_for_each_entry(commit, &crtc->commit_list, commit_entry) { + /* skip the first entry, that's the current commit */ + if (i == 1) + return commit; + i++; + } + + return NULL; +} + +/** + * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits + * @state: new modeset state to be committed + * + * This function waits for all preceding commits that touch the same CRTC as + * @state to both be committed to the hardware (as signalled by + * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled + * by calling drm_crtc_send_vblank_event() on the event member of + * &drm_crtc_state). + * + * This is part of the atomic helper support for nonblocking commits, see + * drm_atomic_helper_setup_commit() for an overview. + */ +void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_crtc_commit *commit; + int i; + long ret; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + spin_lock(&crtc->commit_lock); + commit = preceding_commit(crtc); + if (commit) + drm_crtc_commit_get(commit); + spin_unlock(&crtc->commit_lock); + + if (!commit) + continue; + + ret = wait_for_completion_timeout(&commit->hw_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n", + crtc->base.id, crtc->name); + + /* Currently no support for overwriting flips, hence + * stall for previous one to execute completely. */ + ret = wait_for_completion_timeout(&commit->flip_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", + crtc->base.id, crtc->name); + + drm_crtc_commit_put(commit); + } +} +EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies); + +/** + * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step + * @state: new modeset state to be committed + * + * This function is used to signal completion of the hardware commit step. After + * this step the driver is not allowed to read or change any permanent software + * or hardware modeset state.
The only exception is state protected by other + * means than &drm_modeset_lock locks. + * + * Drivers should try to postpone any expensive or delayed cleanup work until + * after this function is called. + * + * This is part of the atomic helper support for nonblocking commits, see + * drm_atomic_helper_setup_commit() for an overview. + */ +void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_crtc_commit *commit; + int i; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + commit = state->crtcs[i].commit; + if (!commit) + continue; + + /* backend must have consumed any event by now */ + WARN_ON(crtc->state->event); + spin_lock(&crtc->commit_lock); + complete_all(&commit->hw_done); + spin_unlock(&crtc->commit_lock); + } +} +EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done); + +/** + * drm_atomic_helper_commit_cleanup_done - signal completion of commit + * @state: new modeset state to be committed + * + * This signals completion of the atomic update @state, including any cleanup + * work. If used, it must be called right before calling + * drm_atomic_state_free(). + * + * This is part of the atomic helper support for nonblocking commits, see + * drm_atomic_helper_setup_commit() for an overview. */ +void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_crtc_commit *commit; + int i; + long ret; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + commit = state->crtcs[i].commit; + if (WARN_ON(!commit)) + continue; + + spin_lock(&crtc->commit_lock); + complete_all(&commit->cleanup_done); + WARN_ON(!try_wait_for_completion(&commit->hw_done)); + + /* commit_list borrows our reference, need to remove before we + * clean up our drm_atomic_state. But only after it actually + * completed, otherwise subsequent commits won't stall properly. */ + if (try_wait_for_completion(&commit->flip_done)) { + list_del(&commit->commit_entry); + spin_unlock(&crtc->commit_lock); + continue; + } + + spin_unlock(&crtc->commit_lock); + + /* We must wait for the vblank event to signal our completion + * before releasing our reference, since the vblank work does + * not hold a reference of its own.
*/ + ret = wait_for_completion_timeout(&commit->flip_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n", + crtc->base.id, crtc->name); + + spin_lock(&crtc->commit_lock); + list_del(&commit->commit_entry); + spin_unlock(&crtc->commit_lock); + } +} +EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done); /** * drm_atomic_helper_prepare_planes - prepare plane resources before commit @@ -1249,16 +1618,12 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); int drm_atomic_helper_prepare_planes(struct drm_device *dev, struct drm_atomic_state *state) { - int nplanes = dev->mode_config.num_total_plane; - int ret, i; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + int ret, i, j; - for (i = 0; i < nplanes; i++) { + for_each_plane_in_state(state, plane, plane_state, i) { const struct drm_plane_helper_funcs *funcs; - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *plane_state = state->plane_states[i]; - - if (!plane) - continue; funcs = plane->helper_private; @@ -1272,12 +1637,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev, return 0; fail: - for (i--; i >= 0; i--) { + for_each_plane_in_state(state, plane, plane_state, j) { const struct drm_plane_helper_funcs *funcs; - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *plane_state = state->plane_states[i]; - if (!plane) + if (j >= i) continue; funcs = plane->helper_private; @@ -1537,8 +1900,8 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes); /** * drm_atomic_helper_swap_state - store atomic state into current sw state - * @dev: DRM device * @state: atomic state + * @stall: stall for preceding commits * * This function stores the atomic state into the current state pointers in all * driver objects. It should be called after all failing steps have been done @@ -1559,42 +1922,70 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes); * * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3 * contains the old state. Also do any other cleanup required with that state. + * + * @stall must be set when nonblocking commits for this driver directly access + * the ->state pointer of &drm_plane, &drm_crtc or &drm_connector. With the + * current atomic helpers this is almost always the case, since the helpers + * don't pass the right state structures to the callbacks.
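+ *
+ * As a sketch, a nonblocking worker running after this function has swapped
+ * the state typically proceeds as follows (the ellipses stand for the
+ * driver's actual hardware programming and cleanup)::
+ *
+ *     drm_atomic_helper_wait_for_dependencies(state);
+ *     ... commit the state to the hardware ...
+ *     drm_atomic_helper_commit_hw_done(state);
+ *     ... wait for vblanks, clean up old framebuffers ...
+ *     drm_atomic_helper_commit_cleanup_done(state);
+ *     drm_atomic_state_free(state);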
*/ -void drm_atomic_helper_swap_state(struct drm_device *dev, - struct drm_atomic_state *state) +void drm_atomic_helper_swap_state(struct drm_atomic_state *state, + bool stall) { int i; + long ret; + struct drm_connector *connector; + struct drm_connector_state *conn_state; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + struct drm_crtc_commit *commit; + + if (stall) { + for_each_crtc_in_state(state, crtc, crtc_state, i) { + spin_lock(&crtc->commit_lock); + commit = list_first_entry_or_null(&crtc->commit_list, + struct drm_crtc_commit, commit_entry); + if (commit) + drm_crtc_commit_get(commit); + spin_unlock(&crtc->commit_lock); + + if (!commit) + continue; - for (i = 0; i < state->num_connector; i++) { - struct drm_connector *connector = state->connectors[i]; - - if (!connector) - continue; + ret = wait_for_completion_timeout(&commit->hw_done, + 10*HZ); + if (ret == 0) + DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n", + crtc->base.id, crtc->name); + drm_crtc_commit_put(commit); + } + } + for_each_connector_in_state(state, connector, conn_state, i) { connector->state->state = state; - swap(state->connector_states[i], connector->state); + swap(state->connectors[i].state, connector->state); connector->state->state = NULL; } - for (i = 0; i < dev->mode_config.num_crtc; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - - if (!crtc) - continue; - + for_each_crtc_in_state(state, crtc, crtc_state, i) { crtc->state->state = state; - swap(state->crtc_states[i], crtc->state); + swap(state->crtcs[i].state, crtc->state); crtc->state->state = NULL; - } - for (i = 0; i < dev->mode_config.num_total_plane; i++) { - struct drm_plane *plane = state->planes[i]; + if (state->crtcs[i].commit) { + spin_lock(&crtc->commit_lock); + list_add(&state->crtcs[i].commit->commit_entry, + &crtc->commit_list); + spin_unlock(&crtc->commit_lock); - if (!plane) - continue; + state->crtcs[i].commit->event = NULL; + } + } + for_each_plane_in_state(state, plane, plane_state, i) { plane->state->state = state; - swap(state->plane_states[i], plane->state); + swap(state->planes[i].state, plane->state); plane->state->state = NULL; } } @@ -2409,7 +2800,7 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip); * This is the main helper function provided by the atomic helper framework for * implementing the legacy DPMS connector interface. It computes the new desired * ->active state for the corresponding CRTC (if the connector is enabled) and - * updates it. + * updates it. * * Returns: * Returns 0 on success, negative errno numbers on failure. @@ -2930,16 +3321,15 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state); * @red: red correction table * @green: green correction table * @blue: green correction table - * @start: * @size: size of the tables * * Implements support for legacy gamma correction table for drivers * that support color management through the DEGAMMA_LUT/GAMMA_LUT * properties. 
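+ *
+ * Such drivers can typically use this function directly as their
+ * &drm_crtc_funcs ->gamma_set hook, e.g. (sketch; my_crtc_funcs is a
+ * hypothetical driver vtable)::
+ *
+ *     static const struct drm_crtc_funcs my_crtc_funcs = {
+ *         .gamma_set = drm_atomic_helper_legacy_gamma_set,
+ *         // ... other hooks ...
+ *     };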
*/ -void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, - u16 *red, u16 *green, u16 *blue, - uint32_t start, uint32_t size) +int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, + u16 *red, u16 *green, u16 *blue, + uint32_t size) { struct drm_device *dev = crtc->dev; struct drm_mode_config *config = &dev->mode_config; @@ -2951,7 +3341,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, state = drm_atomic_state_alloc(crtc->dev); if (!state) - return; + return -ENOMEM; blob = drm_property_create_blob(dev, sizeof(struct drm_color_lut) * size, @@ -3002,7 +3392,7 @@ retry: drm_property_unreference_blob(blob); - return; + return 0; fail: if (ret == -EDEADLK) goto backoff; @@ -3010,7 +3400,7 @@ fail: drm_atomic_state_free(state); drm_property_unreference_blob(blob); - return; + return ret; backoff: drm_atomic_state_clear(state); drm_atomic_legacy_backoff(state); diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index b3654404abd0..255543086590 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -36,7 +36,7 @@ * encoder chain. * * A bridge is always attached to a single &drm_encoder at a time, but can be - * either connected to it directly, or through an intermediate bridge: + * either connected to it directly, or through an intermediate bridge:: * * encoder ---> bridge B ---> bridge A * diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index d2a6d958ca76..4ec35f9e6de5 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -239,37 +239,6 @@ const char *drm_get_subpixel_order_name(enum subpixel_order order) } EXPORT_SYMBOL(drm_get_subpixel_order_name); -static char printable_char(int c) -{ - return isascii(c) && isprint(c) ? c : '?'; -} - -/** - * drm_get_format_name - return a string for drm fourcc format - * @format: format to compute name of - * - * Note that the buffer used by this function is globally shared and owned by - * the function itself. - * - * FIXME: This isn't really multithreading safe. - */ -const char *drm_get_format_name(uint32_t format) -{ - static char buf[32]; - - snprintf(buf, sizeof(buf), - "%c%c%c%c %s-endian (0x%08x)", - printable_char(format & 0xff), - printable_char((format >> 8) & 0xff), - printable_char((format >> 16) & 0xff), - printable_char((format >> 24) & 0x7f), - format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little", - format); - - return buf; -} -EXPORT_SYMBOL(drm_get_format_name); - /* * Internal function to assign a slot in the object idr and optionally * register the object into the idr. @@ -535,7 +504,7 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private); * * Cleanup framebuffer. This function is intended to be used from the drivers * ->destroy callback. It can also be used to clean up driver private - * framebuffers embedded into a larger structure. + * framebuffers embedded into a larger structure. 
* * Note that this function does not remove the fb from active usuage - if it is * still used anywhere, hilarity can ensue since userspace could call getfb on @@ -669,6 +638,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, crtc->dev = dev; crtc->funcs = funcs; + INIT_LIST_HEAD(&crtc->commit_list); + spin_lock_init(&crtc->commit_lock); + drm_modeset_lock_init(&crtc->mutex); ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); if (ret) @@ -692,7 +664,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, crtc->base.properties = &crtc->properties; list_add_tail(&crtc->head, &config->crtc_list); - config->num_crtc++; + crtc->index = config->num_crtc++; crtc->primary = primary; crtc->cursor = cursor; @@ -722,6 +694,11 @@ void drm_crtc_cleanup(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; + /* Note that the crtc_list is considered to be static; should we + * remove the drm_crtc at runtime we would have to decrement all + * the indices on the drm_crtc after us in the crtc_list. + */ + kfree(crtc->gamma_store); crtc->gamma_store = NULL; @@ -741,29 +718,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_crtc_cleanup); -/** - * drm_crtc_index - find the index of a registered CRTC - * @crtc: CRTC to find index for - * - * Given a registered CRTC, return the index of that CRTC within a DRM - * device's list of CRTCs. - */ -unsigned int drm_crtc_index(struct drm_crtc *crtc) -{ - unsigned int index = 0; - struct drm_crtc *tmp; - - drm_for_each_crtc(tmp, crtc->dev) { - if (tmp == crtc) - return index; - - index++; - } - - BUG(); -} -EXPORT_SYMBOL(drm_crtc_index); - /* * drm_mode_remove - remove and free a mode * @connector: connector list to modify @@ -1166,7 +1120,7 @@ int drm_encoder_init(struct drm_device *dev, } list_add_tail(&encoder->head, &dev->mode_config.encoder_list); - dev->mode_config.num_encoder++; + encoder->index = dev->mode_config.num_encoder++; out_put: if (ret) @@ -1180,29 +1134,6 @@ out_unlock: EXPORT_SYMBOL(drm_encoder_init); /** - * drm_encoder_index - find the index of a registered encoder - * @encoder: encoder to find index for - * - * Given a registered encoder, return the index of that encoder within a DRM - * device's list of encoders. - */ -unsigned int drm_encoder_index(struct drm_encoder *encoder) -{ - unsigned int index = 0; - struct drm_encoder *tmp; - - drm_for_each_encoder(tmp, encoder->dev) { - if (tmp == encoder) - return index; - - index++; - } - - BUG(); -} -EXPORT_SYMBOL(drm_encoder_index); - -/** * drm_encoder_cleanup - cleans up an initialised encoder * @encoder: encoder to cleanup * @@ -1212,6 +1143,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; + /* Note that the encoder_list is considered to be static; should we + * remove the drm_encoder at runtime we would have to decrement all + * the indices on the drm_encoder after us in the encoder_list. 
+ */ + drm_modeset_lock_all(dev); drm_mode_object_unregister(dev, &encoder->base); kfree(encoder->name); @@ -1300,7 +1236,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, plane->type = type; list_add_tail(&plane->head, &config->plane_list); - config->num_total_plane++; + plane->index = config->num_total_plane++; if (plane->type == DRM_PLANE_TYPE_OVERLAY) config->num_overlay_plane++; @@ -1374,6 +1310,11 @@ void drm_plane_cleanup(struct drm_plane *plane) BUG_ON(list_empty(&plane->head)); + /* Note that the plane_list is considered to be static; should we + * remove the drm_plane at runtime we would have to decrement all + * the indices on the drm_plane after us in the plane_list. + */ + list_del(&plane->head); dev->mode_config.num_total_plane--; if (plane->type == DRM_PLANE_TYPE_OVERLAY) @@ -1391,29 +1332,6 @@ void drm_plane_cleanup(struct drm_plane *plane) EXPORT_SYMBOL(drm_plane_cleanup); /** - * drm_plane_index - find the index of a registered plane - * @plane: plane to find index for - * - * Given a registered plane, return the index of that CRTC within a DRM - * device's list of planes. - */ -unsigned int drm_plane_index(struct drm_plane *plane) -{ - unsigned int index = 0; - struct drm_plane *tmp; - - drm_for_each_plane(tmp, plane->dev) { - if (tmp == plane) - return index; - - index++; - } - - BUG(); -} -EXPORT_SYMBOL(drm_plane_index); - -/** * drm_plane_from_index - find the registered plane at an index * @dev: DRM device * @idx: index of registered plane to find for @@ -1425,13 +1343,11 @@ struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx) { struct drm_plane *plane; - unsigned int i = 0; - drm_for_each_plane(plane, dev) { - if (i == idx) + drm_for_each_plane(plane, dev) + if (idx == plane->index) return plane; - i++; - } + return NULL; } EXPORT_SYMBOL(drm_plane_from_index); @@ -2821,8 +2737,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, goto out; } - drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); - /* * Check whether the primary plane supports the fb pixel format. 
* Drivers not implementing the universal planes API use a @@ -2977,6 +2891,8 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); return PTR_ERR(fb); } + fb->hot_x = req->hot_x; + fb->hot_y = req->hot_y; } else { fb = NULL; } @@ -4841,7 +4757,8 @@ bool drm_property_change_valid_get(struct drm_property *property, if (value == 0) return true; - return _object_find(property->dev, value, property->values[0]) != NULL; + *ref = _object_find(property->dev, value, property->values[0]); + return *ref != NULL; } for (i = 0; i < property->num_values; i++) @@ -5139,6 +5056,9 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder); int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, int gamma_size) { + uint16_t *r_base, *g_base, *b_base; + int i; + crtc->gamma_size = gamma_size; crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3, @@ -5148,6 +5068,16 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, return -ENOMEM; } + r_base = crtc->gamma_store; + g_base = r_base + gamma_size; + b_base = g_base + gamma_size; + for (i = 0; i < gamma_size; i++) { + r_base[i] = i << 8; + g_base[i] = i << 8; + b_base[i] = i << 8; + } + + return 0; } EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); @@ -5215,7 +5145,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, goto out; } - crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); + ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size); out: drm_modeset_unlock_all(dev); @@ -5545,264 +5475,6 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, } /** - * drm_fb_get_bpp_depth - get the bpp/depth values for format - * @format: pixel format (DRM_FORMAT_*) - * @depth: storage for the depth value - * @bpp: storage for the bpp value - * - * This only supports RGB formats here for compat with code that doesn't use - * pixel formats directly yet. - */ -void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, - int *bpp) -{ - switch (format) { - case DRM_FORMAT_C8: - case DRM_FORMAT_RGB332: - case DRM_FORMAT_BGR233: - *depth = 8; - *bpp = 8; - break; - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_XBGR1555: - case DRM_FORMAT_RGBX5551: - case DRM_FORMAT_BGRX5551: - case DRM_FORMAT_ARGB1555: - case DRM_FORMAT_ABGR1555: - case DRM_FORMAT_RGBA5551: - case DRM_FORMAT_BGRA5551: - *depth = 15; - *bpp = 16; - break; - case DRM_FORMAT_RGB565: - case DRM_FORMAT_BGR565: - *depth = 16; - *bpp = 16; - break; - case DRM_FORMAT_RGB888: - case DRM_FORMAT_BGR888: - *depth = 24; - *bpp = 24; - break; - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_RGBX8888: - case DRM_FORMAT_BGRX8888: - *depth = 24; - *bpp = 32; - break; - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_RGBX1010102: - case DRM_FORMAT_BGRX1010102: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_RGBA1010102: - case DRM_FORMAT_BGRA1010102: - *depth = 30; - *bpp = 32; - break; - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_RGBA8888: - case DRM_FORMAT_BGRA8888: - *depth = 32; - *bpp = 32; - break; - default: - DRM_DEBUG_KMS("unsupported pixel format %s\n", - drm_get_format_name(format)); - *depth = 0; - *bpp = 0; - break; - } -} -EXPORT_SYMBOL(drm_fb_get_bpp_depth); - -/** - * drm_format_num_planes - get the number of planes for format - * @format: pixel format (DRM_FORMAT_*) - * - * Returns: - * The number of planes used by the specified pixel format. 
- */ -int drm_format_num_planes(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_YUV410: - case DRM_FORMAT_YVU410: - case DRM_FORMAT_YUV411: - case DRM_FORMAT_YVU411: - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - case DRM_FORMAT_YUV422: - case DRM_FORMAT_YVU422: - case DRM_FORMAT_YUV444: - case DRM_FORMAT_YVU444: - return 3; - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - case DRM_FORMAT_NV16: - case DRM_FORMAT_NV61: - case DRM_FORMAT_NV24: - case DRM_FORMAT_NV42: - return 2; - default: - return 1; - } -} -EXPORT_SYMBOL(drm_format_num_planes); - -/** - * drm_format_plane_cpp - determine the bytes per pixel value - * @format: pixel format (DRM_FORMAT_*) - * @plane: plane index - * - * Returns: - * The bytes per pixel value for the specified plane. - */ -int drm_format_plane_cpp(uint32_t format, int plane) -{ - unsigned int depth; - int bpp; - - if (plane >= drm_format_num_planes(format)) - return 0; - - switch (format) { - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - return 2; - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - case DRM_FORMAT_NV16: - case DRM_FORMAT_NV61: - case DRM_FORMAT_NV24: - case DRM_FORMAT_NV42: - return plane ? 2 : 1; - case DRM_FORMAT_YUV410: - case DRM_FORMAT_YVU410: - case DRM_FORMAT_YUV411: - case DRM_FORMAT_YVU411: - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - case DRM_FORMAT_YUV422: - case DRM_FORMAT_YVU422: - case DRM_FORMAT_YUV444: - case DRM_FORMAT_YVU444: - return 1; - default: - drm_fb_get_bpp_depth(format, &depth, &bpp); - return bpp >> 3; - } -} -EXPORT_SYMBOL(drm_format_plane_cpp); - -/** - * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor - * @format: pixel format (DRM_FORMAT_*) - * - * Returns: - * The horizontal chroma subsampling factor for the - * specified pixel format. - */ -int drm_format_horz_chroma_subsampling(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_YUV411: - case DRM_FORMAT_YVU411: - case DRM_FORMAT_YUV410: - case DRM_FORMAT_YVU410: - return 4; - case DRM_FORMAT_YUYV: - case DRM_FORMAT_YVYU: - case DRM_FORMAT_UYVY: - case DRM_FORMAT_VYUY: - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - case DRM_FORMAT_NV16: - case DRM_FORMAT_NV61: - case DRM_FORMAT_YUV422: - case DRM_FORMAT_YVU422: - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - return 2; - default: - return 1; - } -} -EXPORT_SYMBOL(drm_format_horz_chroma_subsampling); - -/** - * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor - * @format: pixel format (DRM_FORMAT_*) - * - * Returns: - * The vertical chroma subsampling factor for the - * specified pixel format. - */ -int drm_format_vert_chroma_subsampling(uint32_t format) -{ - switch (format) { - case DRM_FORMAT_YUV410: - case DRM_FORMAT_YVU410: - return 4; - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - case DRM_FORMAT_NV12: - case DRM_FORMAT_NV21: - return 2; - default: - return 1; - } -} -EXPORT_SYMBOL(drm_format_vert_chroma_subsampling); - -/** - * drm_format_plane_width - width of the plane given the first plane - * @width: width of the first plane - * @format: pixel format - * @plane: plane index - * - * Returns: - * The width of @plane, given that the width of the first plane is @width. 
- */ -int drm_format_plane_width(int width, uint32_t format, int plane) -{ - if (plane >= drm_format_num_planes(format)) - return 0; - - if (plane == 0) - return width; - - return width / drm_format_horz_chroma_subsampling(format); -} -EXPORT_SYMBOL(drm_format_plane_width); - -/** - * drm_format_plane_height - height of the plane given the first plane - * @height: height of the first plane - * @format: pixel format - * @plane: plane index - * - * Returns: - * The height of @plane, given that the height of the first plane is @height. - */ -int drm_format_plane_height(int height, uint32_t format, int plane) -{ - if (plane >= drm_format_num_planes(format)) - return 0; - - if (plane == 0) - return height; - - return height / drm_format_vert_chroma_subsampling(format); -} -EXPORT_SYMBOL(drm_format_plane_height); - -/** * drm_rotation_simplify() - Try to simplify the rotation * @rotation: Rotation to be simplified * @supported_rotations: Supported rotations @@ -6065,3 +5737,48 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, return tg; } EXPORT_SYMBOL(drm_mode_create_tile_group); + +/** + * drm_crtc_enable_color_mgmt - enable color management properties + * @crtc: DRM CRTC + * @degamma_lut_size: the size of the degamma lut (before CSC) + * @has_ctm: whether to attach ctm_property for CSC matrix + * @gamma_lut_size: the size of the gamma lut (after CSC) + * + * This function lets the driver enable the color correction + * properties on a CRTC. This includes 3 degamma, csc and gamma + * properties that userspace can set and 2 size properties to inform + * the userspace of the lut sizes. Each of the properties is + * optional. The gamma and degamma properties are only attached if + * their size is not 0 and ctm_property is only attached if has_ctm is + * true. + */ +void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, + uint degamma_lut_size, + bool has_ctm, + uint gamma_lut_size) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *config = &dev->mode_config; + + if (degamma_lut_size) { + drm_object_attach_property(&crtc->base, + config->degamma_lut_property, 0); + drm_object_attach_property(&crtc->base, + config->degamma_lut_size_property, + degamma_lut_size); + } + + if (has_ctm) + drm_object_attach_property(&crtc->base, + config->ctm_property, 0); + + if (gamma_lut_size) { + drm_object_attach_property(&crtc->base, + config->gamma_lut_property, 0); + drm_object_attach_property(&crtc->base, + config->gamma_lut_size_property, + gamma_lut_size); + } +} +EXPORT_SYMBOL(drm_crtc_enable_color_mgmt); diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index a6e42433ef0e..bf10d7046aa6 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -1121,36 +1121,3 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, return drm_plane_helper_commit(plane, plane_state, old_fb); } EXPORT_SYMBOL(drm_helper_crtc_mode_set_base); - -/** - * drm_helper_crtc_enable_color_mgmt - enable color management properties - * @crtc: DRM CRTC - * @degamma_lut_size: the size of the degamma lut (before CSC) - * @gamma_lut_size: the size of the gamma lut (after CSC) - * - * This function lets the driver enable the color correction properties on a - * CRTC. This includes 3 degamma, csc and gamma properties that userspace can - * set and 2 size properties to inform the userspace of the lut sizes.
- */ -void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc, - int degamma_lut_size, - int gamma_lut_size) -{ - struct drm_device *dev = crtc->dev; - struct drm_mode_config *config = &dev->mode_config; - - drm_object_attach_property(&crtc->base, - config->degamma_lut_property, 0); - drm_object_attach_property(&crtc->base, - config->ctm_property, 0); - drm_object_attach_property(&crtc->base, - config->gamma_lut_property, 0); - - drm_object_attach_property(&crtc->base, - config->degamma_lut_size_property, - degamma_lut_size); - drm_object_attach_property(&crtc->base, - config->gamma_lut_size_property, - gamma_lut_size); -} -EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index bff89226a344..8b2582aeaab6 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -605,8 +605,6 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL); if (ret) goto err_minors; - - WARN_ON(driver->suspend || driver->resume); } if (drm_core_check_feature(dev, DRIVER_RENDER)) { diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 9a401aed98e0..622f788bff46 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -271,7 +271,7 @@ int drm_load_edid_firmware(struct drm_connector *connector) * by commas, search through the list looking for one that * matches the connector. * - * If there's one or more that don't't specify a connector, keep + * If there's one or more that doesn't specify a connector, keep * the last one found one as a fallback. */ fwstr = kstrdup(edid_firmware, GFP_KERNEL); diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index 172cafe11c71..c0b0c718994a 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -23,6 +23,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_cma_helper.h> +#include <linux/dma-mapping.h> #include <linux/module.h> #define DEFAULT_FBDEFIO_DELAY_MS 50 @@ -52,7 +53,7 @@ struct drm_fbdev_cma { * will be set up automatically. dirty() is called by * drm_fb_helper_deferred_io() in process context (struct delayed_work). * - * Example fbdev deferred io code: + * Example fbdev deferred io code:: * * static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb, * struct drm_file *file_priv, @@ -162,6 +163,10 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev, * drm_fb_cma_create_with_funcs() - helper function for the * &drm_mode_config_funcs ->fb_create * callback function + * @dev: DRM device + * @file_priv: drm file for the ioctl call + * @mode_cmd: metadata from the userspace fb creation request + * @funcs: vtable to be used for the new framebuffer object * * This can be used to set &drm_framebuffer_funcs for drivers that need the * dirty() callback. Use drm_fb_cma_create() if you don't need to change @@ -223,6 +228,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs); /** * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function + * @dev: DRM device + * @file_priv: drm file for the ioctl call + * @mode_cmd: metadata from the userspace fb creation request * * If your hardware has special alignment or pitch requirements these should be * checked before calling this function. 
Use drm_fb_cma_create_with_funcs() if @@ -246,7 +254,7 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create); * This function will usually be called from the CRTC callback functions. */ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, - unsigned int plane) + unsigned int plane) { struct drm_fb_cma *fb_cma = to_fb_cma(fb); @@ -258,10 +266,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj); #ifdef CONFIG_DEBUG_FS -/* - * drm_fb_cma_describe() - Helper to dump information about a single - * CMA framebuffer object - */ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) { struct drm_fb_cma *fb_cma = to_fb_cma(fb); @@ -279,7 +283,9 @@ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) /** * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects - * in debugfs. + * in debugfs. + * @m: output file + * @arg: private data for the callback */ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg) { @@ -297,6 +303,12 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg) EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show); #endif +static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + return dma_mmap_writecombine(info->device, vma, info->screen_base, + info->fix.smem_start, info->fix.smem_len); +} + static struct fb_ops drm_fbdev_cma_ops = { .owner = THIS_MODULE, .fb_fillrect = drm_fb_helper_sys_fillrect, @@ -307,6 +319,7 @@ static struct fb_ops drm_fbdev_cma_ops = { .fb_blank = drm_fb_helper_blank, .fb_pan_display = drm_fb_helper_pan_display, .fb_setcmap = drm_fb_helper_setcmap, + .fb_mmap = drm_fb_cma_mmap, }; static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info, @@ -333,6 +346,7 @@ static int drm_fbdev_cma_defio_init(struct fb_info *fbi, fbops = kzalloc(sizeof(*fbops), GFP_KERNEL); if (!fbdefio || !fbops) { kfree(fbdefio); + kfree(fbops); return -ENOMEM; } @@ -445,7 +459,7 @@ err_cma_destroy: err_fb_info_destroy: drm_fb_helper_release_fbi(helper); err_gem_free_object: - dev->driver->gem_free_object(&obj->base); + drm_gem_object_unreference_unlocked(&obj->base); return ret; } EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 7c2eb75db60f..0bac5246e5a7 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -227,7 +227,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) g_base = r_base + crtc->gamma_size; b_base = g_base + crtc->gamma_size; - crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); + crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size); } /** @@ -385,7 +385,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper) drm_warn_on_modeset_not_all_locked(dev); - if (fb_helper->atomic) + if (dev->mode_config.funcs->atomic_commit) return restore_fbdev_mode_atomic(fb_helper); drm_for_each_plane(plane, dev) { @@ -716,8 +716,6 @@ int drm_fb_helper_init(struct drm_device *dev, i++; } - fb_helper->atomic = !!drm_core_check_feature(dev, DRIVER_ATOMIC); - return 0; out_free: drm_fb_helper_crtc_free(fb_helper); @@ -1042,7 +1040,6 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, { struct drm_fb_helper *fb_helper = info->par; struct drm_framebuffer *fb = fb_helper->fb; - int pindex; if (info->fix.visual == FB_VISUAL_TRUECOLOR) { u32 *palette; @@ -1074,38 +1071,10 @@ static int setcolreg(struct drm_crtc *crtc, u16 
red, u16 green, !fb_helper->funcs->gamma_get)) return -EINVAL; - pindex = regno; - - if (fb->bits_per_pixel == 16) { - pindex = regno << 3; - - if (fb->depth == 16 && regno > 63) - return -EINVAL; - if (fb->depth == 15 && regno > 31) - return -EINVAL; - - if (fb->depth == 16) { - u16 r, g, b; - int i; - if (regno < 32) { - for (i = 0; i < 8; i++) - fb_helper->funcs->gamma_set(crtc, red, - green, blue, pindex + i); - } + WARN_ON(fb->bits_per_pixel != 8); - fb_helper->funcs->gamma_get(crtc, &r, - &g, &b, - pindex >> 1); + fb_helper->funcs->gamma_set(crtc, red, green, blue, regno); - for (i = 0; i < 4; i++) - fb_helper->funcs->gamma_set(crtc, r, - green, b, - (pindex >> 1) + i); - } - } - - if (fb->depth != 16) - fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex); return 0; } @@ -1373,7 +1342,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, return -EBUSY; } - if (fb_helper->atomic) { + if (dev->mode_config.funcs->atomic_commit) { ret = pan_display_atomic(var, info); goto unlock; } @@ -2000,7 +1969,18 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, my_score++; connector_funcs = connector->helper_private; - encoder = connector_funcs->best_encoder(connector); + + /* + * If the DRM device implements atomic hooks and ->best_encoder() is + * NULL we fall back to the default drm_atomic_helper_best_encoder() + * helper. + */ + if (fb_helper->dev->mode_config.funcs->atomic_commit && + !connector_funcs->best_encoder) + encoder = drm_atomic_helper_best_encoder(connector); + else + encoder = connector_funcs->best_encoder(connector); + if (!encoder) goto out; diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 7af7f8bcb355..a27bc7cda975 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -67,7 +67,7 @@ DEFINE_MUTEX(drm_global_mutex); * specific implementations. For GEM-based drivers this is drm_gem_mmap(). * * No other file operations are supported by the DRM userspace API. Overall the - * following is an example #file_operations structure: + * following is an example #file_operations structure:: * * static const example_drm_fops = { * .owner = THIS_MODULE, @@ -368,7 +368,7 @@ static void drm_events_release(struct drm_file *file_priv) /* Remove unconsumed events */ list_for_each_entry_safe(e, et, &file_priv->event_list, link) { list_del(&e->link); - e->destroy(e); + kfree(e); } spin_unlock_irqrestore(&dev->event_lock, flags); @@ -636,7 +636,7 @@ put_back_event: } ret += length; - e->destroy(e); + kfree(e); } } mutex_unlock(&file_priv->event_read_lock); @@ -713,9 +713,6 @@ int drm_event_reserve_init_locked(struct drm_device *dev, list_add(&p->pending_link, &file_priv->pending_event_list); p->file_priv = file_priv; - /* we *could* pass this in as arg, but everyone uses kfree: */ - p->destroy = (void (*) (struct drm_pending_event *)) kfree; - return 0; } EXPORT_SYMBOL(drm_event_reserve_init_locked); @@ -778,7 +775,7 @@ void drm_event_cancel_free(struct drm_device *dev, list_del(&p->pending_link); } spin_unlock_irqrestore(&dev->event_lock, flags); - p->destroy(p); + kfree(p); } EXPORT_SYMBOL(drm_event_cancel_free); @@ -800,8 +797,19 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e) { assert_spin_locked(&dev->event_lock); + if (e->completion) { + /* ->completion might disappear as soon as it is signalled.
*/ + complete_all(e->completion); + e->completion = NULL; + } + + if (e->fence) { + fence_signal(e->fence); + fence_put(e->fence); + } + if (!e->file_priv) { - e->destroy(e); + kfree(e); return; } diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c new file mode 100644 index 000000000000..0645c85d5f95 --- /dev/null +++ b/drivers/gpu/drm/drm_fourcc.c @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + * + * DRM core format related functions + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#include <linux/bug.h> +#include <linux/ctype.h> +#include <linux/export.h> +#include <linux/kernel.h> + +#include <drm/drmP.h> +#include <drm/drm_fourcc.h> + +static char printable_char(int c) +{ + return isascii(c) && isprint(c) ? c : '?'; +} + +/** + * drm_get_format_name - return a string for drm fourcc format + * @format: format to compute name of + * + * Note that the buffer used by this function is globally shared and owned by + * the function itself. + * + * FIXME: This isn't really multithreading safe. + */ +const char *drm_get_format_name(uint32_t format) +{ + static char buf[32]; + + snprintf(buf, sizeof(buf), + "%c%c%c%c %s-endian (0x%08x)", + printable_char(format & 0xff), + printable_char((format >> 8) & 0xff), + printable_char((format >> 16) & 0xff), + printable_char((format >> 24) & 0x7f), + format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little", + format); + + return buf; +} +EXPORT_SYMBOL(drm_get_format_name); + +/** + * drm_fb_get_bpp_depth - get the bpp/depth values for format + * @format: pixel format (DRM_FORMAT_*) + * @depth: storage for the depth value + * @bpp: storage for the bpp value + * + * This only supports RGB formats here for compat with code that doesn't use + * pixel formats directly yet. 
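+ *
+ * For example (values per the switch statement below)::
+ *
+ *     unsigned int depth;
+ *     int bpp;
+ *
+ *     drm_fb_get_bpp_depth(DRM_FORMAT_XRGB8888, &depth, &bpp);
+ *     // depth == 24, bpp == 32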
+ */ +void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, + int *bpp) +{ + switch (format) { + case DRM_FORMAT_C8: + case DRM_FORMAT_RGB332: + case DRM_FORMAT_BGR233: + *depth = 8; + *bpp = 8; + break; + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + *depth = 15; + *bpp = 16; + break; + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + *depth = 16; + *bpp = 16; + break; + case DRM_FORMAT_RGB888: + case DRM_FORMAT_BGR888: + *depth = 24; + *bpp = 24; + break; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + *depth = 24; + *bpp = 32; + break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_RGBX1010102: + case DRM_FORMAT_BGRX1010102: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + *depth = 30; + *bpp = 32; + break; + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + *depth = 32; + *bpp = 32; + break; + default: + DRM_DEBUG_KMS("unsupported pixel format %s\n", + drm_get_format_name(format)); + *depth = 0; + *bpp = 0; + break; + } +} +EXPORT_SYMBOL(drm_fb_get_bpp_depth); + +/** + * drm_format_num_planes - get the number of planes for format + * @format: pixel format (DRM_FORMAT_*) + * + * Returns: + * The number of planes used by the specified pixel format. + */ +int drm_format_num_planes(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_YVU444: + return 3; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(drm_format_num_planes); + +/** + * drm_format_plane_cpp - determine the bytes per pixel value + * @format: pixel format (DRM_FORMAT_*) + * @plane: plane index + * + * Returns: + * The bytes per pixel value for the specified plane. + */ +int drm_format_plane_cpp(uint32_t format, int plane) +{ + unsigned int depth; + int bpp; + + if (plane >= drm_format_num_planes(format)) + return 0; + + switch (format) { + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + return 2; + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_NV24: + case DRM_FORMAT_NV42: + return plane ? 2 : 1; + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV444: + case DRM_FORMAT_YVU444: + return 1; + default: + drm_fb_get_bpp_depth(format, &depth, &bpp); + return bpp >> 3; + } +} +EXPORT_SYMBOL(drm_format_plane_cpp); + +/** + * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor + * @format: pixel format (DRM_FORMAT_*) + * + * Returns: + * The horizontal chroma subsampling factor for the + * specified pixel format. 
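+ *
+ * For example (per this switch and its vertical counterpart below), NV12 is
+ * subsampled 2x2 while YUV411 is subsampled 4x1::
+ *
+ *     drm_format_horz_chroma_subsampling(DRM_FORMAT_NV12);   // returns 2
+ *     drm_format_vert_chroma_subsampling(DRM_FORMAT_NV12);   // returns 2
+ *     drm_format_horz_chroma_subsampling(DRM_FORMAT_YUV411); // returns 4
+ *     drm_format_vert_chroma_subsampling(DRM_FORMAT_YUV411); // returns 1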
+ */ +int drm_format_horz_chroma_subsampling(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV411: + case DRM_FORMAT_YVU411: + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + return 4; + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV61: + case DRM_FORMAT_YUV422: + case DRM_FORMAT_YVU422: + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(drm_format_horz_chroma_subsampling); + +/** + * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor + * @format: pixel format (DRM_FORMAT_*) + * + * Returns: + * The vertical chroma subsampling factor for the + * specified pixel format. + */ +int drm_format_vert_chroma_subsampling(uint32_t format) +{ + switch (format) { + case DRM_FORMAT_YUV410: + case DRM_FORMAT_YVU410: + return 4; + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + return 2; + default: + return 1; + } +} +EXPORT_SYMBOL(drm_format_vert_chroma_subsampling); + +/** + * drm_format_plane_width - width of the plane given the first plane + * @width: width of the first plane + * @format: pixel format + * @plane: plane index + * + * Returns: + * The width of @plane, given that the width of the first plane is @width. + */ +int drm_format_plane_width(int width, uint32_t format, int plane) +{ + if (plane >= drm_format_num_planes(format)) + return 0; + + if (plane == 0) + return width; + + return width / drm_format_horz_chroma_subsampling(format); +} +EXPORT_SYMBOL(drm_format_plane_width); + +/** + * drm_format_plane_height - height of the plane given the first plane + * @height: height of the first plane + * @format: pixel format + * @plane: plane index + * + * Returns: + * The height of @plane, given that the height of the first plane is @height. + */ +int drm_format_plane_height(int height, uint32_t format, int plane) +{ + if (plane >= drm_format_num_planes(format)) + return 0; + + if (plane == 0) + return height; + + return height / drm_format_vert_chroma_subsampling(format); +} +EXPORT_SYMBOL(drm_format_plane_height); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 32156060b9c9..5c19dde1cd31 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -787,7 +787,7 @@ EXPORT_SYMBOL(drm_gem_object_release); * @kref: kref of the object to free * * Called after the last reference to the object has been lost. - * Must be called holding struct_ mutex + * Must be called holding &drm_device->struct_mutex. * * Frees the object */ diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index e1ab008b3f08..1d6c335584ec 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -121,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, return cma_obj; error: - drm->driver->gem_free_object(&cma_obj->base); + drm_gem_object_unreference_unlocked(&cma_obj->base); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(drm_gem_cma_create); @@ -162,18 +162,12 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv, * and handle has the id what user can see. */ ret = drm_gem_handle_create(file_priv, gem_obj, handle); - if (ret) - goto err_handle_create; - /* drop reference from allocate - handle holds it now. 
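 * Even when handle creation failed, dropping the reference here is
 * correct: it is then the last reference, so the unreference also frees
 * the object, which is why the separate error path that called the
 * driver's gem_free_object() could be removed.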
*/ drm_gem_object_unreference_unlocked(gem_obj); + if (ret) + return ERR_PTR(ret); return cma_obj; - -err_handle_create: - drm->driver->gem_free_object(gem_obj); - - return ERR_PTR(ret); } /** diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 0fac801c18fe..76e39c50c90c 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -42,10 +42,6 @@ #include <linux/vgaarb.h> #include <linux/export.h> -/* Access macro for slots in vblank timestamp ringbuffer. */ -#define vblanktimestamp(dev, pipe, count) \ - ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE]) - /* Retry timestamp calculation up to 3 times to satisfy * drm_timestamp_precision before giving up. */ @@ -82,36 +78,18 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe, struct timeval *t_vblank, u32 last) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - u32 tslot; assert_spin_locked(&dev->vblank_time_lock); vblank->last = last; - /* All writers hold the spinlock, but readers are serialized by - * the latching of vblank->count below. - */ - tslot = vblank->count + vblank_count_inc; - vblanktimestamp(dev, pipe, tslot) = *t_vblank; - - /* - * vblank timestamp updates are protected on the write side with - * vblank_time_lock, but on the read side done locklessly using a - * sequence-lock on the vblank counter. Ensure correct ordering using - * memory barrriers. We need the barrier both before and also after the - * counter update to synchronize with the next timestamp write. - * The read-side barriers for this are in drm_vblank_count_and_time. - */ - smp_wmb(); + write_seqlock(&vblank->seqlock); + vblank->time = *t_vblank; vblank->count += vblank_count_inc; - smp_wmb(); + write_sequnlock(&vblank->seqlock); } -/** - * drm_reset_vblank_timestamp - reset the last timestamp to the last vblank - * @dev: DRM device - * @pipe: index of CRTC for which to reset the timestamp - * +/* * Reset the stored timestamp for the current vblank count to correspond * to the last vblank occurred. * @@ -155,11 +133,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe spin_unlock(&dev->vblank_time_lock); } -/** - * drm_update_vblank_count - update the master vblank counter - * @dev: DRM device - * @pipe: counter to update - * +/* * Call back into the driver to update the appropriate vblank counter * (specified by @pipe). Deal with wraparound, if it occurred, and * update the last read value so we can deal with wraparound on the next @@ -205,7 +179,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, const struct timeval *t_old; u64 diff_ns; - t_old = &vblanktimestamp(dev, pipe, vblank->count); + t_old = &vblank->time; diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old); /* @@ -239,49 +213,6 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, diff = 1; } - /* - * FIMXE: Need to replace this hack with proper seqlocks. - * - * Restrict the bump of the software vblank counter to a safe maximum - * value of +1 whenever there is the possibility that concurrent readers - * of vblank timestamps could be active at the moment, as the current - * implementation of the timestamp caching and updating is not safe - * against concurrent readers for calls to store_vblank() with a bump - * of anything but +1. 
A bump != 1 would very likely return corrupted - * timestamps to userspace, because the same slot in the cache could - * be concurrently written by store_vblank() and read by one of those - * readers without the read-retry logic detecting the collision. - * - * Concurrent readers can exist when we are called from the - * drm_vblank_off() or drm_vblank_on() functions and other non-vblank- - * irq callers. However, all those calls to us are happening with the - * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount - * can't increase while we are executing. Therefore a zero refcount at - * this point is safe for arbitrary counter bumps if we are called - * outside vblank irq, a non-zero count is not 100% safe. Unfortunately - * we must also accept a refcount of 1, as whenever we are called from - * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and - * we must let that one pass through in order to not lose vblank counts - * during vblank irq off - which would completely defeat the whole - * point of this routine. - * - * Whenever we are called from vblank irq, we have to assume concurrent - * readers exist or can show up any time during our execution, even if - * the refcount is currently zero, as vblank irqs are usually only - * enabled due to the presence of readers, and because when we are called - * from vblank irq we can't hold the vbl_lock to protect us from sudden - * bumps in vblank refcount. Therefore also restrict bumps to +1 when - * called from vblank irq. - */ - if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 || - (flags & DRM_CALLED_FROM_VBLIRQ))) { - DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u " - "refcount %u, vblirq %u\n", pipe, diff, - atomic_read(&vblank->refcount), - (flags & DRM_CALLED_FROM_VBLIRQ) != 0); - diff = 1; - } - DRM_DEBUG_VBL("updating vblank count on crtc %u:" " current=%u, diff=%u, hw=%u hw_last=%u\n", pipe, vblank->count, diff, cur_vblank, vblank->last); @@ -303,6 +234,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, store_vblank(dev, pipe, diff, &t_vblank, cur_vblank); } +/** + * drm_accurate_vblank_count - retrieve the master vblank counter + * @crtc: which counter to retrieve + * + * This function is similar to drm_crtc_vblank_count() but it + * interpolates to handle a race with vblank IRQs. + * + * This is mostly useful for hardware that can obtain the scanout + * position, but doesn't have a frame counter.
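+ *
+ * A minimal usage sketch (assuming a &drm_crtc named crtc whose driver
+ * implements the &drm_driver.get_vblank_timestamp hook, as the WARN in
+ * the function body requires)::
+ *
+ *	u32 seq = drm_accurate_vblank_count(crtc);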
+ */ +u32 drm_accurate_vblank_count(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + unsigned int pipe = drm_crtc_index(crtc); + u32 vblank; + unsigned long flags; + + WARN(!dev->driver->get_vblank_timestamp, + "This function requires support for accurate vblank timestamps."); + + spin_lock_irqsave(&dev->vblank_time_lock, flags); + + drm_update_vblank_count(dev, pipe, 0); + vblank = drm_vblank_count(dev, pipe); + + spin_unlock_irqrestore(&dev->vblank_time_lock, flags); + + return vblank; +} +EXPORT_SYMBOL(drm_accurate_vblank_count); + /* * Disable vblank irq's on crtc, make sure that last vblank count * of hardware and corresponding consistent software vblank counter @@ -417,6 +379,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs) init_waitqueue_head(&vblank->queue); setup_timer(&vblank->disable_timer, vblank_disable_fn, (unsigned long)vblank); + seqlock_init(&vblank->seqlock); } DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n"); @@ -986,25 +949,19 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, struct timeval *vblanktime) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - int count = DRM_TIMESTAMP_MAXRETRIES; - u32 cur_vblank; + u32 vblank_count; + unsigned int seq; if (WARN_ON(pipe >= dev->num_crtcs)) return 0; - /* - * Vblank timestamps are read lockless. To ensure consistency the vblank - * counter is rechecked and ordering is ensured using memory barriers. - * This works like a seqlock. The write-side barriers are in store_vblank. - */ do { - cur_vblank = vblank->count; - smp_rmb(); - *vblanktime = vblanktimestamp(dev, pipe, cur_vblank); - smp_rmb(); - } while (cur_vblank != vblank->count && --count > 0); + seq = read_seqbegin(&vblank->seqlock); + vblank_count = vblank->count; + *vblanktime = vblank->time; + } while (read_seqretry(&vblank->seqlock, seq)); - return cur_vblank; + return vblank_count; } EXPORT_SYMBOL(drm_vblank_count_and_time); @@ -1044,34 +1001,6 @@ static void send_vblank_event(struct drm_device *dev, } /** - * drm_arm_vblank_event - arm vblank event after pageflip - * @dev: DRM device - * @pipe: CRTC index - * @e: the event to prepare to send - * - * A lot of drivers need to generate vblank events for the very next vblank - * interrupt. For example when the page flip interrupt happens when the page - * flip gets armed, but not when it actually executes within the next vblank - * period. This helper function implements exactly the required vblank arming - * behaviour. - * - * Caller must hold event lock. Caller must also hold a vblank reference for - * the event @e, which will be dropped when the next vblank arrives. - * - * This is the legacy version of drm_crtc_arm_vblank_event(). - */ -void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe, - struct drm_pending_vblank_event *e) -{ - assert_spin_locked(&dev->event_lock); - - e->pipe = pipe; - e->event.sequence = drm_vblank_count(dev, pipe); - list_add_tail(&e->base.link, &dev->vblank_event_list); -} -EXPORT_SYMBOL(drm_arm_vblank_event); - -/** * drm_crtc_arm_vblank_event - arm vblank event after pageflip * @crtc: the source CRTC of the vblank event * @e: the event to send @@ -1084,32 +1013,35 @@ EXPORT_SYMBOL(drm_arm_vblank_event); * * Caller must hold event lock. Caller must also hold a vblank reference for * the event @e, which will be dropped when the next vblank arrives. - * - * This is the native KMS version of drm_arm_vblank_event(). 
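+ *
+ * A typical caller in a driver's atomic_flush hook, following the same
+ * pattern this series adds to the fsl-dcu and kirin drivers below,
+ * looks like::
+ *
+ *	spin_lock_irq(&crtc->dev->event_lock);
+ *	if (drm_crtc_vblank_get(crtc) == 0)
+ *		drm_crtc_arm_vblank_event(crtc, event);
+ *	else
+ *		drm_crtc_send_vblank_event(crtc, event);
+ *	spin_unlock_irq(&crtc->dev->event_lock);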
*/ void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e) { - drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e); + struct drm_device *dev = crtc->dev; + unsigned int pipe = drm_crtc_index(crtc); + + assert_spin_locked(&dev->event_lock); + + e->pipe = pipe; + e->event.sequence = drm_vblank_count(dev, pipe); + list_add_tail(&e->base.link, &dev->vblank_event_list); } EXPORT_SYMBOL(drm_crtc_arm_vblank_event); /** - * drm_send_vblank_event - helper to send vblank event after pageflip - * @dev: DRM device - * @pipe: CRTC index + * drm_crtc_send_vblank_event - helper to send vblank event after pageflip + * @crtc: the source CRTC of the vblank event * @e: the event to send * * Updates sequence # and timestamp on event, and sends it to userspace. * Caller must hold event lock. - * - * This is the legacy version of drm_crtc_send_vblank_event(). */ -void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, - struct drm_pending_vblank_event *e) +void drm_crtc_send_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e) { + struct drm_device *dev = crtc->dev; + unsigned int seq, pipe = drm_crtc_index(crtc); struct timeval now; - unsigned int seq; if (dev->num_crtcs > 0) { seq = drm_vblank_count_and_time(dev, pipe, &now); @@ -1121,23 +1053,6 @@ void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, e->pipe = pipe; send_vblank_event(dev, e, seq, &now); } -EXPORT_SYMBOL(drm_send_vblank_event); - -/** - * drm_crtc_send_vblank_event - helper to send vblank event after pageflip - * @crtc: the source CRTC of the vblank event - * @e: the event to send - * - * Updates sequence # and timestamp on event, and sends it to userspace. - * Caller must hold event lock. - * - * This is the native KMS version of drm_send_vblank_event(). - */ -void drm_crtc_send_vblank_event(struct drm_crtc *crtc, - struct drm_pending_vblank_event *e) -{ - drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e); -} EXPORT_SYMBOL(drm_crtc_send_vblank_event); /** @@ -1193,7 +1108,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe) * Returns: * Zero on success or a negative error code on failure. */ -int drm_vblank_get(struct drm_device *dev, unsigned int pipe) +static int drm_vblank_get(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; unsigned long irqflags; @@ -1219,7 +1134,6 @@ int drm_vblank_get(struct drm_device *dev, unsigned int pipe) return ret; } -EXPORT_SYMBOL(drm_vblank_get); /** * drm_crtc_vblank_get - get a reference count on vblank events @@ -1228,8 +1142,6 @@ EXPORT_SYMBOL(drm_vblank_get); * Acquire a reference count on vblank events to avoid having them disabled * while in use. * - * This is the native kms version of drm_vblank_get(). - * * Returns: * Zero on success or a negative error code on failure. */ @@ -1249,7 +1161,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get); * * This is the legacy version of drm_crtc_vblank_put(). 
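 *
 * Note that this patch makes both drm_vblank_get() and drm_vblank_put()
 * static and drops their EXPORT_SYMBOL, so drivers now have to use the
 * drm_crtc_vblank_get()/drm_crtc_vblank_put() variants.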
*/ -void drm_vblank_put(struct drm_device *dev, unsigned int pipe) +static void drm_vblank_put(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; @@ -1270,7 +1182,6 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe) jiffies + ((drm_vblank_offdelay * HZ)/1000)); } } -EXPORT_SYMBOL(drm_vblank_put); /** * drm_crtc_vblank_put - give up ownership of vblank events @@ -1278,8 +1189,6 @@ EXPORT_SYMBOL(drm_vblank_put); * * Release ownership of a given vblank counter, turning off interrupts * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. - * - * This is the native kms version of drm_vblank_put(). */ void drm_crtc_vblank_put(struct drm_crtc *crtc) { diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index f5d80839a90c..49311fc61d5d 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -60,6 +60,21 @@ static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv) return 0; } +static int mipi_dsi_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); + int err; + + err = of_device_uevent_modalias(dev, env); + if (err != -ENODEV) + return err; + + add_uevent_var(env, "MODALIAS=%s%s", MIPI_DSI_MODULE_PREFIX, + dsi->name); + + return 0; +} + static const struct dev_pm_ops mipi_dsi_device_pm_ops = { .runtime_suspend = pm_generic_runtime_suspend, .runtime_resume = pm_generic_runtime_resume, @@ -74,6 +89,7 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = { static struct bus_type mipi_dsi_bus_type = { .name = "mipi-dsi", .match = mipi_dsi_device_match, + .uevent = mipi_dsi_uevent, .pm = &mipi_dsi_device_pm_ops, }; @@ -983,6 +999,28 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on); /** + * mipi_dsi_set_tear_scanline() - turn on the display module's Tearing Effect + * output signal on the TE signal line when the display module reaches line N + * defined by STS[n:0].
+ * @dsi: DSI peripheral device + * @param: STS[10:0] + * Return: 0 on success or a negative error code on failure + */ +int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param) +{ + u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, param >> 8, + param & 0xff }; + ssize_t err; + + err = mipi_dsi_generic_write(dsi, payload, sizeof(payload)); + if (err < 0) + return err; + + return 0; +} +EXPORT_SYMBOL(mipi_dsi_set_tear_scanline); + +/** + * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image + * data used by the interface + * @dsi: DSI peripheral device diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 04de6fd88f8c..cb39f45d6a16 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -179,12 +179,14 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) { struct drm_mm_node *hole; - u64 end = node->start + node->size; + u64 end; u64 hole_start; u64 hole_end; BUG_ON(node == NULL); + end = node->start + node->size; + /* Find the relevant hole to add our node to */ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { if (hole_start > node->start || hole_end < end) diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 7def3d58da18..fc5040ae5f25 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -544,6 +544,7 @@ EXPORT_SYMBOL(drm_gtf_mode_complex); * * This function is to create the modeline based on the GTF algorithm. * Generalized Timing Formula is derived from: + * * GTF Spreadsheet by Andy Morrish (1/5/97) * available at http://www.vesa.org * @@ -552,7 +553,8 @@ EXPORT_SYMBOL(drm_gtf_mode_complex); * I also refer to the function of fb_get_mode in the file of * drivers/video/fbmon.c * - * Standard GTF parameters: + * Standard GTF parameters:: + * * M = 600 * C = 40 * K = 128 @@ -1518,6 +1520,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out, if (out->status != MODE_OK) goto out; + drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V); + ret = 0; out: diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c index e3a4adf03e7b..61146f5b4f56 100644 --- a/drivers/gpu/drm/drm_modeset_lock.c +++ b/drivers/gpu/drm/drm_modeset_lock.c @@ -30,14 +30,14 @@ * * As KMS moves toward more fine grained locking, and atomic ioctl where * userspace can indirectly control locking order, it becomes necessary - * to use ww_mutex and acquire-contexts to avoid deadlocks. But because + * to use &ww_mutex and acquire-contexts to avoid deadlocks. But because * the locking is more distributed around the driver code, we want a bit * of extra utility/tracking out of our acquire-ctx. This is provided * by drm_modeset_lock / drm_modeset_acquire_ctx. * - * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt + * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt * - * The basic usage pattern is to: + * The basic usage pattern is to:: * * drm_modeset_acquire_init(&ctx) * retry: @@ -51,6 +51,13 @@ * ... do stuff ... * drm_modeset_drop_locks(&ctx); * drm_modeset_acquire_fini(&ctx); + * + * On top of these per-object locks using &ww_mutex there's also an overall + * dev->mode_config.lock, for protecting everything else. Mostly this means + * probe state of connectors, and preventing hotplug add/removal of connectors. + * + * Finally there's a bunch of dedicated locks to protect drm core internal + * lists and lookup data structures.
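+ *
+ * As a concrete sketch, the "lock list of locks" step in the pattern
+ * above typically expands into per-object calls such as::
+ *
+ *	err = drm_modeset_lock(&crtc->mutex, &ctx);
+ *	if (err == -EDEADLK) {
+ *		drm_modeset_backoff(&ctx);
+ *		goto retry;
+ *	}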
*/ /** diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 369d2898ff9e..fc51306fe365 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -219,10 +219,12 @@ EXPORT_SYMBOL(drm_plane_helper_check_update); * * Note that we make some assumptions about hardware limitations that may not be * true for all hardware -- - * 1) Primary plane cannot be repositioned. - * 2) Primary plane cannot be scaled. - * 3) Primary plane must cover the entire CRTC. - * 4) Subpixel positioning is not supported. + * + * 1. Primary plane cannot be repositioned. + * 2. Primary plane cannot be scaled. + * 3. Primary plane must cover the entire CRTC. + * 4. Subpixel positioning is not supported. + * * Drivers for hardware that don't have these restrictions can provide their * own implementation rather than using this helper. * diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index aab0f3f1f42d..780589b420a4 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -593,7 +593,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev, get_dma_buf(dma_buf); } - /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ + /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */ ret = drm_gem_handle_create_tail(file_priv, obj, handle); drm_gem_object_unreference_unlocked(obj); if (ret) @@ -601,11 +601,10 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev, ret = drm_prime_add_buf_handle(&file_priv->prime, dma_buf, *handle); + mutex_unlock(&file_priv->prime.lock); if (ret) goto fail; - mutex_unlock(&file_priv->prime.lock); - dma_buf_put(dma_buf); return 0; @@ -615,11 +614,14 @@ fail: * to detach.. which seems ok.. */ drm_gem_handle_delete(file_priv, *handle); + dma_buf_put(dma_buf); + return ret; + out_unlock: mutex_unlock(&dev->object_name_lock); out_put: - dma_buf_put(dma_buf); mutex_unlock(&file_priv->prime.lock); + dma_buf_put(dma_buf); return ret; } EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 0329080d7f7c..a0df377d7d1c 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -82,13 +82,30 @@ drm_mode_validate_flag(const struct drm_display_mode *mode, static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) { + struct drm_cmdline_mode *cmdline_mode; struct drm_display_mode *mode; - if (!connector->cmdline_mode.specified) + cmdline_mode = &connector->cmdline_mode; + if (!cmdline_mode->specified) return 0; + /* Only add a GTF mode if we find no matching probed modes */ + list_for_each_entry(mode, &connector->probed_modes, head) { + if (mode->hdisplay != cmdline_mode->xres || + mode->vdisplay != cmdline_mode->yres) + continue; + + if (cmdline_mode->refresh_specified) { + /* The probed mode's vrefresh is set until later */ + if (drm_mode_vrefresh(mode) != cmdline_mode->refresh) + continue; + } + + return 0; + } + mode = drm_mode_create_from_cmdline_mode(connector->dev, - &connector->cmdline_mode); + cmdline_mode); if (mode == NULL) return 0; diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c new file mode 100644 index 000000000000..b2071d495ada --- /dev/null +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -0,0 +1,205 @@ +/* + * Copyright (C) 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License 
as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_simple_kms_helper.h> +#include <linux/slab.h> + +/** + * DOC: overview + * + * This helper library provides helpers for drivers for simple display + * hardware. + * + * drm_simple_display_pipe_init() initializes a simple display pipeline + * which has only one full-screen scanout buffer feeding one output. The + * pipeline is represented by struct &drm_simple_display_pipe and binds + * together &drm_plane, &drm_crtc and &drm_encoder structures into one fixed + * entity. Some flexibility for code reuse is provided through a separately + * allocated &drm_connector object and supporting optional &drm_bridge + * encoder drivers. + */ + +static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(crtc, struct drm_simple_display_pipe, crtc); + if (!pipe->funcs || !pipe->funcs->enable) + return; + + pipe->funcs->enable(pipe, crtc->state); +} + +static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(crtc, struct drm_simple_display_pipe, crtc); + if (!pipe->funcs || !pipe->funcs->disable) + return; + + pipe->funcs->disable(pipe); +} + +static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = { + .disable = drm_simple_kms_crtc_disable, + .enable = drm_simple_kms_crtc_enable, +}; + +static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = { + .reset = drm_atomic_helper_crtc_reset, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *plane_state) +{ + struct drm_rect src = { + .x1 = plane_state->src_x, + .y1 = plane_state->src_y, + .x2 = plane_state->src_x + plane_state->src_w, + .y2 = plane_state->src_y + plane_state->src_h, + }; + struct drm_rect dest = { + .x1 = plane_state->crtc_x, + .y1 = plane_state->crtc_y, + .x2 = plane_state->crtc_x + plane_state->crtc_w, + .y2 = plane_state->crtc_y + plane_state->crtc_h, + }; + struct drm_rect clip = { 0 }; + struct drm_simple_display_pipe *pipe; + struct drm_crtc_state *crtc_state; + bool visible; + int ret; + + pipe = container_of(plane, struct drm_simple_display_pipe, plane); + crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state, + &pipe->crtc); + if (crtc_state->enable != !!plane_state->crtc) + return -EINVAL; /* plane must match crtc enable state */ + + if (!crtc_state->enable) + return 0; /* nothing to check when disabling or disabled */ + + clip.x2 = crtc_state->adjusted_mode.hdisplay; + clip.y2 = crtc_state->adjusted_mode.vdisplay; + ret = drm_plane_helper_check_update(plane, &pipe->crtc, + plane_state->fb, + &src, &dest, &clip, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + false, true, &visible); + if (ret) + return ret; + + if (!visible) + return -EINVAL; + + if (!pipe->funcs || !pipe->funcs->check) + return 0; + + return 
pipe->funcs->check(pipe, plane_state, crtc_state); +} + +static void drm_simple_kms_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *pstate) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(plane, struct drm_simple_display_pipe, plane); + if (!pipe->funcs || !pipe->funcs->update) + return; + + pipe->funcs->update(pipe, pstate); +} + +static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = { + .atomic_check = drm_simple_kms_plane_atomic_check, + .atomic_update = drm_simple_kms_plane_atomic_update, +}; + +static const struct drm_plane_funcs drm_simple_kms_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +/** + * drm_simple_display_pipe_init - Initialize a simple display pipeline + * @dev: DRM device + * @pipe: simple display pipe object to initialize + * @funcs: callbacks for the display pipe (optional) + * @formats: array of supported formats (%DRM_FORMAT_*) + * @format_count: number of elements in @formats + * @connector: connector to attach and register + * + * Sets up a display pipeline which consists of a really simple + * plane-crtc-encoder pipe coupled with the provided connector. + * Teardown of a simple display pipe is all handled automatically by the drm + * core through calling drm_mode_config_cleanup(). Drivers afterwards need to + * release the memory for the structure themselves. + * + * Returns: + * Zero on success, negative error code on failure. + */ +int drm_simple_display_pipe_init(struct drm_device *dev, + struct drm_simple_display_pipe *pipe, + const struct drm_simple_display_pipe_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + struct drm_connector *connector) +{ + struct drm_encoder *encoder = &pipe->encoder; + struct drm_plane *plane = &pipe->plane; + struct drm_crtc *crtc = &pipe->crtc; + int ret; + + pipe->connector = connector; + pipe->funcs = funcs; + + drm_plane_helper_add(plane, &drm_simple_kms_plane_helper_funcs); + ret = drm_universal_plane_init(dev, plane, 0, + &drm_simple_kms_plane_funcs, + formats, format_count, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) + return ret; + + drm_crtc_helper_add(crtc, &drm_simple_kms_crtc_helper_funcs); + ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL, + &drm_simple_kms_crtc_funcs, NULL); + if (ret) + return ret; + + encoder->possible_crtcs = 1 << drm_crtc_index(crtc); + ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs, + DRM_MODE_ENCODER_NONE, NULL); + if (ret) + return ret; + + return drm_mode_connector_attach_encoder(connector, encoder); +} +EXPORT_SYMBOL(drm_simple_display_pipe_init); + +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index fa7fadce8063..32dd821b7202 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -32,75 +32,6 @@ static struct device_type drm_sysfs_device_minor = { struct class *drm_class; -/** - * __drm_class_suspend - internal DRM class suspend routine - * @dev: Linux device to suspend - * @state: power state to enter - * - * Just figures out what the actual struct drm_device associated with - * @dev is and calls its suspend hook, if present.
- */ -static int __drm_class_suspend(struct device *dev, pm_message_t state) -{ - if (dev->type == &drm_sysfs_device_minor) { - struct drm_minor *drm_minor = to_drm_minor(dev); - struct drm_device *drm_dev = drm_minor->dev; - - if (drm_minor->type == DRM_MINOR_LEGACY && - !drm_core_check_feature(drm_dev, DRIVER_MODESET) && - drm_dev->driver->suspend) - return drm_dev->driver->suspend(drm_dev, state); - } - return 0; -} - -/** - * drm_class_suspend - internal DRM class suspend hook. Simply calls - * __drm_class_suspend() with the correct pm state. - * @dev: Linux device to suspend - */ -static int drm_class_suspend(struct device *dev) -{ - return __drm_class_suspend(dev, PMSG_SUSPEND); -} - -/** - * drm_class_freeze - internal DRM class freeze hook. Simply calls - * __drm_class_suspend() with the correct pm state. - * @dev: Linux device to freeze - */ -static int drm_class_freeze(struct device *dev) -{ - return __drm_class_suspend(dev, PMSG_FREEZE); -} - -/** - * drm_class_resume - DRM class resume hook - * @dev: Linux device to resume - * - * Just figures out what the actual struct drm_device associated with - * @dev is and calls its resume hook, if present. - */ -static int drm_class_resume(struct device *dev) -{ - if (dev->type == &drm_sysfs_device_minor) { - struct drm_minor *drm_minor = to_drm_minor(dev); - struct drm_device *drm_dev = drm_minor->dev; - - if (drm_minor->type == DRM_MINOR_LEGACY && - !drm_core_check_feature(drm_dev, DRIVER_MODESET) && - drm_dev->driver->resume) - return drm_dev->driver->resume(drm_dev); - } - return 0; -} - -static const struct dev_pm_ops drm_class_dev_pm_ops = { - .suspend = drm_class_suspend, - .resume = drm_class_resume, - .freeze = drm_class_freeze, -}; - static char *drm_devnode(struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev)); @@ -131,8 +62,6 @@ int drm_sysfs_init(void) if (IS_ERR(drm_class)) return PTR_ERR(drm_class); - drm_class->pm = &drm_class_dev_pm_ops; - err = class_create_file(drm_class, &class_attr_version.attr); if (err) { class_destroy(drm_class); diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c index 2f2ecde8285b..f306c8855978 100644 --- a/drivers/gpu/drm/drm_vma_manager.c +++ b/drivers/gpu/drm/drm_vma_manager.c @@ -127,6 +127,9 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy); * used to implement weakly referenced lookups using kref_get_unless_zero(). 
* * Example: + * + * :: + * * drm_vma_offset_lock_lookup(mgr); * node = drm_vma_offset_lookup_locked(mgr); * if (node) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index f5321e2f25ff..a69cdd526bf8 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -125,7 +125,7 @@ struct etnaviv_gpu { u32 completed_fence; u32 retired_fence; wait_queue_head_t fence_event; - unsigned int fence_context; + u64 fence_context; spinlock_t fence_spinlock; /* worker for handling active-list retiring: */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c index 5e38e749ac17..ad6b73c7fc59 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c @@ -93,17 +93,8 @@ static int exynos_dpi_get_modes(struct drm_connector *connector) return 0; } -static struct drm_encoder * -exynos_dpi_best_encoder(struct drm_connector *connector) -{ - struct exynos_dpi *ctx = connector_to_dpi(connector); - - return &ctx->encoder; -} - static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = { .get_modes = exynos_dpi_get_modes, - .best_encoder = exynos_dpi_best_encoder, }; static int exynos_dpi_create_connector(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 2dd820e23b0c..4a679fb9bb02 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -267,6 +267,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, { struct exynos_drm_private *priv = dev->dev_private; struct exynos_atomic_commit *commit; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; int i, ret; commit = kzalloc(sizeof(*commit), GFP_KERNEL); @@ -288,10 +290,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, /* Wait until all affected CRTCs have completed previous commits and * mark them as pending. 
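 * (drm_crtc_mask(crtc) expands to 1 << drm_crtc_index(crtc), so the
 * for_each_crtc_in_state() loop below builds the same bitmask as the
 * open-coded version it replaces.)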
*/ - for (i = 0; i < dev->mode_config.num_crtc; ++i) { - if (state->crtcs[i]) - commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]); - } + for_each_crtc_in_state(state, crtc, crtc_state, i) + commit->crtcs |= drm_crtc_mask(crtc); wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs)); @@ -299,7 +299,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, priv->pending |= commit->crtcs; spin_unlock(&priv->lock); - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) schedule_work(&commit->work); diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 601ecf8006a7..e07cb1fe4860 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1566,17 +1566,8 @@ static int exynos_dsi_get_modes(struct drm_connector *connector) return 0; } -static struct drm_encoder * -exynos_dsi_best_encoder(struct drm_connector *connector) -{ - struct exynos_dsi *dsi = connector_to_dsi(connector); - - return &dsi->encoder; -} - static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = { .get_modes = exynos_dsi_get_modes, - .best_encoder = exynos_dsi_best_encoder, }; static int exynos_dsi_create_connector(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 55f1d37c666a..77f12c00abf9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -242,7 +242,7 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config, state->v_ratio == (1 << 15)) height_ok = true; - if (width_ok & height_ok) + if (width_ok && height_ok) return 0; DRM_DEBUG_KMS("scaling mode is not supported"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 608b0afa337f..e8f6c92b2a36 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -378,16 +378,8 @@ static int vidi_get_modes(struct drm_connector *connector) return drm_add_edid_modes(connector, edid); } -static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector) -{ - struct vidi_context *ctx = ctx_from_connector(connector); - - return &ctx->encoder; -} - static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = { .get_modes = vidi_get_modes, - .best_encoder = vidi_best_encoder, }; static int vidi_create_connector(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 58de5a430508..1625d7c8a319 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -937,17 +937,9 @@ static int hdmi_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector) -{ - struct hdmi_context *hdata = connector_to_hdmi(connector); - - return &hdata->encoder; -} - static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { .get_modes = hdmi_get_modes, .mode_valid = hdmi_mode_valid, - .best_encoder = hdmi_best_encoder, }; static int hdmi_create_connector(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index 89c0084c2814..706de3278f1c 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -22,20 +22,21 @@ 
#include "fsl_dcu_drm_drv.h" #include "fsl_dcu_drm_plane.h" -static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc, +static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { -} + struct drm_pending_vblank_event *event = crtc->state->event; -static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ - return 0; -} + if (event) { + crtc->state->event = NULL; -static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state) -{ + spin_lock_irq(&crtc->dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); + } } static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) @@ -117,8 +118,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) } static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { - .atomic_begin = fsl_dcu_drm_crtc_atomic_begin, - .atomic_check = fsl_dcu_drm_crtc_atomic_check, .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, .disable = fsl_dcu_drm_disable_crtc, .enable = fsl_dcu_drm_crtc_enable, diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 0ec1ad961e0d..33727d5d826a 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -198,7 +198,7 @@ static struct drm_driver fsl_dcu_drm_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = fsl_dcu_drm_enable_vblank, .disable_vblank = fsl_dcu_drm_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index 98c998da91eb..0b0989e503ea 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -102,14 +102,6 @@ static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = { .reset = drm_atomic_helper_connector_reset, }; -static struct drm_encoder * -fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector) -{ - struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector); - - return fsl_con->encoder; -} - static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector) { struct fsl_dcu_drm_connector *fsl_connector; @@ -136,7 +128,6 @@ static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector, } static const struct drm_connector_helper_funcs connector_helper_funcs = { - .best_encoder = fsl_dcu_drm_connector_best_encoder, .get_modes = fsl_dcu_drm_connector_get_modes, .mode_valid = fsl_dcu_drm_connector_mode_valid, }; diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index c95406e6f44d..1a1cf7a3b5ef 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -175,20 +175,21 @@ void gma_crtc_load_lut(struct drm_crtc *crtc) } } -void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, - u32 start, u32 size) +int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, + u32 size) { struct gma_crtc *gma_crtc = to_gma_crtc(crtc); int i; - int end = (start + size > 256) ? 
256 : start + size; - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { gma_crtc->lut_r[i] = red[i] >> 8; gma_crtc->lut_g[i] = green[i] >> 8; gma_crtc->lut_b[i] = blue[i] >> 8; } gma_crtc_load_lut(crtc); + + return 0; } /** @@ -281,7 +282,7 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode) REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); /* Turn off vblank interrupts */ - drm_vblank_off(dev, pipe); + drm_crtc_vblank_off(crtc); /* Wait for vblank for the disable to take effect */ gma_wait_for_vblank(dev); diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h index b2491c65f053..e72dd08b701b 100644 --- a/drivers/gpu/drm/gma500/gma_display.h +++ b/drivers/gpu/drm/gma500/gma_display.h @@ -72,8 +72,8 @@ extern int gma_crtc_cursor_set(struct drm_crtc *crtc, uint32_t width, uint32_t height); extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); extern void gma_crtc_load_lut(struct drm_crtc *crtc); -extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, u32 start, u32 size); +extern int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, u32 size); extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode); extern void gma_crtc_prepare(struct drm_crtc *crtc); extern void gma_crtc_commit(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 398015be87e4..7b6c84925098 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -491,7 +491,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe, struct drm_psb_private *dev_priv = dev->dev_private; struct gma_crtc *gma_crtc; int i; - uint16_t *r_base, *g_base, *b_base; /* We allocate a extra array of drm_connector pointers * for fbdev after the crtc */ @@ -519,16 +518,10 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe, gma_crtc->pipe = pipe; gma_crtc->plane = pipe; - r_base = gma_crtc->base.gamma_store; - g_base = r_base + 256; - b_base = g_base + 256; for (i = 0; i < 256; i++) { gma_crtc->lut_r[i] = i; gma_crtc->lut_g[i] = i; gma_crtc->lut_b[i] = i; - r_base[i] = i << 8; - g_base[i] = i << 8; - b_base[i] = i << 8; gma_crtc->lut_adj[i] = 0; } diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index fba6372d060e..ed76baad525f 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -502,13 +502,6 @@ static void ade_crtc_disable(struct drm_crtc *crtc) acrtc->enable = false; } -static int ade_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) -{ - /* do nothing */ - return 0; -} - static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct ade_crtc *acrtc = to_ade_crtc(crtc); @@ -537,6 +530,7 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc, { struct ade_crtc *acrtc = to_ade_crtc(crtc); struct ade_hw_ctx *ctx = acrtc->ctx; + struct drm_pending_vblank_event *event = crtc->state->event; void __iomem *base = ctx->base; /* only crtc is enabled regs take effect */ @@ -545,12 +539,22 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc, /* flush ade registers */ writel(ADE_ENABLE, base + ADE_EN); } + + if (event) { + crtc->state->event = NULL; + + spin_lock_irq(&crtc->dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + 
spin_unlock_irq(&crtc->dev->event_lock); + } } static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = { .enable = ade_crtc_enable, .disable = ade_crtc_disable, - .atomic_check = ade_crtc_atomic_check, .mode_set_nofb = ade_crtc_mode_set_nofb, .atomic_begin = ade_crtc_atomic_begin, .atomic_flush = ade_crtc_atomic_flush, diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 3f94785fbcca..193657259ee9 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -173,7 +173,7 @@ static struct drm_driver kirin_drm_driver = { .fops = &kirin_drm_fops, .set_busid = drm_platform_set_busid, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = kirin_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 0b88ba0f3c1f..7e2944406b8f 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -59,6 +59,7 @@ i915-y += intel_audio.o \ intel_bios.o \ intel_color.o \ intel_display.o \ + intel_dpio_phy.o \ intel_dpll_mgr.o \ intel_fbc.o \ intel_fifo_underrun.o \ @@ -81,10 +82,12 @@ i915-y += dvo_ch7017.o \ dvo_tfp410.o \ intel_crt.o \ intel_ddi.o \ + intel_dp_aux_backlight.o \ intel_dp_link_training.o \ intel_dp_mst.o \ intel_dp.o \ intel_dsi.o \ + intel_dsi_dcs_backlight.o \ intel_dsi_panel_vbt.o \ intel_dsi_pll.o \ intel_dvo.o \ diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index a337f33bec5b..d97f28bfa9db 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = { CMD( MI_RS_CONTEXT, SMI, F, 1, S ), CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), - CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ), + CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W, + .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ), CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ), CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ), CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ), @@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) int cmd_table_count; int ret; - if (!IS_GEN7(engine->dev)) + if (!IS_GEN7(engine->i915)) return 0; switch (engine->id) { case RCS: - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { cmd_tables = hsw_render_ring_cmds; cmd_table_count = ARRAY_SIZE(hsw_render_ring_cmds); @@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) cmd_table_count = ARRAY_SIZE(gen7_render_cmds); } - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { engine->reg_tables = hsw_render_reg_tables; engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables); } else { @@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; case BCS: - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { cmd_tables = hsw_blt_ring_cmds; cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); } else { @@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); } - if (IS_HASWELL(engine->dev)) { + if (IS_HASWELL(engine->i915)) { engine->reg_tables = hsw_blt_reg_tables; 
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); } else { @@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine) if (!engine->needs_cmd_parser) return false; - if (!USES_PPGTT(engine->dev)) + if (!USES_PPGTT(engine->i915)) return false; return (i915.enable_cmd_parser == 1); @@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine, return false; } + if (desc->cmd.value == MI_LOAD_REGISTER_REG) { + DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n"); + return false; + } + if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1)) *oacontrol_set = (cmd[offset + 1] != 0); } @@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine, return false; } + if (desc->cmd.value == MI_LOAD_REGISTER_REG) { + DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n", + reg_addr); + return false; + } + if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) && (offset + 2 > length || (cmd[offset + 1] & reg->mask) != reg->value)) { @@ -1275,8 +1287,21 @@ int i915_parse_cmds(struct intel_engine_cs *engine, * * Return: the current version number of the cmd parser */ -int i915_cmd_parser_get_version(void) +int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) { + struct intel_engine_cs *engine; + bool active = false; + + /* If the command parser is not enabled, report 0 - unsupported */ + for_each_engine(engine, dev_priv) { + if (i915_needs_cmd_parser(engine)) { + active = true; + break; + } + } + if (!active) + return 0; + /* * Command parser version history * @@ -1288,6 +1313,7 @@ int i915_cmd_parser_get_version(void) * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3. * 5. GPGPU dispatch compute indirect registers. * 6. TIMESTAMP register and Haswell CS GPR registers + * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers. */ - return 6; + return 7; } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 32690332d441..614ac085e51f 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data) return 0; } -static const char get_active_flag(struct drm_i915_gem_object *obj) +static char get_active_flag(struct drm_i915_gem_object *obj) { return obj->active ? '*' : ' '; } -static const char get_pin_flag(struct drm_i915_gem_object *obj) +static char get_pin_flag(struct drm_i915_gem_object *obj) { return obj->pin_display ? 'p' : ' '; } -static const char get_tiling_flag(struct drm_i915_gem_object *obj) +static char get_tiling_flag(struct drm_i915_gem_object *obj) { switch (obj->tiling_mode) { default: @@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj) } } -static inline const char get_global_flag(struct drm_i915_gem_object *obj) +static char get_global_flag(struct drm_i915_gem_object *obj) { return i915_gem_obj_to_ggtt(obj) ? 'g' : ' '; } -static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj) +static char get_pin_mapped_flag(struct drm_i915_gem_object *obj) { return obj->mapping ? 'M' : ' '; } @@ -199,13 +199,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); } -static void describe_ctx(struct seq_file *m, struct intel_context *ctx) -{ - seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i'); - seq_putc(m, ctx->remap_slice ? 
'R' : 'r'); - seq_putc(m, ' '); -} - static int i915_gem_object_list_info(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; @@ -424,6 +417,42 @@ static void print_batch_pool_stats(struct seq_file *m, print_file_stats(m, "[k]batch pool", stats); } +static int per_file_ctx_stats(int id, void *ptr, void *data) +{ + struct i915_gem_context *ctx = ptr; + int n; + + for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) { + if (ctx->engine[n].state) + per_file_stats(0, ctx->engine[n].state, data); + if (ctx->engine[n].ringbuf) + per_file_stats(0, ctx->engine[n].ringbuf->obj, data); + } + + return 0; +} + +static void print_context_stats(struct seq_file *m, + struct drm_i915_private *dev_priv) +{ + struct file_stats stats; + struct drm_file *file; + + memset(&stats, 0, sizeof(stats)); + + mutex_lock(&dev_priv->dev->struct_mutex); + if (dev_priv->kernel_context) + per_file_ctx_stats(0, dev_priv->kernel_context, &stats); + + list_for_each_entry(file, &dev_priv->dev->filelist, lhead) { + struct drm_i915_file_private *fpriv = file->driver_priv; + idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats); + } + mutex_unlock(&dev_priv->dev->struct_mutex); + + print_file_stats(m, "[k]contexts", stats); +} + #define count_vmas(list, member) do { \ list_for_each_entry(vma, list, member) { \ size += i915_gem_obj_total_ggtt_size(vma->obj); \ @@ -528,10 +557,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data) seq_putc(m, '\n'); print_batch_pool_stats(m, dev_priv); - mutex_unlock(&dev->struct_mutex); mutex_lock(&dev->filelist_mutex); + print_context_stats(m, dev_priv); list_for_each_entry_reverse(file, &dev->filelist, lhead) { struct file_stats stats; struct task_struct *task; @@ -607,18 +636,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) for_each_intel_crtc(dev, crtc) { const char pipe = pipe_name(crtc->pipe); const char plane = plane_name(crtc->plane); - struct intel_unpin_work *work; + struct intel_flip_work *work; spin_lock_irq(&dev->event_lock); - work = crtc->unpin_work; + work = crtc->flip_work; if (work == NULL) { seq_printf(m, "No flip due on pipe %c (plane %c)\n", pipe, plane); } else { + u32 pending; u32 addr; - if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { - seq_printf(m, "Flip queued on pipe %c (plane %c)\n", + pending = atomic_read(&work->pending); + if (pending) { + seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n", pipe, plane); } else { seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", @@ -638,11 +669,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", work->flip_queued_vblank, work->flip_ready_vblank, - drm_crtc_vblank_count(&crtc->base)); - if (work->enable_stall_check) - seq_puts(m, "Stall check enabled, "); - else - seq_puts(m, "Stall check waiting for page flip ioctl, "); + intel_crtc_get_vblank_counter(crtc)); seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); if (INTEL_INFO(dev)->gen >= 4) @@ -1281,6 +1308,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) } seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); + seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep); seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); seq_printf(m, "Render p-state ratio: %d\n", (gt_perf_status & (IS_GEN9(dev) ? 
0x1ff00 : 0xff00)) >> 8); @@ -1383,7 +1411,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) seqno[id] = engine->get_seqno(engine); } - i915_get_extra_instdone(dev, instdone); + i915_get_extra_instdone(dev_priv, instdone); intel_runtime_pm_put(dev_priv); @@ -1991,8 +2019,7 @@ static int i915_context_status(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - struct intel_context *ctx; - enum intel_engine_id id; + struct i915_gem_context *ctx; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -2000,32 +2027,36 @@ static int i915_context_status(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->context_list, link) { - if (!i915.enable_execlists && - ctx->legacy_hw_ctx.rcs_state == NULL) - continue; - - seq_puts(m, "HW context "); - describe_ctx(m, ctx); - if (ctx == dev_priv->kernel_context) - seq_printf(m, "(kernel context) "); + seq_printf(m, "HW context %u ", ctx->hw_id); + if (IS_ERR(ctx->file_priv)) { + seq_puts(m, "(deleted) "); + } else if (ctx->file_priv) { + struct pid *pid = ctx->file_priv->file->pid; + struct task_struct *task; - if (i915.enable_execlists) { - seq_putc(m, '\n'); - for_each_engine_id(engine, dev_priv, id) { - struct drm_i915_gem_object *ctx_obj = - ctx->engine[id].state; - struct intel_ringbuffer *ringbuf = - ctx->engine[id].ringbuf; - - seq_printf(m, "%s: ", engine->name); - if (ctx_obj) - describe_obj(m, ctx_obj); - if (ringbuf) - describe_ctx_ringbuf(m, ringbuf); - seq_putc(m, '\n'); + task = get_pid_task(pid, PIDTYPE_PID); + if (task) { + seq_printf(m, "(%s [%d]) ", + task->comm, task->pid); + put_task_struct(task); } } else { - describe_obj(m, ctx->legacy_hw_ctx.rcs_state); + seq_puts(m, "(kernel) "); + } + + seq_putc(m, ctx->remap_slice ? 'R' : 'r'); + seq_putc(m, '\n'); + + for_each_engine(engine, dev_priv) { + struct intel_context *ce = &ctx->engine[engine->id]; + + seq_printf(m, "%s: ", engine->name); + seq_putc(m, ce->initialised ? 
'I' : 'i'); + if (ce->state) + describe_obj(m, ce->state); + if (ce->ringbuf) + describe_ctx_ringbuf(m, ce->ringbuf); + seq_putc(m, '\n'); } seq_putc(m, '\n'); @@ -2037,24 +2068,22 @@ static int i915_context_status(struct seq_file *m, void *unused) } static void i915_dump_lrc_obj(struct seq_file *m, - struct intel_context *ctx, + struct i915_gem_context *ctx, struct intel_engine_cs *engine) { + struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; struct page *page; uint32_t *reg_state; int j; - struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; unsigned long ggtt_offset = 0; + seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id); + if (ctx_obj == NULL) { - seq_printf(m, "Context on %s with no gem object\n", - engine->name); + seq_puts(m, "\tNot allocated\n"); return; } - seq_printf(m, "CONTEXT: %s %u\n", engine->name, - intel_execlists_ctx_id(ctx, engine)); - if (!i915_gem_obj_ggtt_bound(ctx_obj)) seq_puts(m, "\tNot bound in GGTT\n"); else @@ -2087,7 +2116,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; if (!i915.enable_execlists) { @@ -2100,9 +2129,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->context_list, link) - if (ctx != dev_priv->kernel_context) - for_each_engine(engine, dev_priv) - i915_dump_lrc_obj(m, ctx, engine); + for_each_engine(engine, dev_priv) + i915_dump_lrc_obj(m, ctx, engine); mutex_unlock(&dev->struct_mutex); @@ -2173,8 +2201,8 @@ static int i915_execlists(struct seq_file *m, void *data) seq_printf(m, "\t%d requests in queue\n", count); if (head_req) { - seq_printf(m, "\tHead request id: %u\n", - intel_execlists_ctx_id(head_req->ctx, engine)); + seq_printf(m, "\tHead request context: %u\n", + head_req->ctx->hw_id); seq_printf(m, "\tHead request tail: %u\n", head_req->tail); } @@ -2268,7 +2296,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) static int per_file_ctx(int id, void *ptr, void *data) { - struct intel_context *ctx = ptr; + struct i915_gem_context *ctx = ptr; struct seq_file *m = data; struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; @@ -2313,12 +2341,12 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - if (INTEL_INFO(dev)->gen == 6) + if (IS_GEN6(dev_priv)) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); for_each_engine(engine, dev_priv) { seq_printf(m, "%s\n", engine->name); - if (INTEL_INFO(dev)->gen == 7) + if (IS_GEN7(dev_priv)) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(engine))); seq_printf(m, "PP_DIR_BASE: 0x%08x\n", @@ -2365,16 +2393,16 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) task = get_pid_task(file->pid, PIDTYPE_PID); if (!task) { ret = -ESRCH; - goto out_put; + goto out_unlock; } seq_printf(m, "\nproc: %s\n", task->comm); put_task_struct(task); idr_for_each(&file_priv->context_idr, per_file_ctx, (void *)(unsigned long)m); } +out_unlock: mutex_unlock(&dev->filelist_mutex); -out_put: intel_runtime_pm_put(dev_priv); mutex_unlock(&dev->struct_mutex); @@ -2509,6 +2537,7 @@ static void i915_guc_client_info(struct seq_file *m, seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", client->wq_size, client->wq_offset, client->wq_tail); + seq_printf(m, "\tWork queue full: 
%u\n", client->no_wq_space); seq_printf(m, "\tFailed to queue: %u\n", client->q_fail); seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); seq_printf(m, "\tLast submission result: %d\n", client->retcode); @@ -3168,7 +3197,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) enum intel_engine_id id; int j, ret; - if (!i915_semaphore_is_enabled(dev)) { + if (!i915_semaphore_is_enabled(dev_priv)) { seq_puts(m, "Semaphores are disabled\n"); return 0; } @@ -4769,7 +4798,7 @@ i915_wedged_set(void *data, u64 val) intel_runtime_pm_get(dev_priv); - i915_handle_error(dev, val, + i915_handle_error(dev_priv, val, "Manually setting wedged to %llu", val); intel_runtime_pm_put(dev_priv); @@ -4919,7 +4948,7 @@ i915_drop_caches_set(void *data, u64 val) } if (val & (DROP_RETIRE | DROP_ACTIVE)) - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev_priv); if (val & DROP_BOUND) i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); @@ -4993,7 +5022,7 @@ i915_max_freq_set(void *data, u64 val) dev_priv->rps.max_freq_softlimit = val; - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); @@ -5060,7 +5089,7 @@ i915_min_freq_set(void *data, u64 val) dev_priv->rps.min_freq_softlimit = val; - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index b3198fcd0536..07edaed9d5a2 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -186,7 +186,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = 1; break; case I915_PARAM_HAS_SEMAPHORES: - value = i915_semaphore_is_enabled(dev); + value = i915_semaphore_is_enabled(dev_priv); break; case I915_PARAM_HAS_PRIME_VMAP_FLUSH: value = 1; @@ -204,7 +204,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = 1; break; case I915_PARAM_CMD_PARSER_VERSION: - value = i915_cmd_parser_get_version(); + value = i915_cmd_parser_get_version(dev_priv); break; case I915_PARAM_HAS_COHERENT_PHYS_GTT: value = 1; @@ -223,8 +223,7 @@ static int i915_getparam(struct drm_device *dev, void *data, return -ENODEV; break; case I915_PARAM_HAS_GPU_RESET: - value = i915.enable_hangcheck && - intel_has_gpu_reset(dev); + value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv); break; case I915_PARAM_HAS_RESOURCE_STREAMER: value = HAS_RESOURCE_STREAMER(dev); @@ -425,6 +424,43 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { .can_switch = i915_switcheroo_can_switch, }; +static void i915_gem_fini(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + /* + * Neither the BIOS, ourselves or any other kernel + * expects the system to be in execlists mode on startup, + * so we need to reset the GPU back to legacy mode. And the only + * known way to disable logical contexts is through a GPU reset. + * + * So in order to leave the system in a known default configuration, + * always reset the GPU upon unload. Afterwards we then clean up the + * GEM state tracking, flushing off the requests and leaving the + * system in a known idle state. + * + * Note that is of the upmost importance that the GPU is idle and + * all stray writes are flushed *before* we dismantle the backing + * storage for the pinned objects. + * + * However, since we are uncertain that reseting the GPU on older + * machines is a good idea, we don't - just in case it leaves the + * machine in an unusable condition. 
+ */ + if (HAS_HW_CONTEXTS(dev)) { + int reset = intel_gpu_reset(dev_priv, ALL_ENGINES); + WARN_ON(reset && reset != -ENODEV); + } + + mutex_lock(&dev->struct_mutex); + i915_gem_reset(dev); + i915_gem_cleanup_engines(dev); + i915_gem_context_fini(dev); + mutex_unlock(&dev->struct_mutex); + + WARN_ON(!list_empty(&to_i915(dev)->context_list)); +} + static int i915_load_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -454,6 +490,9 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto cleanup_vga_client; + /* must happen before intel_power_domains_init_hw() on VLV/CHV */ + intel_update_rawclk(dev_priv); + intel_power_domains_init_hw(dev_priv, false); intel_csr_ucode_init(dev_priv); @@ -468,7 +507,7 @@ static int i915_load_modeset_init(struct drm_device *dev) * working irqs for e.g. gmbus and dp aux transfers. */ intel_modeset_init(dev); - intel_guc_ucode_init(dev); + intel_guc_init(dev); ret = i915_gem_init(dev); if (ret) @@ -503,12 +542,9 @@ static int i915_load_modeset_init(struct drm_device *dev) return 0; cleanup_gem: - mutex_lock(&dev->struct_mutex); - i915_gem_cleanup_engines(dev); - i915_gem_context_fini(dev); - mutex_unlock(&dev->struct_mutex); + i915_gem_fini(dev); cleanup_irq: - intel_guc_ucode_fini(dev); + intel_guc_fini(dev); drm_irq_uninstall(dev); intel_teardown_gmbus(dev); cleanup_csr: @@ -850,7 +886,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev) DRM_INFO("Display disabled (module parameter)\n"); info->num_pipes = 0; } else if (info->num_pipes > 0 && - (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && + (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) && HAS_PCH_SPLIT(dev)) { u32 fuse_strap = I915_READ(FUSE_STRAP); u32 sfuse_strap = I915_READ(SFUSE_STRAP); @@ -874,7 +910,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev) DRM_INFO("PipeC fused off\n"); info->num_pipes -= 1; } - } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) { + } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) { u32 dfsm = I915_READ(SKL_DFSM); u8 disabled_mask = 0; bool invalid; @@ -915,9 +951,11 @@ static void intel_device_info_runtime_init(struct drm_device *dev) else if (INTEL_INFO(dev)->gen >= 9) gen9_sseu_info_init(dev); - /* Snooping is broken on BXT A stepping. */ info->has_snoop = !info->has_llc; - info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1); + + /* Snooping is broken on BXT A stepping. */ + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + info->has_snoop = false; DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); @@ -930,6 +968,20 @@ static void intel_device_info_runtime_init(struct drm_device *dev) info->has_subslice_pg ? "y" : "n"); DRM_DEBUG_DRIVER("has EU power gating: %s\n", info->has_eu_pg ? "y" : "n"); + + i915.enable_execlists = + intel_sanitize_enable_execlists(dev_priv, + i915.enable_execlists); + + /* + * i915.enable_ppgtt is read-only, so do an early pass to validate the + * user's requested state against the hardware/driver capabilities. We + * do this now so that we can print out any log messages once rather + * than every time we check intel_enable_ppgtt(). 
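+ * + * Once sanitized, callers can consume the value directly; as a legend + * (assuming the conventional meaning of the modparam in this era): + * 0 - PPGTT disabled, GGTT only + * 1 - aliasing PPGTT + * 2 - full PPGTT + * 3 - full PPGTT with 48bit addressing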
+ */ + i915.enable_ppgtt = + intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt); + DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); } static void intel_init_dpio(struct drm_i915_private *dev_priv) @@ -1020,6 +1072,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, memcpy(device_info, info, sizeof(dev_priv->info)); device_info->device_id = dev->pdev->device; + BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); + device_info->gen_mask = BIT(device_info->gen - 1); + spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); mutex_init(&dev_priv->backlight_lock); @@ -1137,7 +1192,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) if (ret < 0) goto put_bridge; - intel_uncore_init(dev); + intel_uncore_init(dev_priv); return 0; @@ -1155,7 +1210,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; - intel_uncore_fini(dev); + intel_uncore_fini(dev_priv); i915_mmio_cleanup(dev); pci_dev_put(dev_priv->bridge_dev); } @@ -1206,8 +1261,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) pci_set_master(dev->pdev); /* overlay on gen2 is broken and can't address above 1G */ - if (IS_GEN2(dev)) - dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + if (IS_GEN2(dev)) { + ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + if (ret) { + DRM_ERROR("failed to set DMA mask\n"); + + goto out_ggtt; + } + } + /* 965GM sometimes incorrectly writes to hardware status page (HWS) * using 32bit addressing, overwriting memory if HWS is located @@ -1217,8 +1279,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) * behaviour if any general state is accessed within a page above 4GB, * which also needs to be handled carefully. */ - if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) - dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); + if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) { + ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); + + if (ret) { + DRM_ERROR("failed to set DMA mask\n"); + + goto out_ggtt; + } + } aperture_size = ggtt->mappable_end; @@ -1236,9 +1305,9 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); - intel_uncore_sanitize(dev); + intel_uncore_sanitize(dev_priv); - intel_opregion_setup(dev); + intel_opregion_setup(dev_priv); i915_gem_load_init_fences(dev_priv); @@ -1300,14 +1369,14 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) * Notify a valid surface after modesetting, * when running inside a VM. */ - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); i915_setup_sysfs(dev); if (INTEL_INFO(dev_priv)->num_pipes) { /* Must be done after probing outputs */ - intel_opregion_init(dev); + intel_opregion_register(dev_priv); acpi_video_register(); } @@ -1326,7 +1395,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) i915_audio_component_cleanup(dev_priv); intel_gpu_ips_teardown(); acpi_video_unregister(); - intel_opregion_fini(dev_priv->dev); + intel_opregion_unregister(dev_priv); i915_teardown_sysfs(dev_priv->dev); i915_gem_shrinker_cleanup(dev_priv); } @@ -1458,11 +1527,8 @@ int i915_driver_unload(struct drm_device *dev) /* Flush any outstanding unpin_work. 
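* (a gloss, assuming the flip-completion workers run on dev_priv->wq: flushing here keeps a stale worker from touching display or GEM state once the teardown below begins)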
*/ flush_workqueue(dev_priv->wq); - intel_guc_ucode_fini(dev); - mutex_lock(&dev->struct_mutex); - i915_gem_cleanup_engines(dev); - i915_gem_context_fini(dev); - mutex_unlock(&dev->struct_mutex); + intel_guc_fini(dev); + i915_gem_fini(dev); intel_fbc_cleanup_cfb(dev_priv); intel_power_domains_fini(dev_priv); @@ -1570,15 +1636,15 @@ const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f313b4d8344f..872c60608dbd 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -35,11 +35,9 @@ #include "i915_trace.h" #include "intel_drv.h" -#include <linux/apple-gmux.h> #include <linux/console.h> #include <linux/module.h> #include <linux/pm_runtime.h> -#include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include <drm/drm_crtc_helper.h> @@ -300,22 +298,26 @@ static const struct intel_device_info intel_haswell_m_info = { static const struct intel_device_info intel_broadwell_d_info = { BDW_FEATURES, .gen = 8, + .is_broadwell = 1, }; static const struct intel_device_info intel_broadwell_m_info = { BDW_FEATURES, .gen = 8, .is_mobile = 1, + .is_broadwell = 1, }; static const struct intel_device_info intel_broadwell_gt3d_info = { BDW_FEATURES, .gen = 8, + .is_broadwell = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; static const struct intel_device_info intel_broadwell_gt3m_info = { BDW_FEATURES, .gen = 8, .is_mobile = 1, + .is_broadwell = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; @@ -530,9 +532,9 @@ void intel_detect_pch(struct drm_device *dev) pci_dev_put(pch); } -bool i915_semaphore_is_enabled(struct drm_device *dev) +bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv) { - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_GEN(dev_priv) < 6) return false; if (i915.semaphores >= 0) @@ -542,13 +544,9 @@ bool 
i915_semaphore_is_enabled(struct drm_device *dev) if (i915.enable_execlists) return false; - /* Until we get further testing... */ - if (IS_GEN8(dev)) - return false; - #ifdef CONFIG_INTEL_IOMMU /* Enable semaphores on SNB when IO remapping is off */ - if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) + if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) return false; #endif @@ -610,7 +608,7 @@ static int i915_drm_suspend(struct drm_device *dev) intel_guc_suspend(dev); - intel_suspend_gt_powersave(dev); + intel_suspend_gt_powersave(dev_priv); intel_display_suspend(dev); @@ -628,10 +626,10 @@ static int i915_drm_suspend(struct drm_device *dev) i915_save_state(dev); opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; - intel_opregion_notify_adapter(dev, opregion_target_state); + intel_opregion_notify_adapter(dev_priv, opregion_target_state); - intel_uncore_forcewake_reset(dev, false); - intel_opregion_fini(dev); + intel_uncore_forcewake_reset(dev_priv, false); + intel_opregion_unregister(dev_priv); intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); @@ -749,7 +747,7 @@ static int i915_drm_resume(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); i915_restore_state(dev); - intel_opregion_setup(dev); + intel_opregion_setup(dev_priv); intel_init_pch_refclk(dev); drm_mode_config_reset(dev); @@ -777,7 +775,7 @@ static int i915_drm_resume(struct drm_device *dev) spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); intel_dp_mst_resume(dev); @@ -794,7 +792,7 @@ static int i915_drm_resume(struct drm_device *dev) /* Config may have changed between suspend and resume */ drm_helper_hpd_irq_event(dev); - intel_opregion_init(dev); + intel_opregion_register(dev_priv); intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); @@ -802,7 +800,7 @@ static int i915_drm_resume(struct drm_device *dev) dev_priv->modeset_restore = MODESET_DONE; mutex_unlock(&dev_priv->modeset_restore_lock); - intel_opregion_notify_adapter(dev, PCI_D0); + intel_opregion_notify_adapter(dev_priv, PCI_D0); drm_kms_helper_poll_enable(dev); @@ -870,9 +868,9 @@ static int i915_drm_resume_early(struct drm_device *dev) DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", ret); - intel_uncore_early_sanitize(dev, true); + intel_uncore_early_sanitize(dev_priv, true); - if (IS_BROXTON(dev)) { + if (IS_BROXTON(dev_priv)) { if (!dev_priv->suspended_to_idle) gen9_sanitize_dc_state(dev_priv); bxt_disable_dc9(dev_priv); @@ -880,7 +878,7 @@ static int i915_drm_resume_early(struct drm_device *dev) hsw_disable_pc8(dev_priv); } - intel_uncore_sanitize(dev); + intel_uncore_sanitize(dev_priv); if (IS_BROXTON(dev_priv) || !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) @@ -923,14 +921,14 @@ int i915_resume_switcheroo(struct drm_device *dev) * - re-init interrupt state * - re-init display */ -int i915_reset(struct drm_device *dev) +int i915_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = dev_priv->dev; struct i915_gpu_error *error = &dev_priv->gpu_error; unsigned reset_counter; int ret; - intel_reset_gt_powersave(dev); + intel_reset_gt_powersave(dev_priv); mutex_lock(&dev->struct_mutex); @@ -946,7 +944,7 @@ int i915_reset(struct drm_device *dev) i915_gem_reset(dev); - ret = intel_gpu_reset(dev, ALL_ENGINES); + ret = intel_gpu_reset(dev_priv, ALL_ENGINES); /* Also reset the gpu 
hangman. */ if (error->stop_rings != 0) { @@ -1001,7 +999,7 @@ int i915_reset(struct drm_device *dev) * of re-init after reset. */ if (INTEL_INFO(dev)->gen > 5) - intel_enable_gt_powersave(dev); + intel_enable_gt_powersave(dev_priv); return 0; @@ -1030,13 +1028,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (PCI_FUNC(pdev->devfn)) return -ENODEV; - /* - * apple-gmux is needed on dual GPU MacBook Pro - * to probe the panel if we're the inactive GPU. - */ - if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) && - apple_gmux_present() && pdev != vga_default_device() && - !vga_switcheroo_handler_flags()) + if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; return drm_get_pci_dev(pdev, ent, &driver); @@ -1115,6 +1107,49 @@ static int i915_pm_resume(struct device *dev) return i915_drm_resume(drm_dev); } +/* freeze: before creating the hibernation_image */ +static int i915_pm_freeze(struct device *dev) +{ + return i915_pm_suspend(dev); +} + +static int i915_pm_freeze_late(struct device *dev) +{ + int ret; + + ret = i915_pm_suspend_late(dev); + if (ret) + return ret; + + ret = i915_gem_freeze_late(dev_to_i915(dev)); + if (ret) + return ret; + + return 0; +} + +/* thaw: called after creating the hibernation image, but before turning off. */ +static int i915_pm_thaw_early(struct device *dev) +{ + return i915_pm_resume_early(dev); +} + +static int i915_pm_thaw(struct device *dev) +{ + return i915_pm_resume(dev); +} + +/* restore: called after loading the hibernation image. */ +static int i915_pm_restore_early(struct device *dev) +{ + return i915_pm_resume_early(dev); +} + +static int i915_pm_restore(struct device *dev) +{ + return i915_pm_resume(dev); +} + /* * Save all Gunit registers that may be lost after a D3 and a subsequent * S0i[R123] transition. The list of registers needing a save/restore is @@ -1478,7 +1513,7 @@ static int intel_runtime_suspend(struct device *device) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) + if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6()))) return -ENODEV; if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) @@ -1517,7 +1552,7 @@ static int intel_runtime_suspend(struct device *device) intel_guc_suspend(dev); - intel_suspend_gt_powersave(dev); + intel_suspend_gt_powersave(dev_priv); intel_runtime_pm_disable_interrupts(dev_priv); ret = 0; @@ -1539,7 +1574,7 @@ static int intel_runtime_suspend(struct device *device) return ret; } - intel_uncore_forcewake_reset(dev, false); + intel_uncore_forcewake_reset(dev_priv, false); enable_rpm_wakeref_asserts(dev_priv); WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); @@ -1553,14 +1588,14 @@ static int intel_runtime_suspend(struct device *device) * FIXME: We really should find a document that references the arguments * used below! */ - if (IS_BROADWELL(dev)) { + if (IS_BROADWELL(dev_priv)) { /* * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop * being detected, and the call we do at intel_runtime_resume() * won't be able to restore them. Since PCI_D3hot matches the * actual specification and appears to be working, use it. 
*/ - intel_opregion_notify_adapter(dev, PCI_D3hot); + intel_opregion_notify_adapter(dev_priv, PCI_D3hot); } else { /* * current versions of firmware which depend on this opregion @@ -1569,7 +1604,7 @@ static int intel_runtime_suspend(struct device *device) * to distinguish it from notifications that might be sent via * the suspend path. */ - intel_opregion_notify_adapter(dev, PCI_D1); + intel_opregion_notify_adapter(dev_priv, PCI_D1); } assert_forcewakes_inactive(dev_priv); @@ -1593,7 +1628,7 @@ static int intel_runtime_resume(struct device *device) WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); disable_rpm_wakeref_asserts(dev_priv); - intel_opregion_notify_adapter(dev, PCI_D0); + intel_opregion_notify_adapter(dev_priv, PCI_D0); dev_priv->pm.suspended = false; if (intel_uncore_unclaimed_mmio(dev_priv)) DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); @@ -1620,7 +1655,7 @@ static int intel_runtime_resume(struct device *device) * we can do is to hope that things will still work (and disable RPM). */ i915_gem_init_swizzling(dev); - gen6_update_ring_freq(dev); + gen6_update_ring_freq(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); @@ -1632,7 +1667,7 @@ static int intel_runtime_resume(struct device *device) if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_init(dev_priv); - intel_enable_gt_powersave(dev); + intel_enable_gt_powersave(dev_priv); enable_rpm_wakeref_asserts(dev_priv); @@ -1669,14 +1704,14 @@ static const struct dev_pm_ops i915_pm_ops = { * @restore, @restore_early : called after rebooting and restoring the * hibernation image [PMSG_RESTORE] */ - .freeze = i915_pm_suspend, - .freeze_late = i915_pm_suspend_late, - .thaw_early = i915_pm_resume_early, - .thaw = i915_pm_resume, + .freeze = i915_pm_freeze, + .freeze_late = i915_pm_freeze_late, + .thaw_early = i915_pm_thaw_early, + .thaw = i915_pm_thaw, .poweroff = i915_pm_suspend, .poweroff_late = i915_pm_poweroff_late, - .restore_early = i915_pm_resume_early, - .restore = i915_pm_resume, + .restore_early = i915_pm_restore_early, + .restore = i915_pm_restore, /* S0ix (via runtime suspend) event handlers */ .runtime_suspend = intel_runtime_suspend, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5faacc6e548d..0113207967d9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -66,7 +66,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20160425" +#define DRIVER_DATE "20160606" #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -324,6 +324,12 @@ struct i915_hotplug { &dev->mode_config.plane_list, \ base.head) +#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \ + list_for_each_entry(intel_plane, &dev->mode_config.plane_list, \ + base.head) \ + for_each_if ((plane_mask) & \ + (1 << drm_plane_index(&intel_plane->base))) + #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ list_for_each_entry(intel_plane, \ &(dev)->mode_config.plane_list, \ @@ -333,6 +339,10 @@ struct i915_hotplug { #define for_each_intel_crtc(dev, intel_crtc) \ list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) +#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \ + list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \ + for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base))) + #define for_each_intel_encoder(dev, intel_encoder) \ list_for_each_entry(intel_encoder, \ &(dev)->mode_config.encoder_list, 
\ @@ -588,6 +598,7 @@ struct drm_i915_display_funcs { struct intel_crtc_state *newstate); void (*initial_watermarks)(struct intel_crtc_state *cstate); void (*optimize_watermarks)(struct intel_crtc_state *cstate); + int (*compute_global_watermarks)(struct drm_atomic_state *state); void (*update_wm)(struct drm_crtc *crtc); int (*modeset_calc_cdclk)(struct drm_atomic_state *state); void (*modeset_commit_cdclk)(struct drm_atomic_state *state); @@ -612,7 +623,7 @@ struct drm_i915_display_funcs { struct drm_i915_gem_object *obj, struct drm_i915_gem_request *req, uint32_t flags); - void (*hpd_irq_setup)(struct drm_device *dev); + void (*hpd_irq_setup)(struct drm_i915_private *dev_priv); /* clock updates for mode set */ /* cursor updates */ /* render clock increase/decrease */ @@ -735,6 +746,7 @@ struct intel_csr { func(is_valleyview) sep \ func(is_cherryview) sep \ func(is_haswell) sep \ + func(is_broadwell) sep \ func(is_skylake) sep \ func(is_broxton) sep \ func(is_kabylake) sep \ @@ -757,9 +769,10 @@ struct intel_csr { struct intel_device_info { u32 display_mmio_offset; u16 device_id; - u8 num_pipes:3; + u8 num_pipes; u8 num_sprites[I915_MAX_PIPES]; u8 gen; + u16 gen_mask; u8 ring_mask; /* Rings supported by the HW */ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); /* Register offsets for the various display pipes and transcoders */ @@ -821,9 +834,8 @@ struct i915_ctx_hang_stats { /* This must match up with the value previously used for execbuf2.rsvd1. */ #define DEFAULT_CONTEXT_HANDLE 0 -#define CONTEXT_NO_ZEROMAP (1<<0) /** - * struct intel_context - as the name implies, represents a context. + * struct i915_gem_context - as the name implies, represents a context. * @ref: reference count. * @user_handle: userspace tracking identity for this context. * @remap_slice: l3 row remapping information. @@ -841,33 +853,33 @@ struct i915_ctx_hang_stats { * Contexts are memory images used by the hardware to store copies of their * internal state. */ -struct intel_context { +struct i915_gem_context { struct kref ref; - int user_handle; - uint8_t remap_slice; struct drm_i915_private *i915; - int flags; struct drm_i915_file_private *file_priv; - struct i915_ctx_hang_stats hang_stats; struct i915_hw_ppgtt *ppgtt; - /* Legacy ring buffer submission */ - struct { - struct drm_i915_gem_object *rcs_state; - bool initialized; - } legacy_hw_ctx; + struct i915_ctx_hang_stats hang_stats; - /* Execlists */ - struct { + /* Unique identifier for this context, used by the hw for tracking */ + unsigned long flags; + unsigned hw_id; + u32 user_handle; +#define CONTEXT_NO_ZEROMAP (1<<0) + + struct intel_context { struct drm_i915_gem_object *state; struct intel_ringbuffer *ringbuf; - int pin_count; struct i915_vma *lrc_vma; - u64 lrc_desc; uint32_t *lrc_reg_state; + u64 lrc_desc; + int pin_count; + bool initialised; } engine[I915_NUM_ENGINES]; struct list_head link; + + u8 remap_slice; }; enum fb_op_origin { @@ -1115,6 +1127,8 @@ struct intel_gen6_power_mgmt { bool interrupts_enabled; u32 pm_iir; + u32 pm_intr_keep; + /* Frequencies are stored in potentially platform dependent multiples. * In other words, *_freq needs to be multiplied by X to be interesting. 
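* (Sketch: intel_gpu_freq() performs that conversion, so a consumer would * print intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq) to report MHz * rather than raw hardware units.)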
* Soft limits are those which are used for the dynamic reclocking done @@ -1488,6 +1502,7 @@ struct intel_vbt_data { bool present; bool active_low_pwm; u8 min_brightness; /* min_brightness/255 of max */ + enum intel_backlight_type type; } backlight; /* MIPI DSI */ @@ -1580,7 +1595,7 @@ struct skl_ddb_allocation { }; struct skl_wm_values { - bool dirty[I915_MAX_PIPES]; + unsigned dirty_pipes; struct skl_ddb_allocation ddb; uint32_t wm_linetime[I915_MAX_PIPES]; uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; @@ -1697,7 +1712,7 @@ struct i915_execbuffer_params { uint64_t batch_obj_vm_offset; struct intel_engine_cs *engine; struct drm_i915_gem_object *batch_obj; - struct intel_context *ctx; + struct i915_gem_context *ctx; struct drm_i915_gem_request *request; }; @@ -1747,6 +1762,7 @@ struct drm_i915_private { wait_queue_head_t gmbus_wait_queue; struct pci_dev *bridge_dev; + struct i915_gem_context *kernel_context; struct intel_engine_cs engine[I915_NUM_ENGINES]; struct drm_i915_gem_object *semaphore_obj; uint32_t last_seqno, next_seqno; @@ -1802,13 +1818,17 @@ struct drm_i915_private { int num_fence_regs; /* 8 on pre-965, 16 otherwise */ unsigned int fsb_freq, mem_freq, is_ddr3; - unsigned int skl_boot_cdclk; + unsigned int skl_preferred_vco_freq; unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; unsigned int max_dotclk_freq; unsigned int rawclk_freq; unsigned int hpll_freq; unsigned int czclk_freq; + struct { + unsigned int vco, ref; + } cdclk_pll; + /** * wq - Driver workqueue for GEM. * @@ -1838,6 +1858,13 @@ struct drm_i915_private { DECLARE_HASHTABLE(mm_structs, 7); struct mutex mm_lock; + /* The hw wants to have a stable context identifier for the lifetime + * of the context (for OA, PASID, faults, etc). This is limited + * in execlists to 21 bits. + */ + struct ida context_hw_ida; +#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ + /* Kernel Modesetting */ struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; @@ -1950,9 +1977,6 @@ struct drm_i915_private { */ uint16_t skl_latency[8]; - /* Committed wm config */ - struct intel_wm_config config; - /* * The skl_wm_values structure is a bit too big for stack * allocation, so we keep the staging struct where we store @@ -1975,6 +1999,13 @@ struct drm_i915_private { * cstate->wm.need_postvbl_update. */ struct mutex wm_mutex; + + /* + * Set during HW readout of watermarks/DDB. Some platforms + * need to know when we're still using BIOS-provided values + * (which we don't fully trust). + */ + bool distrust_bios_wm; } wm; struct i915_runtime_pm pm; @@ -1989,8 +2020,6 @@ struct drm_i915_private { void (*stop_engine)(struct intel_engine_cs *engine); } gt; - struct intel_context *kernel_context; - /* perform PHY state sanity checks? 
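* (presumably one entry per CHV DPIO PHY)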
*/ bool chv_phy_assert[2]; @@ -2227,9 +2256,75 @@ struct drm_i915_gem_object { }; #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) -void i915_gem_track_fb(struct drm_i915_gem_object *old, - struct drm_i915_gem_object *new, - unsigned frontbuffer_bits); +/* + * Optimised SGL iterator for GEM objects + */ +static __always_inline struct sgt_iter { + struct scatterlist *sgp; + union { + unsigned long pfn; + dma_addr_t dma; + }; + unsigned int curr; + unsigned int max; +} __sgt_iter(struct scatterlist *sgl, bool dma) { + struct sgt_iter s = { .sgp = sgl }; + + if (s.sgp) { + s.max = s.curr = s.sgp->offset; + s.max += s.sgp->length; + if (dma) + s.dma = sg_dma_address(s.sgp); + else + s.pfn = page_to_pfn(sg_page(s.sgp)); + } + + return s; +} + +/** + * __sg_next - return the next scatterlist entry in a list + * @sg: The current sg entry + * + * Description: + * If the entry is the last, return NULL; otherwise, step to the next + * element in the array (@sg@+1). If that's a chain pointer, follow it; + * otherwise just return the pointer to the current element. + **/ +static inline struct scatterlist *__sg_next(struct scatterlist *sg) +{ +#ifdef CONFIG_DEBUG_SG + BUG_ON(sg->sg_magic != SG_MAGIC); +#endif + return sg_is_last(sg) ? NULL : + likely(!sg_is_chain(++sg)) ? sg : + sg_chain_ptr(sg); +} + +/** + * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table + * @__dmap: DMA address (output) + * @__iter: 'struct sgt_iter' (iterator state, internal) + * @__sgt: sg_table to iterate over (input) + */ +#define for_each_sgt_dma(__dmap, __iter, __sgt) \ + for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ + ((__dmap) = (__iter).dma + (__iter).curr); \ + (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ + ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0)) + +/** + * for_each_sgt_page - iterate over the pages of the given sg_table + * @__pp: page pointer (output) + * @__iter: 'struct sgt_iter' (iterator state, internal) + * @__sgt: sg_table to iterate over (input) + */ +#define for_each_sgt_page(__pp, __iter, __sgt) \ + for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ + ((__pp) = (__iter).pfn == 0 ? NULL : \ + pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ + (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ + ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) /** * Request queue structure. @@ -2278,6 +2373,9 @@ struct drm_i915_gem_request { /** Position in the ringbuffer of the end of the whole request */ u32 tail; + /** Preallocate space in the ringbuffer for emitting the request */ + u32 reserved_space; + /** * Context and ring buffer related to this request * Contexts are refcounted, so when this request is associated with a @@ -2288,9 +2386,20 @@ struct drm_i915_gem_request { * i915_gem_request_free() will then decrement the refcount on the * context. */ - struct intel_context *ctx; + struct i915_gem_context *ctx; struct intel_ringbuffer *ringbuf; + /** + * Context related to the previous request. + * As the contexts are accessed by the hardware until the switch is + * completed to a new context, the hardware may still be writing + * to the context object after the breadcrumb is visible. We must + * not unpin/unbind/prune that object whilst still active and so + * we keep the previous context pinned until the following (this) + * request is retired.
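+ * + * (Sketch, mirroring i915_gem_request_retire() in i915_gem.c below: on + * retire under execlists the driver does + * + * if (request->previous_context) + * intel_lr_context_unpin(request->previous_context, + * request->engine); + * + * so the older context keeps its pin until the hardware has signalled + * this request.)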
+ */ + struct i915_gem_context *previous_context; + /** Batch buffer related to this request if any (used for error state dump only) */ struct drm_i915_gem_object *batch_obj; @@ -2327,11 +2436,13 @@ struct drm_i915_gem_request { /** Execlists no. of times this request has been sent to the ELSP */ int elsp_submitted; + /** Execlists context hardware id. */ + unsigned ctx_hw_id; }; struct drm_i915_gem_request * __must_check i915_gem_request_alloc(struct intel_engine_cs *engine, - struct intel_context *ctx); + struct i915_gem_context *ctx); void i915_gem_request_free(struct kref *req_ref); int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, struct drm_file *file); @@ -2359,23 +2470,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req) static inline void i915_gem_request_unreference(struct drm_i915_gem_request *req) { - WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex)); kref_put(&req->ref, i915_gem_request_free); } -static inline void -i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req) -{ - struct drm_device *dev; - - if (!req) - return; - - dev = req->engine->dev; - if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex)) - mutex_unlock(&dev->struct_mutex); -} - static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, struct drm_i915_gem_request *src) { @@ -2503,9 +2600,29 @@ struct drm_i915_cmd_table { #define INTEL_INFO(p) (&__I915__(p)->info) #define INTEL_GEN(p) (INTEL_INFO(p)->gen) #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) -#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) #define REVID_FOREVER 0xff +#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) + +#define GEN_FOREVER (0) +/* + * Returns true if Gen is in inclusive range [Start, End]. + * + * Use GEN_FOREVER for an unbound start and/or end. + */ +#define IS_GEN(p, s, e) ({ \ + unsigned int __s = (s), __e = (e); \ + BUILD_BUG_ON(!__builtin_constant_p(s)); \ + BUILD_BUG_ON(!__builtin_constant_p(e)); \ + if ((__s) != GEN_FOREVER) \ + __s = (s) - 1; \ + if ((__e) == GEN_FOREVER) \ + __e = BITS_PER_LONG - 1; \ + else \ + __e = (e) - 1; \ + !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \ +}) + /* * Return true if revision is in range [since,until] inclusive. * @@ -2538,7 +2655,7 @@ struct drm_i915_cmd_table { #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) -#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev)) +#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell) #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) #define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) #define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) @@ -2606,14 +2723,14 @@ struct drm_i915_cmd_table { * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular * chips, etc.).
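* * With gen_mask = BIT(gen - 1) (set up in i915_driver_init_early), each of * these is a single AND: a gen8 part has gen_mask = 0x80, so IS_GEN8() tests * BIT(7). The IS_GEN(p, s, e) range form likewise folds to one AND against a * constant GENMASK; as a sketch, * * IS_GEN(dev_priv, 6, 7) is true on any gen6 or gen7 part, and * IS_GEN(dev_priv, 9, GEN_FOREVER) on gen9 and anything newer.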
*/ -#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) -#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) -#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) -#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) -#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) -#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) -#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) -#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) +#define IS_GEN2(dev) (INTEL_INFO(dev)->gen_mask & BIT(1)) +#define IS_GEN3(dev) (INTEL_INFO(dev)->gen_mask & BIT(2)) +#define IS_GEN4(dev) (INTEL_INFO(dev)->gen_mask & BIT(3)) +#define IS_GEN5(dev) (INTEL_INFO(dev)->gen_mask & BIT(4)) +#define IS_GEN6(dev) (INTEL_INFO(dev)->gen_mask & BIT(5)) +#define IS_GEN7(dev) (INTEL_INFO(dev)->gen_mask & BIT(6)) +#define IS_GEN8(dev) (INTEL_INFO(dev)->gen_mask & BIT(7)) +#define IS_GEN9(dev) (INTEL_INFO(dev)->gen_mask & BIT(8)) #define RENDER_RING (1<<RCS) #define BSD_RING (1<<VCS) @@ -2686,12 +2803,18 @@ struct drm_i915_cmd_table { IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ IS_KABYLAKE(dev) || IS_BROXTON(dev)) #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) -#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) +#define HAS_RC6p(dev) (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) #define HAS_CSR(dev) (IS_GEN9(dev)) -#define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) -#define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) +/* + * For now, anything with a GuC requires uCode loading, and then supports + * command submission once loaded. But these are logically independent + * properties, so we have separate macros to test them. + */ +#define HAS_GUC(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) +#define HAS_GUC_UCODE(dev) (HAS_GUC(dev)) +#define HAS_GUC_SCHED(dev) (HAS_GUC(dev)) #define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ INTEL_INFO(dev)->gen >= 8) @@ -2740,6 +2863,9 @@ extern int i915_max_ioctl; extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); extern int i915_resume_switcheroo(struct drm_device *dev); +int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, + int enable_ppgtt); + /* i915_dma.c */ void __printf(3, 4) __i915_printk(struct drm_i915_private *dev_priv, const char *level, @@ -2760,9 +2886,9 @@ extern void i915_driver_postclose(struct drm_device *dev, extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #endif -extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask); -extern bool intel_has_gpu_reset(struct drm_device *dev); -extern int i915_reset(struct drm_device *dev); +extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); +extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); +extern int i915_reset(struct drm_i915_private *dev_priv); extern int intel_guc_reset(struct drm_i915_private *dev_priv); extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); @@ -2772,30 +2898,33 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); /* intel_hotplug.c */ -void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); +void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, + u32 pin_mask, u32 long_mask); void intel_hpd_init(struct drm_i915_private *dev_priv); void intel_hpd_init_work(struct drm_i915_private *dev_priv); void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); bool 
intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); /* i915_irq.c */ -void i915_queue_hangcheck(struct drm_device *dev); +void i915_queue_hangcheck(struct drm_i915_private *dev_priv); __printf(3, 4) -void i915_handle_error(struct drm_device *dev, u32 engine_mask, +void i915_handle_error(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *fmt, ...); extern void intel_irq_init(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv); void intel_irq_uninstall(struct drm_i915_private *dev_priv); -extern void intel_uncore_sanitize(struct drm_device *dev); -extern void intel_uncore_early_sanitize(struct drm_device *dev, +extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv); +extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, bool restore_forcewake); -extern void intel_uncore_init(struct drm_device *dev); +extern void intel_uncore_init(struct drm_i915_private *dev_priv); extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); -extern void intel_uncore_fini(struct drm_device *dev); -extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); +extern void intel_uncore_fini(struct drm_i915_private *dev_priv); +extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, + bool restore); const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, enum forcewake_domains domains); @@ -2811,9 +2940,9 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); -static inline bool intel_vgpu_active(struct drm_device *dev) +static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) { - return to_i915(dev)->vgpu.active; + return dev_priv->vgpu.active; } void @@ -2909,7 +3038,7 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_get_tiling(struct drm_device *dev, void *data, struct drm_file *file_priv); -int i915_gem_init_userptr(struct drm_device *dev); +void i915_gem_init_userptr(struct drm_i915_private *dev_priv); int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, @@ -2919,11 +3048,13 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data, void i915_gem_load_init(struct drm_device *dev); void i915_gem_load_cleanup(struct drm_device *dev); void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); +int i915_gem_freeze_late(struct drm_i915_private *dev_priv); + void *i915_gem_object_alloc(struct drm_device *dev); void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops); -struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, +struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev, size_t size); struct drm_i915_gem_object *i915_gem_object_create_from_data( struct drm_device *dev, const void *data, size_t size); @@ -3054,6 +3185,11 @@ int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args); int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, 
uint64_t *offset); + +void i915_gem_track_fb(struct drm_i915_gem_object *old, + struct drm_i915_gem_object *new, + unsigned frontbuffer_bits); + /** * Returns true if seq1 is later than seq2. */ @@ -3081,13 +3217,13 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, req->seqno); } -int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); +int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno); int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); struct drm_i915_gem_request * i915_gem_find_active_request(struct intel_engine_cs *engine); -bool i915_gem_retire_requests(struct drm_device *dev); +bool i915_gem_retire_requests(struct drm_i915_private *dev_priv); void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); static inline u32 i915_reset_counter(struct i915_gpu_error *error) @@ -3147,7 +3283,6 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); int __must_check i915_gem_init(struct drm_device *dev); int i915_gem_init_engines(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); -int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice); void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_cleanup_engines(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev); @@ -3215,8 +3350,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, bool i915_gem_obj_bound(struct drm_i915_gem_object *o, struct i915_address_space *vm); -unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, - struct i915_address_space *vm); struct i915_vma * i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, struct i915_address_space *vm); @@ -3251,14 +3384,8 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); } -static inline unsigned long -i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - - return i915_gem_obj_size(obj, &ggtt->base); -} +unsigned long +i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj); static inline int __must_check i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, @@ -3272,12 +3399,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, alignment, flags | PIN_GLOBAL); } -static inline int -i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) -{ - return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); -} - void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view); static inline void @@ -3301,28 +3422,42 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); /* i915_gem_context.c */ int __must_check i915_gem_context_init(struct drm_device *dev); +void i915_gem_context_lost(struct drm_i915_private *dev_priv); void i915_gem_context_fini(struct drm_device *dev); void i915_gem_context_reset(struct drm_device *dev); int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); -int i915_gem_context_enable(struct drm_i915_gem_request *req); void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); int i915_switch_context(struct drm_i915_gem_request *req); -struct intel_context * -i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); void i915_gem_context_free(struct kref *ctx_ref); struct drm_i915_gem_object * 
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); -static inline void i915_gem_context_reference(struct intel_context *ctx) + +static inline struct i915_gem_context * +i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) +{ + struct i915_gem_context *ctx; + + lockdep_assert_held(&file_priv->dev_priv->dev->struct_mutex); + + ctx = idr_find(&file_priv->context_idr, id); + if (!ctx) + return ERR_PTR(-ENOENT); + + return ctx; +} + +static inline void i915_gem_context_reference(struct i915_gem_context *ctx) { kref_get(&ctx->ref); } -static inline void i915_gem_context_unreference(struct intel_context *ctx) +static inline void i915_gem_context_unreference(struct i915_gem_context *ctx) { + lockdep_assert_held(&ctx->i915->dev->struct_mutex); kref_put(&ctx->ref, i915_gem_context_free); } -static inline bool i915_gem_context_is_default(const struct intel_context *c) +static inline bool i915_gem_context_is_default(const struct i915_gem_context *c) { return c->user_handle == DEFAULT_CONTEXT_HANDLE; } @@ -3335,6 +3470,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct drm_device *dev, @@ -3349,9 +3486,9 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target); int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); /* belongs in i915_gem_gtt.h */ -static inline void i915_gem_chipset_flush(struct drm_device *dev) +static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) { - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_GEN(dev_priv) < 6) intel_gtt_chipset_flush(); } @@ -3430,18 +3567,19 @@ static inline void i915_error_state_buf_release( { kfree(eb->buf); } -void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, +void i915_capture_error_state(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *error_msg); void i915_error_state_get(struct drm_device *dev, struct i915_error_state_file_priv *error_priv); void i915_error_state_put(struct i915_error_state_file_priv *error_priv); void i915_destroy_error_state(struct drm_device *dev); -void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); +void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone); const char *i915_cache_level_str(struct drm_i915_private *i915, int type); /* i915_cmd_parser.c */ -int i915_cmd_parser_get_version(void); +int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); int i915_cmd_parser_init_ring(struct intel_engine_cs *engine); void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine); bool i915_needs_cmd_parser(struct intel_engine_cs *engine); @@ -3489,31 +3627,33 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, /* intel_opregion.c */ #ifdef CONFIG_ACPI -extern int intel_opregion_setup(struct drm_device *dev); -extern void intel_opregion_init(struct drm_device *dev); -extern void intel_opregion_fini(struct drm_device *dev); -extern void intel_opregion_asle_intr(struct drm_device *dev); +extern int intel_opregion_setup(struct drm_i915_private *dev_priv); +extern void intel_opregion_register(struct drm_i915_private *dev_priv); +extern void intel_opregion_unregister(struct drm_i915_private *dev_priv); +extern void 
intel_opregion_asle_intr(struct drm_i915_private *dev_priv); extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable); -extern int intel_opregion_notify_adapter(struct drm_device *dev, +extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, pci_power_t state); -extern int intel_opregion_get_panel_type(struct drm_device *dev); +extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv); #else -static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } -static inline void intel_opregion_init(struct drm_device *dev) { return; } -static inline void intel_opregion_fini(struct drm_device *dev) { return; } -static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } +static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; } +static inline void intel_opregion_init(struct drm_i915_private *dev) { } +static inline void intel_opregion_fini(struct drm_i915_private *dev) { } +static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) +{ +} static inline int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) { return 0; } static inline int -intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) +intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state) { return 0; } -static inline int intel_opregion_get_panel_type(struct drm_device *dev) +static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev) { return -ENODEV; } @@ -3538,26 +3678,25 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); extern void intel_display_resume(struct drm_device *dev); extern void i915_redisable_vga(struct drm_device *dev); extern void i915_redisable_vga_power_on(struct drm_device *dev); -extern bool ironlake_set_drps(struct drm_device *dev, u8 val); +extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); extern void intel_init_pch_refclk(struct drm_device *dev); -extern void intel_set_rps(struct drm_device *dev, u8 val); +extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val); extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); extern void intel_detect_pch(struct drm_device *dev); -extern int intel_enable_rc6(const struct drm_device *dev); -extern bool i915_semaphore_is_enabled(struct drm_device *dev); +extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv); int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); /* overlay */ -extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); +extern struct intel_overlay_error_state * +intel_overlay_capture_error_state(struct drm_i915_private *dev_priv); extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, struct intel_overlay_error_state *error); -extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); +extern struct intel_display_error_state * +intel_display_capture_error_state(struct drm_i915_private *dev_priv); extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, struct drm_device *dev, struct intel_display_error_state *error); @@ -3586,6 +3725,24 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, 
u32 reg); void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); +/* intel_dpio_phy.c */ +void chv_set_phy_signal_level(struct intel_encoder *encoder, + u32 deemph_reg_value, u32 margin_reg_value, + bool uniq_trans_scale); +void chv_data_lane_soft_reset(struct intel_encoder *encoder, + bool reset); +void chv_phy_pre_pll_enable(struct intel_encoder *encoder); +void chv_phy_pre_encoder_enable(struct intel_encoder *encoder); +void chv_phy_release_cl2_override(struct intel_encoder *encoder); +void chv_phy_post_pll_disable(struct intel_encoder *encoder); + +void vlv_set_phy_signal_level(struct intel_encoder *encoder, + u32 demph_reg_value, u32 preemph_reg_value, + u32 uniqtranscale_reg_value, u32 tx3_demph); +void vlv_phy_pre_pll_enable(struct intel_encoder *encoder); +void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder); +void vlv_phy_reset_lanes(struct intel_encoder *encoder); + int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index aad26851cee3..343d88114f3b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) vaddr += PAGE_SIZE; } - i915_gem_chipset_flush(obj->base.dev); + i915_gem_chipset_flush(to_i915(obj->base.dev)); st = kmalloc(sizeof(*st), GFP_KERNEL); if (st == NULL) @@ -347,7 +347,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, } drm_clflush_virt_range(vaddr, args->size); - i915_gem_chipset_flush(dev); + i915_gem_chipset_flush(to_i915(dev)); out: intel_fb_obj_flush(obj, false, ORIGIN_CPU); @@ -381,9 +381,9 @@ i915_gem_create(struct drm_file *file, return -EINVAL; /* Allocate the new object */ - obj = i915_gem_alloc_object(dev, size); - if (obj == NULL) - return -ENOMEM; + obj = i915_gem_object_create(dev, size); + if (IS_ERR(obj)) + return PTR_ERR(obj); ret = drm_gem_handle_create(file, &obj->base, &handle); /* drop reference from allocate - handle holds it now */ @@ -1006,7 +1006,7 @@ out: } if (needs_clflush_after) - i915_gem_chipset_flush(dev); + i915_gem_chipset_flush(to_i915(dev)); else obj->cache_dirty = true; @@ -1230,8 +1230,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, struct intel_rps_client *rps) { struct intel_engine_cs *engine = i915_gem_request_get_engine(req); - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = req->i915; const bool irq_test_in_progress = ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine); int state = interruptible ? 
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; @@ -1413,6 +1412,13 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) list_del_init(&request->list); i915_gem_request_remove_from_client(request); + if (request->previous_context) { + if (i915.enable_execlists) + intel_lr_context_unpin(request->previous_context, + request->engine); + } + + i915_gem_context_unreference(request->ctx); i915_gem_request_unreference(request); } @@ -1422,7 +1428,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req) struct intel_engine_cs *engine = req->engine; struct drm_i915_gem_request *tmp; - lockdep_assert_held(&engine->dev->struct_mutex); + lockdep_assert_held(&engine->i915->dev->struct_mutex); if (list_empty(&req->list)) return; @@ -1982,7 +1988,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) return size; /* Previous chips need a power-of-two fence region when tiling */ - if (INTEL_INFO(dev)->gen == 3) + if (IS_GEN3(dev)) gtt_size = 1024*1024; else gtt_size = 512*1024; @@ -2162,7 +2168,8 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj) static void i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) { - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + struct page *page; int ret; BUG_ON(obj->madv == __I915_MADV_PURGED); @@ -2184,9 +2191,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) if (obj->madv == I915_MADV_DONTNEED) obj->dirty = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - struct page *page = sg_page_iter_page(&sg_iter); - + for_each_sgt_page(page, sgt_iter, obj->pages) { if (obj->dirty) set_page_dirty(page); @@ -2243,7 +2248,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) struct address_space *mapping; struct sg_table *st; struct scatterlist *sg; - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; struct page *page; unsigned long last_pfn = 0; /* suppress gcc warning */ int ret; @@ -2340,8 +2345,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) err_pages: sg_mark_end(sg); - for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) - put_page(sg_page_iter_page(&sg_iter)); + for_each_sgt_page(page, sgt_iter, st) + put_page(page); sg_free_table(st); kfree(st); @@ -2395,6 +2400,44 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj) return 0; } +/* The 'mapping' part of i915_gem_object_pin_map() below */ +static void *i915_gem_object_map(const struct drm_i915_gem_object *obj) +{ + unsigned long n_pages = obj->base.size >> PAGE_SHIFT; + struct sg_table *sgt = obj->pages; + struct sgt_iter sgt_iter; + struct page *page; + struct page *stack_pages[32]; + struct page **pages = stack_pages; + unsigned long i = 0; + void *addr; + + /* A single page can always be kmapped */ + if (n_pages == 1) + return kmap(sg_page(sgt->sgl)); + + if (n_pages > ARRAY_SIZE(stack_pages)) { + /* Too big for stack -- allocate temporary array instead */ + pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY); + if (!pages) + return NULL; + } + + for_each_sgt_page(page, sgt_iter, sgt) + pages[i++] = page; + + /* Check that we have the expected number of pages */ + GEM_BUG_ON(i != n_pages); + + addr = vmap(pages, n_pages, 0, PAGE_KERNEL); + + if (pages != stack_pages) + drm_free_large(pages); + + return addr; +} + +/* get, pin, and map the pages of the object into kernel space */ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj) { int ret; @@ -2407,29 +2450,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object 
*obj) i915_gem_object_pin_pages(obj); - if (obj->mapping == NULL) { - struct page **pages; - - pages = NULL; - if (obj->base.size == PAGE_SIZE) - obj->mapping = kmap(sg_page(obj->pages->sgl)); - else - pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT, - sizeof(*pages), - GFP_TEMPORARY); - if (pages != NULL) { - struct sg_page_iter sg_iter; - int n; - - n = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, - obj->pages->nents, 0) - pages[n++] = sg_page_iter_page(&sg_iter); - - obj->mapping = vmap(pages, n, 0, PAGE_KERNEL); - drm_free_large(pages); - } - if (obj->mapping == NULL) { + if (!obj->mapping) { + obj->mapping = i915_gem_object_map(obj); + if (!obj->mapping) { i915_gem_object_unpin_pages(obj); return ERR_PTR(-ENOMEM); } @@ -2502,9 +2525,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring) } static int -i915_gem_init_seqno(struct drm_device *dev, u32 seqno) +i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; int ret; @@ -2514,7 +2536,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno) if (ret) return ret; } - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev_priv); /* Finally reset hw state */ for_each_engine(engine, dev_priv) @@ -2534,7 +2556,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) /* HWS page needs to be set less than what we * will inject to ring */ - ret = i915_gem_init_seqno(dev, seqno - 1); + ret = i915_gem_init_seqno(dev_priv, seqno - 1); if (ret) return ret; @@ -2550,13 +2572,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) } int -i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) +i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* reserve 0 for non-seqno */ if (dev_priv->next_seqno == 0) { - int ret = i915_gem_init_seqno(dev, 0); + int ret = i915_gem_init_seqno(dev_priv, 0); if (ret) return ret; @@ -2580,6 +2600,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, struct drm_i915_private *dev_priv; struct intel_ringbuffer *ringbuf; u32 request_start; + u32 reserved_tail; int ret; if (WARN_ON(request == NULL)) @@ -2594,9 +2615,10 @@ void __i915_add_request(struct drm_i915_gem_request *request, * should already have been reserved in the ring buffer. Let the ring * know that it is time to use that space up. */ - intel_ring_reserved_space_use(ringbuf); - request_start = intel_ring_get_tail(ringbuf); + reserved_tail = request->reserved_space; + request->reserved_space = 0; + /* * Emit any outstanding flushes - execbuf can fail to emit the flush * after having emitted the batchbuffer command. Hence we need to fix @@ -2652,19 +2674,25 @@ void __i915_add_request(struct drm_i915_gem_request *request, /* Not allowed to fail! */ WARN(ret, "emit|add_request failed: %d!\n", ret); - i915_queue_hangcheck(engine->dev); + i915_queue_hangcheck(engine->i915); queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, round_jiffies_up_relative(HZ)); - intel_mark_busy(dev_priv->dev); + intel_mark_busy(dev_priv); /* Sanity check that the reserved size was large enough. 
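The open-coded check in this hunk replaces intel_ring_reserved_space_end(): it measures how many bytes the request actually emitted and WARNs once if that exceeds request->reserved_space. A minimal sketch of the wraparound arithmetic it depends on (helper name hypothetical, kernel u32/s32 types):

/* Bytes emitted between two ring offsets, allowing the tail to wrap. */
static u32 ring_bytes_used(u32 start, u32 tail, u32 size)
{
	s32 used = tail - start;

	if (used < 0)	/* the tail wrapped past the end of the ring */
		used += size;

	return used;
}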
*/ - intel_ring_reserved_space_end(ringbuf); + ret = intel_ring_get_tail(ringbuf) - request_start; + if (ret < 0) + ret += ringbuf->size; + WARN_ONCE(ret > reserved_tail, + "Not enough space reserved (%d bytes) " + "for adding the request (%d bytes)\n", + reserved_tail, ret); } static bool i915_context_is_banned(struct drm_i915_private *dev_priv, - const struct intel_context *ctx) + const struct i915_gem_context *ctx) { unsigned long elapsed; @@ -2689,7 +2717,7 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv, } static void i915_set_reset_status(struct drm_i915_private *dev_priv, - struct intel_context *ctx, + struct i915_gem_context *ctx, const bool guilty) { struct i915_ctx_hang_stats *hs; @@ -2712,27 +2740,15 @@ void i915_gem_request_free(struct kref *req_ref) { struct drm_i915_gem_request *req = container_of(req_ref, typeof(*req), ref); - struct intel_context *ctx = req->ctx; - - if (req->file_priv) - i915_gem_request_remove_from_client(req); - - if (ctx) { - if (i915.enable_execlists && ctx != req->i915->kernel_context) - intel_lr_context_unpin(ctx, req->engine); - - i915_gem_context_unreference(ctx); - } - kmem_cache_free(req->i915->requests, req); } static inline int __i915_gem_request_alloc(struct intel_engine_cs *engine, - struct intel_context *ctx, + struct i915_gem_context *ctx, struct drm_i915_gem_request **req_out) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error); struct drm_i915_gem_request *req; int ret; @@ -2754,7 +2770,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, if (req == NULL) return -ENOMEM; - ret = i915_gem_get_seqno(engine->dev, &req->seqno); + ret = i915_gem_get_seqno(engine->i915, &req->seqno); if (ret) goto err; @@ -2765,15 +2781,6 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, req->ctx = ctx; i915_gem_context_reference(req->ctx); - if (i915.enable_execlists) - ret = intel_logical_ring_alloc_request_extras(req); - else - ret = intel_ring_alloc_request_extras(req); - if (ret) { - i915_gem_context_unreference(req->ctx); - goto err; - } - /* * Reserve space in the ring buffer for all the commands required to * eventually emit this request. This is to guarantee that the @@ -2781,24 +2788,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine, * to be redone if the request is not actually submitted straight * away, e.g. because a GPU scheduler has deferred it. */ + req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; + if (i915.enable_execlists) - ret = intel_logical_ring_reserve_space(req); + ret = intel_logical_ring_alloc_request_extras(req); else - ret = intel_ring_reserve_space(req); - if (ret) { - /* - * At this point, the request is fully allocated even if not - * fully prepared. Thus it can be cleaned up using the proper - * free code. 
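The reworked __i915_gem_request_alloc() in this hunk drops the special-cased cancel path in favour of the standard goto unwind. The idiom, sketched with hypothetical helpers:

	ret = grab_resource_a(req);
	if (ret)
		goto err;

	ret = grab_resource_b(req);
	if (ret)
		goto err_a;	/* undo only what already succeeded */

	return 0;

err_a:
	release_resource_a(req);
err:
	kmem_cache_free(cache, req);	/* 'cache' stands in for dev_priv->requests */
	return ret;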
- */ - intel_ring_reserved_space_cancel(req->ringbuf); - i915_gem_request_unreference(req); - return ret; - } + ret = intel_ring_alloc_request_extras(req); + if (ret) + goto err_ctx; *req_out = req; return 0; +err_ctx: + i915_gem_context_unreference(ctx); err: kmem_cache_free(dev_priv->requests, req); return ret; @@ -2818,13 +2821,13 @@ err: */ struct drm_i915_gem_request * i915_gem_request_alloc(struct intel_engine_cs *engine, - struct intel_context *ctx) + struct i915_gem_context *ctx) { struct drm_i915_gem_request *req; int err; if (ctx == NULL) - ctx = to_i915(engine->dev)->kernel_context; + ctx = engine->i915->kernel_context; err = __i915_gem_request_alloc(engine, ctx, &req); return err ? ERR_PTR(err) : req; } @@ -2888,13 +2891,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv, /* Ensure irq handler finishes or is cancelled. */ tasklet_kill(&engine->irq_tasklet); - spin_lock_bh(&engine->execlist_lock); - /* list_splice_tail_init checks for empty lists */ - list_splice_tail_init(&engine->execlist_queue, - &engine->execlist_retired_req_list); - spin_unlock_bh(&engine->execlist_lock); - - intel_execlists_retire_requests(engine); + intel_execlists_cancel_requests(engine); } /* @@ -3005,9 +3002,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine) } bool -i915_gem_retire_requests(struct drm_device *dev) +i915_gem_retire_requests(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; bool idle = true; @@ -3018,8 +3014,6 @@ i915_gem_retire_requests(struct drm_device *dev) spin_lock_bh(&engine->execlist_lock); idle &= list_empty(&engine->execlist_queue); spin_unlock_bh(&engine->execlist_lock); - - intel_execlists_retire_requests(engine); } } @@ -3042,7 +3036,7 @@ i915_gem_retire_work_handler(struct work_struct *work) /* Come back later if the device is busy... */ idle = false; if (mutex_trylock(&dev->struct_mutex)) { - idle = i915_gem_retire_requests(dev); + idle = i915_gem_retire_requests(dev_priv); mutex_unlock(&dev->struct_mutex); } if (!idle) @@ -3066,7 +3060,7 @@ i915_gem_idle_work_handler(struct work_struct *work) * Also locking seems to be fubar here, engine->request_list is protected * by dev->struct_mutex. */ - intel_mark_idle(dev); + intel_mark_idle(dev_priv); if (mutex_trylock(&dev->struct_mutex)) { for_each_engine(engine, dev_priv) @@ -3096,14 +3090,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) if (req == NULL) continue; - if (list_empty(&req->list)) - goto retire; - - if (i915_gem_request_completed(req, true)) { - __i915_gem_request_retire__upto(req); -retire: + if (i915_gem_request_completed(req, true)) i915_gem_object_retire__read(obj, i); - } } return 0; @@ -3185,7 +3173,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) ret = __i915_wait_request(req[i], true, args->timeout_ns > 0 ? 
&args->timeout_ns : NULL, to_rps_client(file)); - i915_gem_request_unreference__unlocked(req[i]); + i915_gem_request_unreference(req[i]); } return ret; @@ -3211,7 +3199,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, if (i915_gem_request_completed(from_req, true)) return 0; - if (!i915_semaphore_is_enabled(obj->base.dev)) { + if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) { struct drm_i915_private *i915 = to_i915(obj->base.dev); ret = __i915_wait_request(from_req, i915->mm.interruptible, @@ -3345,6 +3333,17 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) old_write_domain); } +static void __i915_vma_iounmap(struct i915_vma *vma) +{ + GEM_BUG_ON(vma->pin_count); + + if (vma->iomap == NULL) + return; + + io_mapping_unmap(vma->iomap); + vma->iomap = NULL; +} + static int __i915_vma_unbind(struct i915_vma *vma, bool wait) { struct drm_i915_gem_object *obj = vma->obj; @@ -3377,6 +3376,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait) ret = i915_gem_object_put_fence(obj); if (ret) return ret; + + __i915_vma_iounmap(vma); } trace_i915_vma_unbind(vma); @@ -3731,7 +3732,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) return; if (i915_gem_clflush_object(obj, obj->pin_display)) - i915_gem_chipset_flush(obj->base.dev); + i915_gem_chipset_flush(to_i915(obj->base.dev)); old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; @@ -3929,7 +3930,7 @@ out: obj->base.write_domain != I915_GEM_DOMAIN_CPU && cpu_write_needs_clflush(obj)) { if (i915_gem_clflush_object(obj, true)) - i915_gem_chipset_flush(obj->base.dev); + i915_gem_chipset_flush(to_i915(obj->base.dev)); } return 0; @@ -4198,7 +4199,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) if (ret == 0) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); - i915_gem_request_unreference__unlocked(target); + i915_gem_request_unreference(target); return ret; } @@ -4499,21 +4500,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = { .put_pages = i915_gem_object_put_pages_gtt, }; -struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, +struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev, size_t size) { struct drm_i915_gem_object *obj; struct address_space *mapping; gfp_t mask; + int ret; obj = i915_gem_object_alloc(dev); if (obj == NULL) - return NULL; + return ERR_PTR(-ENOMEM); - if (drm_gem_object_init(dev, &obj->base, size) != 0) { - i915_gem_object_free(obj); - return NULL; - } + ret = drm_gem_object_init(dev, &obj->base, size); + if (ret) + goto fail; mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) { @@ -4550,6 +4551,11 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, trace_i915_gem_object_create(obj); return obj; + +fail: + i915_gem_object_free(obj); + + return ERR_PTR(ret); } static bool discard_backing_storage(struct drm_i915_gem_object *obj) @@ -4655,16 +4661,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma; - BUG_ON(!view); + GEM_BUG_ON(!view); list_for_each_entry(vma, &obj->vma_list, obj_link) - if (vma->vm == &ggtt->base && - i915_ggtt_view_equal(&vma->ggtt_view, view)) + if (vma->is_ggtt 
&& i915_ggtt_view_equal(&vma->ggtt_view, view)) return vma; return NULL; } @@ -4706,9 +4708,10 @@ i915_gem_suspend(struct drm_device *dev) if (ret) goto err; - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev_priv); i915_gem_stop_engines(dev); + i915_gem_context_lost(dev_priv); mutex_unlock(&dev->struct_mutex); cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); @@ -4727,37 +4730,6 @@ err: return ret; } -int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice) -{ - struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 *remap_info = dev_priv->l3_parity.remap_info[slice]; - int i, ret; - - if (!HAS_L3_DPF(dev) || !remap_info) - return 0; - - ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3); - if (ret) - return ret; - - /* - * Note: We do not worry about the concurrent register cacheline hang - * here because no other code should access these registers other than - * at initialization time. - */ - for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) { - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i)); - intel_ring_emit(engine, remap_info[i]); - } - - intel_ring_advance(engine); - - return ret; -} - void i915_gem_init_swizzling(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -4862,7 +4834,7 @@ i915_gem_init_hw(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - int ret, j; + int ret; /* Double layer security blanket, see i915_gem_init() */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); @@ -4914,13 +4886,10 @@ i915_gem_init_hw(struct drm_device *dev) intel_mocs_init_l3cc_table(dev); /* We can't enable contexts until all firmware is loaded */ - if (HAS_GUC_UCODE(dev)) { - ret = intel_guc_ucode_load(dev); - if (ret) { - DRM_ERROR("Failed to initialize GuC, error %d\n", ret); - ret = -EIO; + if (HAS_GUC(dev)) { + ret = intel_guc_setup(dev); + if (ret) goto out; - } } /* @@ -4928,44 +4897,6 @@ i915_gem_init_hw(struct drm_device *dev) * on re-initialisation */ ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100); - if (ret) - goto out; - - /* Now it is safe to go back round and do everything else: */ - for_each_engine(engine, dev_priv) { - struct drm_i915_gem_request *req; - - req = i915_gem_request_alloc(engine, NULL); - if (IS_ERR(req)) { - ret = PTR_ERR(req); - break; - } - - if (engine->id == RCS) { - for (j = 0; j < NUM_L3_SLICES(dev); j++) { - ret = i915_gem_l3_remap(req, j); - if (ret) - goto err_request; - } - } - - ret = i915_ppgtt_init_ring(req); - if (ret) - goto err_request; - - ret = i915_gem_context_enable(req); - if (ret) - goto err_request; - -err_request: - i915_add_request_no_flush(req); - if (ret) { - DRM_ERROR("Failed to enable %s, error=%d\n", - engine->name, ret); - i915_gem_cleanup_engines(dev); - break; - } - } out: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); @@ -4977,9 +4908,6 @@ int i915_gem_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int ret; - i915.enable_execlists = intel_sanitize_enable_execlists(dev, - i915.enable_execlists); - mutex_lock(&dev->struct_mutex); if (!i915.enable_execlists) { @@ -5002,10 +4930,7 @@ int i915_gem_init(struct drm_device *dev) */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - ret = i915_gem_init_userptr(dev); - if (ret) - goto out_unlock; - + i915_gem_init_userptr(dev_priv); i915_gem_init_ggtt(dev); ret = 
i915_gem_context_init(dev); @@ -5042,14 +4967,6 @@ i915_gem_cleanup_engines(struct drm_device *dev) for_each_engine(engine, dev_priv) dev_priv->gt.cleanup_engine(engine); - - if (i915.enable_execlists) - /* - * Neither the BIOS, ourselves or any other kernel - * expects the system to be in execlists mode on startup, - * so we need to reset the GPU back to legacy mode. - */ - intel_gpu_reset(dev, ALL_ENGINES); } static void @@ -5073,7 +4990,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv) else dev_priv->num_fence_regs = 8; - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) dev_priv->num_fence_regs = I915_READ(vgtif_reg(avail_rs.fence_num)); @@ -5148,6 +5065,34 @@ void i915_gem_load_cleanup(struct drm_device *dev) kmem_cache_destroy(dev_priv->objects); } +int i915_gem_freeze_late(struct drm_i915_private *dev_priv) +{ + struct drm_i915_gem_object *obj; + + /* Called just before we write the hibernation image. + * + * We need to update the domain tracking to reflect that the CPU + * will be accessing all the pages to create and restore from the + * hibernation, and so upon restoration those pages will be in the + * CPU domain. + * + * To make sure the hibernation image contains the latest state, + * we update that state just before writing out the image. + */ + + list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { + obj->base.read_domains = I915_GEM_DOMAIN_CPU; + obj->base.write_domain = I915_GEM_DOMAIN_CPU; + } + + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + obj->base.read_domains = I915_GEM_DOMAIN_CPU; + obj->base.write_domain = I915_GEM_DOMAIN_CPU; + } + + return 0; +} + void i915_gem_release(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; @@ -5254,13 +5199,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, const struct i915_ggtt_view *view) { - struct drm_i915_private *dev_priv = to_i915(o->base.dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma; list_for_each_entry(vma, &o->vma_list, obj_link) - if (vma->vm == &ggtt->base && - i915_ggtt_view_equal(&vma->ggtt_view, view)) + if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view)) return vma->node.start; WARN(1, "global vma for this object not found. 
(view=%u)\n", view->type); @@ -5286,12 +5228,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o, bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, const struct i915_ggtt_view *view) { - struct drm_i915_private *dev_priv = to_i915(o->base.dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma; list_for_each_entry(vma, &o->vma_list, obj_link) - if (vma->vm == &ggtt->base && + if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view) && drm_mm_node_allocated(&vma->node)) return true; @@ -5310,23 +5250,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) return false; } -unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, - struct i915_address_space *vm) +unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o) { - struct drm_i915_private *dev_priv = o->base.dev->dev_private; struct i915_vma *vma; - WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); - - BUG_ON(list_empty(&o->vma_list)); + GEM_BUG_ON(list_empty(&o->vma_list)); list_for_each_entry(vma, &o->vma_list, obj_link) { if (vma->is_ggtt && - vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) - continue; - if (vma->vm == vm) + vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) return vma->node.size; } + return 0; } @@ -5365,8 +5300,8 @@ i915_gem_object_create_from_data(struct drm_device *dev, size_t bytes; int ret; - obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE)); - if (IS_ERR_OR_NULL(obj)) + obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE)); + if (IS_ERR(obj)) return obj; ret = i915_gem_object_set_to_cpu_domain(obj, true); diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c index 7bf2f3f2968e..3752d5daa4b2 100644 --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c @@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, if (obj == NULL) { int ret; - obj = i915_gem_alloc_object(pool->dev, size); - if (obj == NULL) - return ERR_PTR(-ENOMEM); + obj = i915_gem_object_create(pool->dev, size); + if (IS_ERR(obj)) + return obj; ret = i915_gem_object_get_pages(obj); if (ret) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index e5acc3916f75..a3b11aac23a4 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -90,6 +90,8 @@ #include "i915_drv.h" #include "i915_trace.h" +#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 + /* This is a HW constraint. The value below is the largest known requirement * I've seen in a spec to date, and that was a workaround for a non-shipping * part. It should be safe to decrease this, but it's more future proof as is. 
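Both converted call sites above rely on the ERR_PTR convention: i915_gem_object_create() encodes the errno in the returned pointer instead of returning NULL, so callers forward the real error with IS_ERR()/PTR_ERR() from <linux/err.h>:

	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* -ENOMEM, or whatever drm_gem_object_init() failed with */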
@@ -97,28 +99,27 @@ #define GEN6_CONTEXT_ALIGN (64<<10) #define GEN7_CONTEXT_ALIGN 4096 -static size_t get_context_alignment(struct drm_device *dev) +static size_t get_context_alignment(struct drm_i915_private *dev_priv) { - if (IS_GEN6(dev)) + if (IS_GEN6(dev_priv)) return GEN6_CONTEXT_ALIGN; return GEN7_CONTEXT_ALIGN; } -static int get_context_size(struct drm_device *dev) +static int get_context_size(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; int ret; u32 reg; - switch (INTEL_INFO(dev)->gen) { + switch (INTEL_GEN(dev_priv)) { case 6: reg = I915_READ(CXT_SIZE); ret = GEN6_CXT_TOTAL_SIZE(reg) * 64; break; case 7: reg = I915_READ(GEN7_CXT_SIZE); - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) ret = HSW_CXT_TOTAL_SIZE; else ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; @@ -133,7 +134,7 @@ static int get_context_size(struct drm_device *dev) return ret; } -static void i915_gem_context_clean(struct intel_context *ctx) +static void i915_gem_context_clean(struct i915_gem_context *ctx) { struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; struct i915_vma *vma, *next; @@ -150,13 +151,12 @@ static void i915_gem_context_clean(struct intel_context *ctx) void i915_gem_context_free(struct kref *ctx_ref) { - struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); + struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); + int i; + lockdep_assert_held(&ctx->i915->dev->struct_mutex); trace_i915_context_free(ctx); - if (i915.enable_execlists) - intel_lr_context_free(ctx); - /* * This context is going away and we need to remove all VMAs still * around. This is to handle imported shared objects for which @@ -166,9 +166,22 @@ void i915_gem_context_free(struct kref *ctx_ref) i915_ppgtt_put(ctx->ppgtt); - if (ctx->legacy_hw_ctx.rcs_state) - drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base); + for (i = 0; i < I915_NUM_ENGINES; i++) { + struct intel_context *ce = &ctx->engine[i]; + + if (!ce->state) + continue; + + WARN_ON(ce->pin_count); + if (ce->ringbuf) + intel_ringbuffer_free(ce->ringbuf); + + drm_gem_object_unreference(&ce->state->base); + } + list_del(&ctx->link); + + ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id); kfree(ctx); } @@ -178,9 +191,11 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) struct drm_i915_gem_object *obj; int ret; - obj = i915_gem_alloc_object(dev, size); - if (obj == NULL) - return ERR_PTR(-ENOMEM); + lockdep_assert_held(&dev->struct_mutex); + + obj = i915_gem_object_create(dev, size); + if (IS_ERR(obj)) + return obj; /* * Try to make the context utilize L3 as well as LLC. @@ -209,18 +224,46 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size) return obj; } -static struct intel_context * +static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) +{ + int ret; + + ret = ida_simple_get(&dev_priv->context_hw_ida, + 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); + if (ret < 0) { + /* Contexts are only released when no longer active. + * Flush any pending retires to hopefully release some + * stale contexts and try again. 
+ */ + i915_gem_retire_requests(dev_priv); + ret = ida_simple_get(&dev_priv->context_hw_ida, + 0, MAX_CONTEXT_HW_ID, GFP_KERNEL); + if (ret < 0) + return ret; + } + + *out = ret; + return 0; +} + +static struct i915_gem_context * __create_hw_context(struct drm_device *dev, struct drm_i915_file_private *file_priv) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (ctx == NULL) return ERR_PTR(-ENOMEM); + ret = assign_hw_id(dev_priv, &ctx->hw_id); + if (ret) { + kfree(ctx); + return ERR_PTR(ret); + } + kref_init(&ctx->ref); list_add_tail(&ctx->link, &dev_priv->context_list); ctx->i915 = dev_priv; @@ -232,7 +275,7 @@ __create_hw_context(struct drm_device *dev, ret = PTR_ERR(obj); goto err_out; } - ctx->legacy_hw_ctx.rcs_state = obj; + ctx->engine[RCS].state = obj; } /* Default context will never have a file_priv */ @@ -249,7 +292,7 @@ __create_hw_context(struct drm_device *dev, /* NB: Mark all slices as needing a remap so that when the context first * loads it will restore whatever remap state already exists. If there * is no remap info, it will be a NOP. */ - ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1; + ctx->remap_slice = ALL_L3_SLICES(dev_priv); ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD; @@ -265,44 +308,27 @@ err_out: * context state of the GPU for applications that don't utilize HW contexts, as * well as an idle case. */ -static struct intel_context * +static struct i915_gem_context * i915_gem_create_context(struct drm_device *dev, struct drm_i915_file_private *file_priv) { - const bool is_global_default_ctx = file_priv == NULL; - struct intel_context *ctx; - int ret = 0; + struct i915_gem_context *ctx; - BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + lockdep_assert_held(&dev->struct_mutex); ctx = __create_hw_context(dev, file_priv); if (IS_ERR(ctx)) return ctx; - if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) { - /* We may need to do things with the shrinker which - * require us to immediately switch back to the default - * context. This can cause a problem as pinning the - * default context also requires GTT space which may not - * be available. To avoid this we always pin the default - * context. 
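assign_hw_id() above pairs with the ida_simple_remove() call in i915_gem_context_free(), so the hardware id lives exactly as long as the context. The lifecycle, condensed (error handling elided):

	/* at creation: reserve a hardware context id below MAX_CONTEXT_HW_ID */
	ctx->hw_id = ida_simple_get(&dev_priv->context_hw_ida,
				    0, MAX_CONTEXT_HW_ID, GFP_KERNEL);

	/* at the final unreference: return the id to the allocator */
	ida_simple_remove(&dev_priv->context_hw_ida, ctx->hw_id);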
- */ - ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state, - get_context_alignment(dev), 0); - if (ret) { - DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); - goto err_destroy; - } - } - if (USES_FULL_PPGTT(dev)) { struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv); - if (IS_ERR_OR_NULL(ppgtt)) { + if (IS_ERR(ppgtt)) { DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); - ret = PTR_ERR(ppgtt); - goto err_unpin; + idr_remove(&file_priv->context_idr, ctx->user_handle); + i915_gem_context_unreference(ctx); + return ERR_CAST(ppgtt); } ctx->ppgtt = ppgtt; @@ -311,24 +337,19 @@ i915_gem_create_context(struct drm_device *dev, trace_i915_context_create(ctx); return ctx; - -err_unpin: - if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) - i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state); -err_destroy: - idr_remove(&file_priv->context_idr, ctx->user_handle); - i915_gem_context_unreference(ctx); - return ERR_PTR(ret); } -static void i915_gem_context_unpin(struct intel_context *ctx, +static void i915_gem_context_unpin(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { if (i915.enable_execlists) { intel_lr_context_unpin(ctx, engine); } else { - if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state) - i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state); + struct intel_context *ce = &ctx->engine[engine->id]; + + if (ce->state) + i915_gem_object_ggtt_unpin(ce->state); + i915_gem_context_unreference(ctx); } } @@ -336,51 +357,48 @@ static void i915_gem_context_unpin(struct intel_context *ctx, void i915_gem_context_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int i; + + lockdep_assert_held(&dev->struct_mutex); if (i915.enable_execlists) { - struct intel_context *ctx; + struct i915_gem_context *ctx; list_for_each_entry(ctx, &dev_priv->context_list, link) intel_lr_context_reset(dev_priv, ctx); } - for (i = 0; i < I915_NUM_ENGINES; i++) { - struct intel_engine_cs *engine = &dev_priv->engine[i]; - - if (engine->last_context) { - i915_gem_context_unpin(engine->last_context, engine); - engine->last_context = NULL; - } - } - - /* Force the GPU state to be reinitialised on enabling */ - dev_priv->kernel_context->legacy_hw_ctx.initialized = false; + i915_gem_context_lost(dev_priv); } int i915_gem_context_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_context *ctx; + struct i915_gem_context *ctx; /* Init should only be called once per module load. Eventually the * restriction on the context_disabled check can be loosened. */ if (WARN_ON(dev_priv->kernel_context)) return 0; - if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) { + if (intel_vgpu_active(dev_priv) && + HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { if (!i915.enable_execlists) { DRM_INFO("Only EXECLIST mode is supported in vgpu.\n"); return -EINVAL; } } + /* Using the simple ida interface, the max is limited by sizeof(int) */ + BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); + ida_init(&dev_priv->context_hw_ida); + if (i915.enable_execlists) { /* NB: intentionally left blank. 
We will allocate our own * backing objects as we need them, thank you very much */ dev_priv->hw_context_size = 0; - } else if (HAS_HW_CONTEXTS(dev)) { - dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); + } else if (HAS_HW_CONTEXTS(dev_priv)) { + dev_priv->hw_context_size = + round_up(get_context_size(dev_priv), 4096); if (dev_priv->hw_context_size > (1<<20)) { DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n", dev_priv->hw_context_size); @@ -395,6 +413,26 @@ int i915_gem_context_init(struct drm_device *dev) return PTR_ERR(ctx); } + if (!i915.enable_execlists && ctx->engine[RCS].state) { + int ret; + + /* We may need to do things with the shrinker which + * require us to immediately switch back to the default + * context. This can cause a problem as pinning the + * default context also requires GTT space which may not + * be available. To avoid this we always pin the default + * context. + */ + ret = i915_gem_obj_ggtt_pin(ctx->engine[RCS].state, + get_context_alignment(dev_priv), 0); + if (ret) { + DRM_ERROR("Failed to pinned default global context (error %d)\n", + ret); + i915_gem_context_unreference(ctx); + return ret; + } + } + dev_priv->kernel_context = ctx; DRM_DEBUG_DRIVER("%s context support initialized\n", @@ -403,67 +441,48 @@ int i915_gem_context_init(struct drm_device *dev) return 0; } -void i915_gem_context_fini(struct drm_device *dev) +void i915_gem_context_lost(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_context *dctx = dev_priv->kernel_context; - int i; + struct intel_engine_cs *engine; - if (dctx->legacy_hw_ctx.rcs_state) { - /* The only known way to stop the gpu from accessing the hw context is - * to reset it. Do this as the very last operation to avoid confusing - * other code, leading to spurious errors. */ - intel_gpu_reset(dev, ALL_ENGINES); - - /* When default context is created and switched to, base object refcount - * will be 2 (+1 from object creation and +1 from do_switch()). - * i915_gem_context_fini() will be called after gpu_idle() has switched - * to default context. So we need to unreference the base object once - * to offset the do_switch part, so that i915_gem_context_unreference() - * can then free the base object correctly. 
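The reference/unreference helpers used throughout these hunks are thin kref wrappers around the i915_gem_context_free() release callback; roughly, as a sketch rather than the tree's exact inlines:

static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}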
*/ - WARN_ON(!dev_priv->engine[RCS].last_context); - - i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); - } - - for (i = I915_NUM_ENGINES; --i >= 0;) { - struct intel_engine_cs *engine = &dev_priv->engine[i]; + lockdep_assert_held(&dev_priv->dev->struct_mutex); + for_each_engine(engine, dev_priv) { if (engine->last_context) { i915_gem_context_unpin(engine->last_context, engine); engine->last_context = NULL; } + + /* Force the GPU state to be reinitialised on enabling */ + dev_priv->kernel_context->engine[engine->id].initialised = + engine->init_context == NULL; } - i915_gem_context_unreference(dctx); - dev_priv->kernel_context = NULL; + /* Force the GPU state to be reinitialised on enabling */ + dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv); } -int i915_gem_context_enable(struct drm_i915_gem_request *req) +void i915_gem_context_fini(struct drm_device *dev) { - struct intel_engine_cs *engine = req->engine; - int ret; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_gem_context *dctx = dev_priv->kernel_context; - if (i915.enable_execlists) { - if (engine->init_context == NULL) - return 0; + lockdep_assert_held(&dev->struct_mutex); - ret = engine->init_context(req); - } else - ret = i915_switch_context(req); + if (!i915.enable_execlists && dctx->engine[RCS].state) + i915_gem_object_ggtt_unpin(dctx->engine[RCS].state); - if (ret) { - DRM_ERROR("ring init context: %d\n", ret); - return ret; - } + i915_gem_context_unreference(dctx); + dev_priv->kernel_context = NULL; - return 0; + ida_destroy(&dev_priv->context_hw_ida); } static int context_idr_cleanup(int id, void *p, void *data) { - struct intel_context *ctx = p; + struct i915_gem_context *ctx = p; + ctx->file_priv = ERR_PTR(-EBADF); i915_gem_context_unreference(ctx); return 0; } @@ -471,7 +490,7 @@ static int context_idr_cleanup(int id, void *p, void *data) int i915_gem_context_open(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; - struct intel_context *ctx; + struct i915_gem_context *ctx; idr_init(&file_priv->context_idr); @@ -491,31 +510,22 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; + lockdep_assert_held(&dev->struct_mutex); + idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); idr_destroy(&file_priv->context_idr); } -struct intel_context * -i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) -{ - struct intel_context *ctx; - - ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id); - if (!ctx) - return ERR_PTR(-ENOENT); - - return ctx; -} - static inline int mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) { + struct drm_i915_private *dev_priv = req->i915; struct intel_engine_cs *engine = req->engine; u32 flags = hw_flags | MI_MM_SPACE_GTT; const int num_rings = /* Use an extended w/a on ivb+ if signalling from other rings */ - i915_semaphore_is_enabled(engine->dev) ? - hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 : + i915_semaphore_is_enabled(dev_priv) ? + hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 : 0; int len, ret; @@ -524,21 +534,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) * explicitly, so we rely on the value at ring init, stored in * itlb_before_ctx_switch. 
*/ - if (IS_GEN6(engine->dev)) { + if (IS_GEN6(dev_priv)) { ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0); if (ret) return ret; } /* These flags are for resource streamer on HSW+ */ - if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8) + if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8) flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN); - else if (INTEL_INFO(engine->dev)->gen < 8) + else if (INTEL_GEN(dev_priv) < 8) flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); len = 4; - if (INTEL_INFO(engine->dev)->gen >= 7) + if (INTEL_GEN(dev_priv) >= 7) len += 2 + (num_rings ? 4*num_rings + 6 : 0); ret = intel_ring_begin(req, len); @@ -546,14 +556,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) return ret; /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ - if (INTEL_INFO(engine->dev)->gen >= 7) { + if (INTEL_GEN(dev_priv) >= 7) { intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE); if (num_rings) { struct intel_engine_cs *signaller; intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(num_rings)); - for_each_engine(signaller, to_i915(engine->dev)) { + for_each_engine(signaller, dev_priv) { if (signaller == engine) continue; @@ -568,7 +578,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) intel_ring_emit(engine, MI_NOOP); intel_ring_emit(engine, MI_SET_CONTEXT); intel_ring_emit(engine, - i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) | + i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) | flags); /* * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP @@ -576,14 +586,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) */ intel_ring_emit(engine, MI_NOOP); - if (INTEL_INFO(engine->dev)->gen >= 7) { + if (INTEL_GEN(dev_priv) >= 7) { if (num_rings) { struct intel_engine_cs *signaller; i915_reg_t last_reg = {}; /* keep gcc quiet */ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(num_rings)); - for_each_engine(signaller, to_i915(engine->dev)) { + for_each_engine(signaller, dev_priv) { if (signaller == engine) continue; @@ -609,45 +619,83 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) return ret; } -static inline bool skip_rcs_switch(struct intel_engine_cs *engine, - struct intel_context *to) +static int remap_l3(struct drm_i915_gem_request *req, int slice) +{ + u32 *remap_info = req->i915->l3_parity.remap_info[slice]; + struct intel_engine_cs *engine = req->engine; + int i, ret; + + if (!remap_info) + return 0; + + ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2); + if (ret) + return ret; + + /* + * Note: We do not worry about the concurrent register cacheline hang + * here because no other code should access these registers other than + * at initialization time. 
+ */ + intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4)); + for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) { + intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i)); + intel_ring_emit(engine, remap_info[i]); + } + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); + + return 0; +} + +static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt, + struct intel_engine_cs *engine, + struct i915_gem_context *to) { if (to->remap_slice) return false; - if (!to->legacy_hw_ctx.initialized) + if (!to->engine[RCS].initialised) return false; - if (to->ppgtt && - !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) + if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) return false; return to == engine->last_context; } static bool -needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to) +needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, + struct intel_engine_cs *engine, + struct i915_gem_context *to) { - if (!to->ppgtt) + if (!ppgtt) return false; + /* Always load the ppgtt on first use */ + if (!engine->last_context) + return true; + + /* Same context without new entries, skip */ if (engine->last_context == to && - !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) + !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) return false; if (engine->id != RCS) return true; - if (INTEL_INFO(engine->dev)->gen < 8) + if (INTEL_GEN(engine->i915) < 8) return true; return false; } static bool -needs_pd_load_post(struct intel_context *to, u32 hw_flags) +needs_pd_load_post(struct i915_hw_ppgtt *ppgtt, + struct i915_gem_context *to, + u32 hw_flags) { - if (!to->ppgtt) + if (!ppgtt) return false; if (!IS_GEN8(to->i915)) @@ -661,18 +709,19 @@ needs_pd_load_post(struct intel_context *to, u32 hw_flags) static int do_rcs_switch(struct drm_i915_gem_request *req) { - struct intel_context *to = req->ctx; + struct i915_gem_context *to = req->ctx; struct intel_engine_cs *engine = req->engine; - struct intel_context *from; + struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt; + struct i915_gem_context *from; u32 hw_flags; int ret, i; - if (skip_rcs_switch(engine, to)) + if (skip_rcs_switch(ppgtt, engine, to)) return 0; /* Trying to pin first makes error handling easier. */ - ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, - get_context_alignment(engine->dev), + ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state, + get_context_alignment(engine->i915), 0); if (ret) return ret; @@ -694,37 +743,32 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) * * XXX: We need a real interface to do this instead of trickery. */ - ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false); + ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false); if (ret) goto unpin_out; - if (needs_pd_load_pre(engine, to)) { + if (needs_pd_load_pre(ppgtt, engine, to)) { /* Older GENs and non render rings still want the load first, * "PP_DCLV followed by PP_DIR_BASE register through Load * Register Immediate commands in Ring Buffer before submitting * a context."*/ trace_switch_mm(engine, to); - ret = to->ppgtt->switch_mm(to->ppgtt, req); + ret = ppgtt->switch_mm(ppgtt, req); if (ret) goto unpin_out; } - if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) + if (!to->engine[RCS].initialised || i915_gem_context_is_default(to)) /* NB: If we inhibit the restore, the context is not allowed to * die because future work may end up depending on valid address * space. 
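remap_l3() above batches the whole L3 remap into a single MI_LOAD_REGISTER_IMM, which is where its intel_ring_begin() length comes from. The dword accounting against the removed per-register loop in i915_gem_l3_remap():

	removed: GEN7_L3LOG_SIZE/4 x (LRI header + reg + value) = GEN7_L3LOG_SIZE/4 * 3 dwords
	new:     1 LRI header + GEN7_L3LOG_SIZE/4 x (reg + value) + 1 MI_NOOP
	         = GEN7_L3LOG_SIZE/4 * 2 + 2 dwords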
This means we must enforce that a page table load * occur when this occurs. */ hw_flags = MI_RESTORE_INHIBIT; - else if (to->ppgtt && - intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings) + else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings) hw_flags = MI_FORCE_RESTORE; else hw_flags = 0; - /* We should never emit switch_mm more than once */ - WARN_ON(needs_pd_load_pre(engine, to) && - needs_pd_load_post(to, hw_flags)); - if (to != from || (hw_flags & MI_FORCE_RESTORE)) { ret = mi_set_context(req, hw_flags); if (ret) @@ -738,8 +782,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) * MI_SET_CONTEXT instead of when the next seqno has completed. */ if (from != NULL) { - from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; - i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req); + from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; + i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req); /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the * whole damn pipeline, we don't need to explicitly mark the * object dirty. The only exception is that the context must be @@ -747,10 +791,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) * able to defer doing this until we know the object would be * swapped, but there is no way to do that yet. */ - from->legacy_hw_ctx.rcs_state->dirty = 1; + from->engine[RCS].state->dirty = 1; /* obj is kept alive until the next request by its active ref */ - i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); + i915_gem_object_ggtt_unpin(from->engine[RCS].state); i915_gem_context_unreference(from); } i915_gem_context_reference(to); @@ -759,9 +803,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) /* GEN8 does *not* require an explicit reload if the PDPs have been * setup, and we do not wish to move them. */ - if (needs_pd_load_post(to, hw_flags)) { + if (needs_pd_load_post(ppgtt, to, hw_flags)) { trace_switch_mm(engine, to); - ret = to->ppgtt->switch_mm(to->ppgtt, req); + ret = ppgtt->switch_mm(ppgtt, req); /* The hardware context switch is emitted, but we haven't * actually changed the state - so it's probably safe to bail * here. 
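The hw_flags selection in do_rcs_switch() above reduces to three cases; annotated for reference:

	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
		hw_flags = MI_RESTORE_INHIBIT;	/* no valid context image to restore yet */
	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
		hw_flags = MI_FORCE_RESTORE;	/* page directories changed behind our back */
	else
		hw_flags = 0;			/* plain switch */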
Still, let the user know something dangerous has @@ -771,33 +815,33 @@ static int do_rcs_switch(struct drm_i915_gem_request *req) return ret; } - if (to->ppgtt) - to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); + if (ppgtt) + ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); for (i = 0; i < MAX_L3_SLICES; i++) { if (!(to->remap_slice & (1<<i))) continue; - ret = i915_gem_l3_remap(req, i); + ret = remap_l3(req, i); if (ret) return ret; to->remap_slice &= ~(1<<i); } - if (!to->legacy_hw_ctx.initialized) { + if (!to->engine[RCS].initialised) { if (engine->init_context) { ret = engine->init_context(req); if (ret) return ret; } - to->legacy_hw_ctx.initialized = true; + to->engine[RCS].initialised = true; } return 0; unpin_out: - i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state); + i915_gem_object_ggtt_unpin(to->engine[RCS].state); return ret; } @@ -817,25 +861,24 @@ unpin_out: int i915_switch_context(struct drm_i915_gem_request *req) { struct intel_engine_cs *engine = req->engine; - struct drm_i915_private *dev_priv = req->i915; WARN_ON(i915.enable_execlists); - WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); + lockdep_assert_held(&req->i915->dev->struct_mutex); - if (engine->id != RCS || - req->ctx->legacy_hw_ctx.rcs_state == NULL) { - struct intel_context *to = req->ctx; + if (!req->ctx->engine[engine->id].state) { + struct i915_gem_context *to = req->ctx; + struct i915_hw_ppgtt *ppgtt = + to->ppgtt ?: req->i915->mm.aliasing_ppgtt; - if (needs_pd_load_pre(engine, to)) { + if (needs_pd_load_pre(ppgtt, engine, to)) { int ret; trace_switch_mm(engine, to); - ret = to->ppgtt->switch_mm(to->ppgtt, req); + ret = ppgtt->switch_mm(ppgtt, req); if (ret) return ret; - /* Doing a PD load always reloads the page dirs */ - to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); + ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); } if (to != engine->last_context) { @@ -861,7 +904,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_context_create *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; if (!contexts_enabled(dev)) @@ -890,7 +933,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_context_destroy *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; if (args->pad != 0) @@ -903,13 +946,13 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - ctx = i915_gem_context_get(file_priv, args->ctx_id); + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); return PTR_ERR(ctx); } - idr_remove(&ctx->file_priv->context_idr, ctx->user_handle); + idr_remove(&file_priv->context_idr, ctx->user_handle); i915_gem_context_unreference(ctx); mutex_unlock(&dev->struct_mutex); @@ -922,14 +965,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_context_param *args = data; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - ctx = i915_gem_context_get(file_priv, args->ctx_id); + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); return PTR_ERR(ctx); @@ -965,14 +1008,14 @@ int 
i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_context_param *args = data; - struct intel_context *ctx; + struct i915_gem_context *ctx; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - ctx = i915_gem_context_get(file_priv, args->ctx_id); + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); return PTR_ERR(ctx); @@ -1004,3 +1047,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, return ret; } + +int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, + void *data, struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_reset_stats *args = data; + struct i915_ctx_hang_stats *hs; + struct i915_gem_context *ctx; + int ret; + + if (args->flags || args->pad) + return -EINVAL; + + if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id); + if (IS_ERR(ctx)) { + mutex_unlock(&dev->struct_mutex); + return PTR_ERR(ctx); + } + hs = &ctx->hang_stats; + + if (capable(CAP_SYS_ADMIN)) + args->reset_count = i915_reset_count(&dev_priv->gpu_error); + else + args->reset_count = 0; + + args->batch_active = hs->batch_active; + args->batch_pending = hs->batch_pending; + + mutex_unlock(&dev->struct_mutex); + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index ea1f8d1bd228..b144c3f5c650 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -154,7 +154,7 @@ none: if (ret) return ret; - i915_gem_retire_requests(dev); + i915_gem_retire_requests(to_i915(dev)); goto search_again; } @@ -265,7 +265,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) if (ret) return ret; - i915_gem_retire_requests(vm->dev); + i915_gem_retire_requests(to_i915(vm->dev)); WARN_ON(!list_empty(&vm->active_list)); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 33df74d98269..8097698b9622 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -714,7 +714,7 @@ eb_vma_misplaced(struct i915_vma *vma) static int i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, struct list_head *vmas, - struct intel_context *ctx, + struct i915_gem_context *ctx, bool *need_relocs) { struct drm_i915_gem_object *obj; @@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, struct i915_address_space *vm; struct list_head ordered_vmas; struct list_head pinned_vmas; - bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4; + bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4; int retry; i915_gem_retire_requests_ring(engine); @@ -826,7 +826,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct intel_engine_cs *engine, struct eb_vmas *eb, struct drm_i915_gem_exec_object2 *exec, - struct intel_context *ctx) + struct i915_gem_context *ctx) { struct drm_i915_gem_relocation_entry *reloc; struct i915_address_space *vm; @@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, } if (flush_chipset) - i915_gem_chipset_flush(req->engine->dev); + i915_gem_chipset_flush(req->engine->i915); if (flush_domains & I915_GEM_DOMAIN_GTT) wmb(); 
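i915_gem_context_reset_stats_ioctl() above services DRM_IOCTL_I915_GET_RESET_STATS. A hypothetical userspace probe, assuming the uapi layout from include/uapi/drm/i915_drm.h and an fd/ctx_id obtained elsewhere:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static void query_reset_stats(int fd, uint32_t ctx_id)
{
	struct drm_i915_reset_stats stats;

	memset(&stats, 0, sizeof(stats));
	stats.ctx_id = ctx_id;	/* flags and pad must remain zero, or the kernel returns -EINVAL */

	if (drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) == 0)
		printf("batch_active %u batch_pending %u reset_count %u\n",
		       stats.batch_active, stats.batch_pending,
		       stats.reset_count);
}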
@@ -1063,17 +1063,17 @@ validate_exec_list(struct drm_device *dev, return 0; } -static struct intel_context * +static struct i915_gem_context * i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, struct intel_engine_cs *engine, const u32 ctx_id) { - struct intel_context *ctx = NULL; + struct i915_gem_context *ctx = NULL; struct i915_ctx_hang_stats *hs; if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE) return ERR_PTR(-EINVAL); - ctx = i915_gem_context_get(file->driver_priv, ctx_id); + ctx = i915_gem_context_lookup(file->driver_priv, ctx_id); if (IS_ERR(ctx)) return ctx; @@ -1083,14 +1083,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, return ERR_PTR(-EIO); } - if (i915.enable_execlists && !ctx->engine[engine->id].state) { - int ret = intel_lr_context_deferred_alloc(ctx, engine); - if (ret) { - DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret); - return ERR_PTR(ret); - } - } - return ctx; } @@ -1125,7 +1117,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { i915_gem_request_assign(&obj->last_fenced_req, req); if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, &dev_priv->mm.fence_list); } @@ -1436,7 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_object *batch_obj; struct drm_i915_gem_exec_object2 shadow_exec_entry; struct intel_engine_cs *engine; - struct intel_context *ctx; + struct i915_gem_context *ctx; struct i915_address_space *vm; struct i915_execbuffer_params params_master; /* XXX: will be removed later */ struct i915_execbuffer_params *params = &params_master; diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c index a2b938ec01a7..2b6bdc267fb5 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/i915_gem_fence.c @@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page) void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) { - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + struct page *page; int i; if (obj->bit_17 == NULL) return; i = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - struct page *page = sg_page_iter_page(&sg_iter); + for_each_sgt_page(page, sgt_iter, obj->pages) { char new_bit_17 = page_to_phys(page) >> 17; if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) { @@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) { - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + struct page *page; int page_count = obj->base.size >> PAGE_SHIFT; int i; @@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) } i = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17)) + + for_each_sgt_page(page, sgt_iter, obj->pages) { + if (page_to_phys(page) & (1 << 17)) __set_bit(i, obj->bit_17); else __clear_bit(i, obj->bit_17); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 92acdff9dad3..46684779d4d6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -93,6 +93,13 @@ * */ +static inline struct i915_ggtt * +i915_vm_to_ggtt(struct 
i915_address_space *vm) +{ + GEM_BUG_ON(!i915_is_ggtt(vm)); + return container_of(vm, struct i915_ggtt, base); +} + static int i915_get_ggtt_vma_pages(struct i915_vma *vma); @@ -103,25 +110,29 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = { .type = I915_GGTT_VIEW_ROTATED, }; -static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) +int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, + int enable_ppgtt) { bool has_aliasing_ppgtt; bool has_full_ppgtt; bool has_full_48bit_ppgtt; - has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; - has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; - has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9; + has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6; + has_full_ppgtt = INTEL_GEN(dev_priv) >= 7; + has_full_48bit_ppgtt = + IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9; - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) has_full_ppgtt = false; /* emulation is too hard */ + if (!has_aliasing_ppgtt) + return 0; + /* * We don't allow disabling PPGTT for gen9+ as it's a requirement for * execlists, the sole mechanism available to submit work. */ - if (INTEL_INFO(dev)->gen < 9 && - (enable_ppgtt == 0 || !has_aliasing_ppgtt)) + if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9) return 0; if (enable_ppgtt == 1) @@ -135,19 +146,19 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) #ifdef CONFIG_INTEL_IOMMU /* Disable ppgtt on SNB if VT-d is on. */ - if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { + if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) { DRM_INFO("Disabling PPGTT because VT-d is on\n"); return 0; } #endif /* Early VLV doesn't have this */ - if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) { + if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) { DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); return 0; } - if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) + if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) return has_full_48bit_ppgtt ? 3 : 2; else return has_aliasing_ppgtt ? 
1 : 0; @@ -866,6 +877,7 @@ static void gen8_free_page_tables(struct drm_device *dev, static int gen8_init_scratch(struct i915_address_space *vm) { struct drm_device *dev = vm->dev; + int ret; vm->scratch_page = alloc_scratch_page(dev); if (IS_ERR(vm->scratch_page)) @@ -873,24 +885,21 @@ static int gen8_init_scratch(struct i915_address_space *vm) vm->scratch_pt = alloc_pt(dev); if (IS_ERR(vm->scratch_pt)) { - free_scratch_page(dev, vm->scratch_page); - return PTR_ERR(vm->scratch_pt); + ret = PTR_ERR(vm->scratch_pt); + goto free_scratch_page; } vm->scratch_pd = alloc_pd(dev); if (IS_ERR(vm->scratch_pd)) { - free_pt(dev, vm->scratch_pt); - free_scratch_page(dev, vm->scratch_page); - return PTR_ERR(vm->scratch_pd); + ret = PTR_ERR(vm->scratch_pd); + goto free_pt; } if (USES_FULL_48BIT_PPGTT(dev)) { vm->scratch_pdp = alloc_pdp(dev); if (IS_ERR(vm->scratch_pdp)) { - free_pd(dev, vm->scratch_pd); - free_pt(dev, vm->scratch_pt); - free_scratch_page(dev, vm->scratch_page); - return PTR_ERR(vm->scratch_pdp); + ret = PTR_ERR(vm->scratch_pdp); + goto free_pd; } } @@ -900,6 +909,15 @@ static int gen8_init_scratch(struct i915_address_space *vm) gen8_initialize_pdp(vm, vm->scratch_pdp); return 0; + +free_pd: + free_pd(dev, vm->scratch_pd); +free_pt: + free_pt(dev, vm->scratch_pt); +free_scratch_page: + free_scratch_page(dev, vm->scratch_page); + + return ret; } static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) @@ -978,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - if (intel_vgpu_active(vm->dev)) + if (intel_vgpu_active(to_i915(vm->dev))) gen8_ppgtt_notify_vgt(ppgtt, false); if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) @@ -1529,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 0, 0, GEN8_PML4E_SHIFT); - if (intel_vgpu_active(ppgtt->base.dev)) { + if (intel_vgpu_active(to_i915(ppgtt->base.dev))) { ret = gen8_preallocate_top_level_pdps(ppgtt); if (ret) goto free_scratch; } } - if (intel_vgpu_active(ppgtt->base.dev)) + if (intel_vgpu_active(to_i915(ppgtt->base.dev))) gen8_ppgtt_notify_vgt(ppgtt, true); return 0; @@ -1821,20 +1839,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, enum i915_cache_level cache_level, u32 flags) { struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - gen6_pte_t *pt_vaddr; unsigned first_entry = start >> PAGE_SHIFT; unsigned act_pt = first_entry / GEN6_PTES; unsigned act_pte = first_entry % GEN6_PTES; - struct sg_page_iter sg_iter; + gen6_pte_t *pt_vaddr = NULL; + struct sgt_iter sgt_iter; + dma_addr_t addr; - pt_vaddr = NULL; - for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { + for_each_sgt_dma(addr, sgt_iter, pages) { if (pt_vaddr == NULL) pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]); pt_vaddr[act_pte] = - vm->pte_encode(sg_page_iter_dma_address(&sg_iter), - cache_level, true, flags); + vm->pte_encode(addr, cache_level, true, flags); if (++act_pte == GEN6_PTES) { kunmap_px(ppgtt, pt_vaddr); @@ -1843,6 +1860,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, act_pte = 0; } } + if (pt_vaddr) kunmap_px(ppgtt, pt_vaddr); } @@ -2064,7 +2082,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) } else BUG(); - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) ppgtt->switch_mm = vgpu_mm_switch; ret = gen6_ppgtt_alloc(ppgtt); @@ -2140,7 +2158,7 @@ static void gtt_write_workarounds(struct drm_device *dev) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); } 
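The gen8_init_scratch() rework above replaces duplicated cleanup on each failure path with the usual kernel goto-unwind ladder, where each label releases exactly what was allocated before the failing step and falls through to the earlier labels. A minimal self-contained sketch of that pattern (with malloc() standing in for the driver's alloc_pt()/alloc_pd() helpers, purely for illustration):

    #include <stdlib.h>

    struct scratch { void *page, *pt, *pd; };  /* toy stand-ins */

    static int scratch_init(struct scratch *s)
    {
        s->page = malloc(4096);
        if (!s->page)
            return -1;          /* nothing to unwind yet */

        s->pt = malloc(4096);
        if (!s->pt)
            goto free_page;

        s->pd = malloc(4096);
        if (!s->pd)
            goto free_pt;       /* frees pt, then falls through to page */

        return 0;

    free_pt:
        free(s->pt);
    free_page:
        free(s->page);
        return -1;
    }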
-int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) +static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) { struct drm_i915_private *dev_priv = dev->dev_private; int ret = 0; @@ -2179,20 +2197,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev) return 0; } -int i915_ppgtt_init_ring(struct drm_i915_gem_request *req) -{ - struct drm_i915_private *dev_priv = req->i915; - struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - - if (i915.enable_execlists) - return 0; - - if (!ppgtt) - return 0; - - return ppgtt->switch_mm(ppgtt, req); -} - struct i915_hw_ppgtt * i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) { @@ -2275,12 +2279,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) dev_priv->mm.interruptible = interruptible; } -void i915_check_and_clear_faults(struct drm_device *dev) +void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_INFO(dev_priv)->gen < 6) return; for_each_engine(engine, dev_priv) { @@ -2324,7 +2327,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 6) return; - i915_check_and_clear_faults(dev); + i915_check_and_clear_faults(dev_priv); ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, true); @@ -2358,23 +2361,21 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, enum i915_cache_level level, u32 unused) { struct drm_i915_private *dev_priv = to_i915(vm->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - unsigned first_entry = start >> PAGE_SHIFT; - gen8_pte_t __iomem *gtt_entries = - (gen8_pte_t __iomem *)ggtt->gsm + first_entry; - int i = 0; - struct sg_page_iter sg_iter; - dma_addr_t addr = 0; /* shut up gcc */ + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + struct sgt_iter sgt_iter; + gen8_pte_t __iomem *gtt_entries; + gen8_pte_t gtt_entry; + dma_addr_t addr; int rpm_atomic_seq; + int i = 0; rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); - for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { - addr = sg_dma_address(sg_iter.sg) + - (sg_iter.sg_pgoffset << PAGE_SHIFT); - gen8_set_pte(&gtt_entries[i], - gen8_pte_encode(addr, level, true)); - i++; + gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT); + + for_each_sgt_dma(addr, sgt_iter, st) { + gtt_entry = gen8_pte_encode(addr, level, true); + gen8_set_pte(&gtt_entries[i++], gtt_entry); } /* @@ -2385,8 +2386,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, * hardware should work, we must keep this posting read for paranoia. */ if (i != 0) - WARN_ON(readq(&gtt_entries[i-1]) - != gen8_pte_encode(addr, level, true)); + WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry); /* This next bit makes the above posting read even more important. 
We * want to flush the TLBs only after we're certain all the PTE updates @@ -2436,21 +2436,21 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, enum i915_cache_level level, u32 flags) { struct drm_i915_private *dev_priv = to_i915(vm->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; - unsigned first_entry = start >> PAGE_SHIFT; - gen6_pte_t __iomem *gtt_entries = - (gen6_pte_t __iomem *)ggtt->gsm + first_entry; - int i = 0; - struct sg_page_iter sg_iter; - dma_addr_t addr = 0; + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); + struct sgt_iter sgt_iter; + gen6_pte_t __iomem *gtt_entries; + gen6_pte_t gtt_entry; + dma_addr_t addr; int rpm_atomic_seq; + int i = 0; rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); - for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { - addr = sg_page_iter_dma_address(&sg_iter); - iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]); - i++; + gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT); + + for_each_sgt_dma(addr, sgt_iter, st) { + gtt_entry = vm->pte_encode(addr, level, true, flags); + iowrite32(gtt_entry, &gtt_entries[i++]); } /* XXX: This serves as a posting read to make sure that the PTE has @@ -2459,10 +2459,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, * of NUMA access patterns. Therefore, even with the way we assume * hardware should work, we must keep this posting read for paranoia. */ - if (i != 0) { - unsigned long gtt = readl(&gtt_entries[i-1]); - WARN_ON(gtt != vm->pte_encode(addr, level, true, flags)); - } + if (i != 0) + WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry); /* This next bit makes the above posting read even more important. We * want to flush the TLBs only after we're certain all the PTE updates @@ -2474,13 +2472,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); } +static void nop_clear_range(struct i915_address_space *vm, + uint64_t start, + uint64_t length, + bool use_scratch) +{ +} + static void gen8_ggtt_clear_range(struct i915_address_space *vm, uint64_t start, uint64_t length, bool use_scratch) { struct drm_i915_private *dev_priv = to_i915(vm->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; gen8_pte_t scratch_pte, __iomem *gtt_base = @@ -2512,7 +2517,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, bool use_scratch) { struct drm_i915_private *dev_priv = to_i915(vm->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; gen6_pte_t scratch_pte, __iomem *gtt_base = @@ -2727,7 +2732,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, i915_address_space_init(&ggtt->base, dev_priv); ggtt->base.total += PAGE_SIZE; - if (intel_vgpu_active(dev)) { + if (intel_vgpu_active(dev_priv)) { ret = intel_vgt_balloon(dev); if (ret) return ret; @@ -2831,7 +2836,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev) i915_gem_cleanup_stolen(dev); if (drm_mm_initialized(&ggtt->base.mm)) { - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) intel_vgt_deballoon(); drm_mm_takedown(&ggtt->base.mm); @@ -3069,14 +3074,17 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ret = ggtt_probe_common(dev, ggtt->size); - ggtt->base.clear_range = gen8_ggtt_clear_range; - if (IS_CHERRYVIEW(dev_priv)) 
- ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL; - else - ggtt->base.insert_entries = gen8_ggtt_insert_entries; ggtt->base.bind_vma = ggtt_bind_vma; ggtt->base.unbind_vma = ggtt_unbind_vma; + ggtt->base.clear_range = nop_clear_range; + if (!USES_FULL_PPGTT(dev_priv)) + ggtt->base.clear_range = gen8_ggtt_clear_range; + + ggtt->base.insert_entries = gen8_ggtt_insert_entries; + if (IS_CHERRYVIEW(dev_priv)) + ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL; + return ret; } @@ -3219,14 +3227,6 @@ int i915_ggtt_init_hw(struct drm_device *dev) if (intel_iommu_gfx_mapped) DRM_INFO("VT-d active for gfx access\n"); #endif - /* - * i915.enable_ppgtt is read-only, so do an early pass to validate the - * user's requested state against the hardware/driver capabilities. We - * do this now so that we can print out any log messages once rather - * than every time we check intel_enable_ppgtt(). - */ - i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt); - DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); return 0; @@ -3250,9 +3250,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_object *obj; struct i915_vma *vma; - bool flush; - i915_check_and_clear_faults(dev); + i915_check_and_clear_faults(dev_priv); /* First fill our portion of the GTT with scratch pages */ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, @@ -3260,19 +3259,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) /* Cache flush objects bound into GGTT and rebind them. */ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - flush = false; list_for_each_entry(vma, &obj->vma_list, obj_link) { if (vma->vm != &ggtt->base) continue; WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE)); - - flush = true; } - if (flush) - i915_gem_clflush_object(obj, obj->pin_display); + if (obj->pin_display) + WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false)); } if (INTEL_INFO(dev)->gen >= 8) { @@ -3398,9 +3394,11 @@ static struct sg_table * intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, struct drm_i915_gem_object *obj) { + const size_t n_pages = obj->base.size / PAGE_SIZE; unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height; unsigned int size_pages_uv; - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + dma_addr_t dma_addr; unsigned long i; dma_addr_t *page_addr_list; struct sg_table *st; @@ -3409,7 +3407,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, int ret = -ENOMEM; /* Allocate a temporary list of source pages for random access. */ - page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE, + page_addr_list = drm_malloc_gfp(n_pages, sizeof(dma_addr_t), GFP_TEMPORARY); if (!page_addr_list) @@ -3432,11 +3430,10 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, /* Populate source page list from the object. 
*/ i = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - page_addr_list[i] = sg_page_iter_dma_address(&sg_iter); - i++; - } + for_each_sgt_dma(dma_addr, sgt_iter, obj->pages) + page_addr_list[i++] = dma_addr; + GEM_BUG_ON(i != n_pages); st->nents = 0; sg = st->sgl; @@ -3634,3 +3631,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj, return obj->base.size; } } + +void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) +{ + void __iomem *ptr; + + lockdep_assert_held(&vma->vm->dev->struct_mutex); + if (WARN_ON(!vma->obj->map_and_fenceable)) + return ERR_PTR(-ENODEV); + + GEM_BUG_ON(!vma->is_ggtt); + GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0); + + ptr = vma->iomap; + if (ptr == NULL) { + ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable, + vma->node.start, + vma->node.size); + if (ptr == NULL) + return ERR_PTR(-ENOMEM); + + vma->iomap = ptr; + } + + vma->pin_count++; + return ptr; +} diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 0008543d55f6..62be77cac5cd 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -34,6 +34,8 @@ #ifndef __I915_GEM_GTT_H__ #define __I915_GEM_GTT_H__ +#include <linux/io-mapping.h> + struct drm_i915_file_private; typedef uint32_t gen6_pte_t; @@ -175,6 +177,7 @@ struct i915_vma { struct drm_mm_node node; struct drm_i915_gem_object *obj; struct i915_address_space *vm; + void __iomem *iomap; /** Flags and address space this VMA is bound to */ #define GLOBAL_BIND (1<<0) @@ -518,9 +521,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev); void i915_gem_init_ggtt(struct drm_device *dev); void i915_ggtt_cleanup_hw(struct drm_device *dev); -int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt); int i915_ppgtt_init_hw(struct drm_device *dev); -int i915_ppgtt_init_ring(struct drm_i915_gem_request *req); void i915_ppgtt_release(struct kref *kref); struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv); @@ -535,7 +536,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt) kref_put(&ppgtt->ref, i915_ppgtt_release); } -void i915_check_and_clear_faults(struct drm_device *dev); +void i915_check_and_clear_faults(struct drm_i915_private *dev_priv); void i915_gem_suspend_gtt_mappings(struct drm_device *dev); void i915_gem_restore_gtt_mappings(struct drm_device *dev); @@ -560,4 +561,36 @@ size_t i915_ggtt_view_size(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view); +/** + * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture + * @vma: VMA to iomap + * + * The passed-in VMA has to be pinned in the global GTT mappable region. + * An extra pinning of the VMA is acquired for the returned iomapping; + * the caller must call i915_vma_unpin_iomap() to relinquish the pinning + * after the iomapping is no longer required. + * + * Callers must hold the struct_mutex. + * + * Returns a valid iomapped pointer or ERR_PTR. + */ +void __iomem *i915_vma_pin_iomap(struct i915_vma *vma); + +/** + * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap() + * @vma: VMA to unpin + * + * Unpins the previously iomapped VMA from i915_vma_pin_iomap(). + * + * Callers must hold the struct_mutex. This function is only valid to be + * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap(). 
+ */ +static inline void i915_vma_unpin_iomap(struct i915_vma *vma) +{ + lockdep_assert_held(&vma->vm->dev->struct_mutex); + GEM_BUG_ON(vma->pin_count == 0); + GEM_BUG_ON(vma->iomap == NULL); + vma->pin_count--; +} + #endif diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 71611bf21fca..7c93327b70fe 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -29,7 +29,7 @@ #include "intel_renderstate.h" static const struct intel_renderstate_rodata * -render_state_get_rodata(struct drm_device *dev, const int gen) +render_state_get_rodata(const int gen) { switch (gen) { case 6: @@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen) return NULL; } -static int render_state_init(struct render_state *so, struct drm_device *dev) +static int render_state_init(struct render_state *so, + struct drm_i915_private *dev_priv) { int ret; - so->gen = INTEL_INFO(dev)->gen; - so->rodata = render_state_get_rodata(dev, so->gen); + so->gen = INTEL_GEN(dev_priv); + so->rodata = render_state_get_rodata(so->gen); if (so->rodata == NULL) return 0; if (so->rodata->batch_items * 4 > 4096) return -EINVAL; - so->obj = i915_gem_alloc_object(dev, 4096); - if (so->obj == NULL) - return -ENOMEM; + so->obj = i915_gem_object_create(dev_priv->dev, 4096); + if (IS_ERR(so->obj)) + return PTR_ERR(so->obj); ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); if (ret) @@ -177,7 +178,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine, if (WARN_ON(engine->id != RCS)) return -ENOENT; - ret = render_state_init(so, engine->dev); + ret = render_state_init(so, engine->i915); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 425e721aac58..538c30499848 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, unsigned long count = 0; trace_i915_gem_shrink(dev_priv, target, flags); - i915_gem_retire_requests(dev_priv->dev); + i915_gem_retire_requests(dev_priv); + + /* + * Unbinding of objects will require HW access; let us not wake the + * device just to recover a little memory. If absolutely necessary, + * we will force the wake during oom-notifier. 
+ */ + if ((flags & I915_SHRINK_BOUND) && + !intel_runtime_pm_get_if_in_use(dev_priv)) + flags &= ~I915_SHRINK_BOUND; /* * As we may completely rewrite the (un)bound list whilst unbinding @@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, list_splice(&still_in_list, phase->list); } - i915_gem_retire_requests(dev_priv->dev); + if (flags & I915_SHRINK_BOUND) + intel_runtime_pm_put(dev_priv); + + i915_gem_retire_requests(dev_priv); return count; } @@ -345,7 +357,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) return NOTIFY_DONE; + intel_runtime_pm_get(dev_priv); freed_pages = i915_gem_shrink_all(dev_priv); + intel_runtime_pm_put(dev_priv); /* Because we may be allocating inside our own driver, we cannot * assert that there are no objects with pinned pages that are not @@ -386,17 +400,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr struct drm_i915_private *dev_priv = container_of(nb, struct drm_i915_private, mm.vmap_notifier); struct shrinker_lock_uninterruptible slu; - unsigned long freed_pages; + struct i915_vma *vma, *next; + unsigned long freed_pages = 0; + int ret; if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) return NOTIFY_DONE; - freed_pages = i915_gem_shrink(dev_priv, -1UL, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_ACTIVE | - I915_SHRINK_VMAPS); + /* Force everything onto the inactive lists */ + ret = i915_gpu_idle(dev_priv->dev); + if (ret) + goto out; + + intel_runtime_pm_get(dev_priv); + freed_pages += i915_gem_shrink(dev_priv, -1UL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_ACTIVE | + I915_SHRINK_VMAPS); + intel_runtime_pm_put(dev_priv); + + /* We also want to clear any cached iomaps as they wrap vmap */ + list_for_each_entry_safe(vma, next, + &dev_priv->ggtt.base.inactive_list, vm_link) { + unsigned long count = vma->node.size >> PAGE_SHIFT; + if (vma->iomap && i915_vma_unbind(vma) == 0) + freed_pages += count; + } +out: i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu); *(unsigned long *)ptr += freed_pages; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index b7ce963fb8f8..f9253f2b7ba0 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -56,7 +56,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, /* See the comment at the drm_mm_init() call for more about this check. 
* WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */ - if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096) + if (IS_GEN8(dev_priv) && start < 4096) start = 4096; mutex_lock(&dev_priv->mm.stolen_lock); @@ -109,9 +109,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 3) { u32 bsm; - pci_read_config_dword(dev->pdev, BSM, &bsm); + pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm); - base = bsm & BSM_MASK; + base = bsm & INTEL_BSM_MASK; } else if (IS_I865G(dev)) { u16 toud = 0; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index b9bdb34032cd..a6eb5c47a49c 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) if (INTEL_INFO(obj->base.dev)->gen >= 4) return true; - if (INTEL_INFO(obj->base.dev)->gen == 3) { + if (IS_GEN3(obj->base.dev)) { if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) return false; } else { @@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, */ if (obj->map_and_fenceable && !i915_gem_object_fence_ok(obj, args->tiling_mode)) - ret = i915_gem_object_ggtt_unbind(obj); + ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); if (ret == 0) { if (obj->pages && diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 32d9726e38b1..2314c88323e3 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) static void i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) { - struct sg_page_iter sg_iter; + struct sgt_iter sgt_iter; + struct page *page; BUG_ON(obj->userptr.work != NULL); __i915_gem_userptr_set_active(obj, false); @@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) i915_gem_gtt_finish_object(obj); - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { - struct page *page = sg_page_iter_page(&sg_iter); - + for_each_sgt_page(page, sgt_iter, obj->pages) { if (obj->dirty) set_page_dirty(page); @@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file return 0; } -int -i915_gem_init_userptr(struct drm_device *dev) +void i915_gem_init_userptr(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); mutex_init(&dev_priv->mm_lock); hash_init(dev_priv->mm_structs); - return 0; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 89725c9efc25..34ff2459ceea 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); } - if (INTEL_INFO(dev)->gen == 7) + if (IS_GEN7(dev)) err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); for (i = 0; i < ARRAY_SIZE(error->ring); i++) @@ -824,19 +824,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv, return error_code; } -static void i915_gem_record_fences(struct drm_device *dev, +static void i915_gem_record_fences(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { - struct drm_i915_private *dev_priv = dev->dev_private; int i; - if (IS_GEN3(dev) || IS_GEN2(dev)) { + if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) { for (i = 0; i < 
dev_priv->num_fence_regs; i++) error->fence[i] = I915_READ(FENCE_REG(i)); - } else if (IS_GEN5(dev) || IS_GEN4(dev)) { + } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) { for (i = 0; i < dev_priv->num_fence_regs; i++) error->fence[i] = I915_READ64(FENCE_REG_965_LO(i)); - } else if (INTEL_INFO(dev)->gen >= 6) { + } else if (INTEL_GEN(dev_priv) >= 6) { for (i = 0; i < dev_priv->num_fence_regs; i++) error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i)); } @@ -851,7 +850,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv, struct intel_engine_cs *to; enum intel_engine_id id; - if (!i915_semaphore_is_enabled(dev_priv->dev)) + if (!i915_semaphore_is_enabled(dev_priv)) return; if (!error->semaphore_obj) @@ -893,31 +892,29 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv, } } -static void i915_record_ring_state(struct drm_device *dev, +static void i915_record_ring_state(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error, struct intel_engine_cs *engine, struct drm_i915_error_ring *ering) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_GEN(dev_priv) >= 6) { ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); ering->fault_reg = I915_READ(RING_FAULT_REG(engine)); - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_GEN(dev_priv) >= 8) gen8_record_semaphore_state(dev_priv, error, engine, ering); else gen6_record_semaphore_state(dev_priv, engine, ering); } - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base)); ering->instps = I915_READ(RING_INSTPS(engine->mmio_base)); ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; } @@ -939,10 +936,10 @@ static void i915_record_ring_state(struct drm_device *dev, ering->tail = I915_READ_TAIL(engine); ering->ctl = I915_READ_CTL(engine); - if (I915_NEED_GFX_HWS(dev)) { + if (I915_NEED_GFX_HWS(dev_priv)) { i915_reg_t mmio; - if (IS_GEN7(dev)) { + if (IS_GEN7(dev_priv)) { switch (engine->id) { default: case RCS: @@ -958,7 +955,7 @@ static void i915_record_ring_state(struct drm_device *dev, mmio = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN6(engine->dev)) { + } else if (IS_GEN6(engine->i915)) { mmio = RING_HWS_PGA_GEN6(engine->mmio_base); } else { /* XXX: gen8 returns to sanity */ @@ -971,18 +968,18 @@ static void i915_record_ring_state(struct drm_device *dev, ering->hangcheck_score = engine->hangcheck.score; ering->hangcheck_action = engine->hangcheck.action; - if (USES_PPGTT(dev)) { + if (USES_PPGTT(dev_priv)) { int i; ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); - if (IS_GEN6(dev)) + if (IS_GEN6(dev_priv)) ering->vm_info.pp_dir_base = I915_READ(RING_PP_DIR_BASE_READ(engine)); - else if (IS_GEN7(dev)) + else if (IS_GEN7(dev_priv)) ering->vm_info.pp_dir_base = I915_READ(RING_PP_DIR_BASE(engine)); - else if (INTEL_INFO(dev)->gen >= 8) + else if (INTEL_GEN(dev_priv) >= 8) for (i = 0; i < 4; i++) { ering->vm_info.pdp[i] = I915_READ(GEN8_RING_PDP_UDW(engine, i)); @@ -998,7 +995,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs 
*engine, struct drm_i915_error_state *error, struct drm_i915_error_ring *ering) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_gem_object *obj; /* Currently render ring is the only HW context user */ @@ -1016,10 +1013,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine, } } -static void i915_gem_record_rings(struct drm_device *dev, +static void i915_gem_record_rings(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_request *request; int i, count; @@ -1030,12 +1026,12 @@ static void i915_gem_record_rings(struct drm_device *dev, error->ring[i].pid = -1; - if (engine->dev == NULL) + if (!intel_engine_initialized(engine)) continue; error->ring[i].valid = true; - i915_record_ring_state(dev, error, engine, &error->ring[i]); + i915_record_ring_state(dev_priv, error, engine, &error->ring[i]); request = i915_gem_find_active_request(engine); if (request) { @@ -1301,15 +1297,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); - i915_get_extra_instdone(dev, error->extra_instdone); + i915_get_extra_instdone(dev_priv, error->extra_instdone); } -static void i915_error_capture_msg(struct drm_device *dev, +static void i915_error_capture_msg(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error, u32 engine_mask, const char *error_msg) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 ecode; int ring_id = -1, len; @@ -1317,7 +1312,7 @@ static void i915_error_capture_msg(struct drm_device *dev, len = scnprintf(error->error_msg, sizeof(error->error_msg), "GPU HANG: ecode %d:%d:0x%08x", - INTEL_INFO(dev)->gen, ring_id, ecode); + INTEL_GEN(dev_priv), ring_id, ecode); if (ring_id != -1 && error->ring[ring_id].pid != -1) len += scnprintf(error->error_msg + len, @@ -1352,11 +1347,11 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv, * out a structure which becomes available in debugfs for user level tools * to pick up. 
*/ -void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, +void i915_capture_error_state(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *error_msg) { static bool warned; - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_error_state *error; unsigned long flags; @@ -1372,15 +1367,15 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, i915_capture_gen_state(dev_priv, error); i915_capture_reg_state(dev_priv, error); i915_gem_capture_buffers(dev_priv, error); - i915_gem_record_fences(dev, error); - i915_gem_record_rings(dev, error); + i915_gem_record_fences(dev_priv, error); + i915_gem_record_rings(dev_priv, error); do_gettimeofday(&error->time); - error->overlay = intel_overlay_capture_error_state(dev); - error->display = intel_display_capture_error_state(dev); + error->overlay = intel_overlay_capture_error_state(dev_priv); + error->display = intel_display_capture_error_state(dev_priv); - i915_error_capture_msg(dev, error, engine_mask, error_msg); + i915_error_capture_msg(dev_priv, error, engine_mask, error_msg); DRM_INFO("%s\n", error->error_msg); spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); @@ -1400,7 +1395,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); - DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index); + DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index); warned = true; } } @@ -1450,17 +1445,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) } /* NB: please notice the memset */ -void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) +void i915_get_extra_instdone(struct drm_i915_private *dev_priv, + uint32_t *instdone) { - struct drm_i915_private *dev_priv = dev->dev_private; memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); - if (IS_GEN2(dev) || IS_GEN3(dev)) + if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv)) instdone[0] = I915_READ(GEN2_INSTDONE); - else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { + else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) { instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); instdone[1] = I915_READ(GEN4_INSTDONE1); - } else if (INTEL_INFO(dev)->gen >= 7) { + } else if (INTEL_GEN(dev_priv) >= 7) { instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); instdone[1] = I915_READ(GEN7_SC_INSTDONE); instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h index 80786d9f9ad3..cf5a65be4fe0 100644 --- a/drivers/gpu/drm/i915/i915_guc_reg.h +++ b/drivers/gpu/drm/i915/i915_guc_reg.h @@ -67,11 +67,11 @@ #define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ #define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4) +/* Defines WOPCM space available to GuC firmware */ #define GUC_WOPCM_SIZE _MMIO(0xc050) -#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */ - /* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ -#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE) +#define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */ +#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */ #define GEN8_GT_PM_CONFIG _MMIO(0x138140) #define GEN9LP_GT_PM_CONFIG 
_MMIO(0x138140) diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index d40c13fb6643..ac72451c571c 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -158,8 +158,7 @@ static int host2guc_sample_forcewake(struct intel_guc *guc, data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; /* WaRsDisableCoarsePowerGating:skl,bxt */ - if (!intel_enable_rc6(dev) || - NEEDS_WaRsDisableCoarsePowerGating(dev)) + if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev)) data[1] = 0; else /* bit 0 and 1 are for Render and Media domain separately */ @@ -361,10 +360,9 @@ static void guc_init_ctx_desc(struct intel_guc *guc, struct drm_i915_gem_object *client_obj = client->client_obj; struct drm_i915_private *dev_priv = guc_to_i915(guc); struct intel_engine_cs *engine; - struct intel_context *ctx = client->owner; + struct i915_gem_context *ctx = client->owner; struct guc_context_desc desc; struct sg_table *sg; - enum intel_engine_id id; u32 gfx_addr; memset(&desc, 0, sizeof(desc)); @@ -374,10 +372,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc, desc.priority = client->priority; desc.db_id = client->doorbell_id; - for_each_engine_id(engine, dev_priv, id) { + for_each_engine(engine, dev_priv) { + struct intel_context *ce = &ctx->engine[engine->id]; struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id]; struct drm_i915_gem_object *obj; - uint64_t ctx_desc; /* TODO: We have a design issue to be solved here. Only when we * receive the first batch, we know which engine is used by the @@ -386,20 +384,18 @@ static void guc_init_ctx_desc(struct intel_guc *guc, * for now who owns a GuC client. But for future owner of GuC * client, need to make sure lrc is pinned prior to enter here. */ - obj = ctx->engine[id].state; - if (!obj) + if (!ce->state) break; /* XXX: continue? */ - ctx_desc = intel_lr_context_descriptor(ctx, engine); - lrc->context_desc = (u32)ctx_desc; + lrc->context_desc = lower_32_bits(ce->lrc_desc); /* The state page is after PPHWSP */ - gfx_addr = i915_gem_obj_ggtt_offset(obj); + gfx_addr = i915_gem_obj_ggtt_offset(ce->state); lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE; lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | (engine->guc_id << GUC_ELC_ENGINE_OFFSET); - obj = ctx->engine[id].ringbuf->obj; + obj = ce->ringbuf->obj; gfx_addr = i915_gem_obj_ggtt_offset(obj); lrc->ring_begin = gfx_addr; @@ -427,7 +423,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc, desc.wq_size = client->wq_size; /* - * XXX: Take LRCs from an existing intel_context if this is not an + * XXX: Take LRCs from an existing context if this is not an * IsKMDCreatedContext client */ desc.desc_private = (uintptr_t)client; @@ -451,47 +447,64 @@ static void guc_fini_ctx_desc(struct intel_guc *guc, sizeof(desc) * client->ctx_index); } -int i915_guc_wq_check_space(struct i915_guc_client *gc) +/** + * i915_guc_wq_check_space() - check that the GuC can accept a request + * @request: request associated with the commands + * + * Return: 0 if space is available + * -EAGAIN if space is not currently available + * + * This function must be called (and must return 0) before a request + * is submitted to the GuC via i915_guc_submit() below. Once a result + * of 0 has been returned, it remains valid until (but only until) + * the next call to submit(). 
+ * + * This precheck allows the caller to determine in advance that space + * will be available for the next submission before committing resources + * to it, and helps avoid late failures with complicated recovery paths. + */ +int i915_guc_wq_check_space(struct drm_i915_gem_request *request) { + const size_t wqi_size = sizeof(struct guc_wq_item); + struct i915_guc_client *gc = request->i915->guc.execbuf_client; struct guc_process_desc *desc; - u32 size = sizeof(struct guc_wq_item); - int ret = -ETIMEDOUT, timeout_counter = 200; + u32 freespace; - if (!gc) - return 0; + GEM_BUG_ON(gc == NULL); desc = gc->client_base + gc->proc_desc_offset; - while (timeout_counter-- > 0) { - if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) { - ret = 0; - break; - } + freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size); + if (likely(freespace >= wqi_size)) + return 0; - if (timeout_counter) - usleep_range(1000, 2000); - }; + gc->no_wq_space += 1; - return ret; + return -EAGAIN; } -static int guc_add_workqueue_item(struct i915_guc_client *gc, - struct drm_i915_gem_request *rq) +static void guc_add_workqueue_item(struct i915_guc_client *gc, + struct drm_i915_gem_request *rq) { + /* wqi_len is in DWords, and does not include the one-word header */ + const size_t wqi_size = sizeof(struct guc_wq_item); + const u32 wqi_len = wqi_size/sizeof(u32) - 1; struct guc_process_desc *desc; struct guc_wq_item *wqi; void *base; - u32 tail, wq_len, wq_off, space; + u32 freespace, tail, wq_off, wq_page; desc = gc->client_base + gc->proc_desc_offset; - space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size); - if (WARN_ON(space < sizeof(struct guc_wq_item))) - return -ENOSPC; /* shouldn't happen */ - /* postincrement WQ tail for next time */ - wq_off = gc->wq_tail; - gc->wq_tail += sizeof(struct guc_wq_item); - gc->wq_tail &= gc->wq_size - 1; + /* Free space is guaranteed, see i915_guc_wq_check_space() above */ + freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size); + GEM_BUG_ON(freespace < wqi_size); + + /* The GuC firmware wants the tail index in QWords, not bytes */ + tail = rq->tail; + GEM_BUG_ON(tail & 7); + tail >>= 3; + GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we * should not have the case where structure wqi is across page, neither @@ -500,19 +513,23 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc, * XXX: if not the case, we need to save data to a temp wqi and copy it to * workqueue buffer dw by dw. 
- WARN_ON(sizeof(struct guc_wq_item) != 16); - WARN_ON(wq_off & 3); + BUILD_BUG_ON(wqi_size != 16); - /* wq starts from the page after doorbell / process_desc */ - base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, - (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT)); + /* postincrement WQ tail for next time */ + wq_off = gc->wq_tail; + gc->wq_tail += wqi_size; + gc->wq_tail &= gc->wq_size - 1; + GEM_BUG_ON(wq_off & (wqi_size - 1)); + + /* WQ starts from the page after doorbell / process_desc */ + wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT; wq_off &= PAGE_SIZE - 1; + base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page)); wqi = (struct guc_wq_item *)((char *)base + wq_off); - /* len does not include the header */ - wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1; + /* Now fill in the 4-word work queue item */ wqi->header = WQ_TYPE_INORDER | - (wq_len << WQ_LEN_SHIFT) | + (wqi_len << WQ_LEN_SHIFT) | (rq->engine->guc_id << WQ_TARGET_SHIFT) | WQ_NO_WCFLUSH_WAIT; @@ -520,48 +537,50 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc, wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->engine); - /* The GuC firmware wants the tail index in QWords, not bytes */ - tail = rq->ringbuf->tail >> 3; wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT; - wqi->fence_id = 0; /*XXX: what fence to be here */ + wqi->fence_id = rq->seqno; kunmap_atomic(base); - - return 0; } /** * i915_guc_submit() - Submit commands through GuC - * @client: the guc client where commands will go through * @rq: request associated with the commands * - * Return: 0 if succeed + * Return: 0 on success, otherwise an errno. + * (Note: nonzero really shouldn't happen!) + * + * The caller must have already called i915_guc_wq_check_space() above + * with a result of 0 (success) since the last request submission. This + * guarantees that there is space in the work queue for the new request, + * so enqueuing the item cannot fail. + * + * Bad Things Will Happen if the caller violates this protocol, e.g. calls + * submit() when check() says there's no space, or calls submit() multiple + * times with no intervening check(). + * + * The only error here arises if the doorbell hardware isn't functioning + * as expected, which really shouldn't happen. 
*/ -int i915_guc_submit(struct i915_guc_client *client, - struct drm_i915_gem_request *rq) +int i915_guc_submit(struct drm_i915_gem_request *rq) { - struct intel_guc *guc = client->guc; unsigned int engine_id = rq->engine->guc_id; - int q_ret, b_ret; + struct intel_guc *guc = &rq->i915->guc; + struct i915_guc_client *client = guc->execbuf_client; + int b_ret; - q_ret = guc_add_workqueue_item(client, rq); - if (q_ret == 0) - b_ret = guc_ring_doorbell(client); + guc_add_workqueue_item(client, rq); + b_ret = guc_ring_doorbell(client); client->submissions[engine_id] += 1; - if (q_ret) { - client->q_fail += 1; - client->retcode = q_ret; - } else if (b_ret) { + client->retcode = b_ret; + if (b_ret) client->b_fail += 1; - client->retcode = q_ret = b_ret; - } else { - client->retcode = 0; - } + guc->submissions[engine_id] += 1; guc->last_seqno[engine_id] = rq->seqno; - return q_ret; + return b_ret; } /* @@ -587,8 +606,8 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; - obj = i915_gem_alloc_object(dev, size); - if (!obj) + obj = i915_gem_object_create(dev, size); + if (IS_ERR(obj)) return NULL; if (i915_gem_object_get_pages(obj)) { @@ -678,7 +697,7 @@ static void guc_client_free(struct drm_device *dev, */ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, uint32_t priority, - struct intel_context *ctx) + struct i915_gem_context *ctx) { struct i915_guc_client *client; struct drm_i915_private *dev_priv = dev->dev_private; @@ -916,11 +935,12 @@ int i915_guc_submission_enable(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; - struct intel_context *ctx = dev_priv->kernel_context; struct i915_guc_client *client; /* client for execbuf submission */ - client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx); + client = guc_client_alloc(dev, + GUC_CTX_PRIORITY_KMD_NORMAL, + dev_priv->kernel_context); if (!client) { DRM_ERROR("Failed to create execbuf guc_client\n"); return -ENOMEM; @@ -967,10 +987,10 @@ int intel_guc_suspend(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; - struct intel_context *ctx; + struct i915_gem_context *ctx; u32 data[3]; - if (!i915.enable_guc_submission) + if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS) return 0; ctx = dev_priv->kernel_context; @@ -993,10 +1013,10 @@ int intel_guc_resume(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc *guc = &dev_priv->guc; - struct intel_context *ctx; + struct i915_gem_context *ctx; u32 data[3]; - if (!i915.enable_guc_submission) + if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS) return 0; ctx = dev_priv->kernel_context; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2f6fd33c07ba..5c7378374ae6 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -336,9 +336,8 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) __gen6_disable_pm_irq(dev_priv, mask); } -void gen6_reset_rps_interrupts(struct drm_device *dev) +void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; i915_reg_t reg = gen6_pm_iir(dev_priv); spin_lock_irq(&dev_priv->irq_lock); @@ -349,10 +348,8 @@ void gen6_reset_rps_interrupts(struct drm_device *dev) 
spin_unlock_irq(&dev_priv->irq_lock); } -void gen6_enable_rps_interrupts(struct drm_device *dev) +void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - spin_lock_irq(&dev_priv->irq_lock); WARN_ON(dev_priv->rps.pm_iir); @@ -367,25 +364,11 @@ void gen6_enable_rps_interrupts(struct drm_device *dev) u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) { - /* - * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer - * if GEN6_PM_UP_EI_EXPIRED is masked. - * - * TODO: verify if this can be reproduced on VLV,CHV. - */ - if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) - mask &= ~GEN6_PM_RP_UP_EI_EXPIRED; - - if (INTEL_INFO(dev_priv)->gen >= 8) - mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP; - - return mask; + return (mask & ~dev_priv->rps.pm_intr_keep); } -void gen6_disable_rps_interrupts(struct drm_device *dev) +void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - spin_lock_irq(&dev_priv->irq_lock); dev_priv->rps.interrupts_enabled = false; spin_unlock_irq(&dev_priv->irq_lock); @@ -402,7 +385,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev) spin_unlock_irq(&dev_priv->irq_lock); - synchronize_irq(dev->irq); + synchronize_irq(dev_priv->dev->irq); } /** @@ -607,17 +590,15 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion * @dev: drm device */ -static void i915_enable_asle_pipestat(struct drm_device *dev) +static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) + if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) return; spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) i915_enable_pipestat(dev_priv, PIPE_A, PIPE_LEGACY_BLC_EVENT_STATUS); @@ -750,7 +731,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) if (mode->flags & DRM_MODE_FLAG_INTERLACE) vtotal /= 2; - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; else position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; @@ -767,7 +748,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) * problem. We may need to extend this to include other platforms, * but so far testing only shows the problem on HSW. */ - if (HAS_DDI(dev) && !position) { + if (HAS_DDI(dev_priv) && !position) { int i, temp; for (i = 0; i < 100; i++) { @@ -835,7 +816,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, if (stime) *stime = ktime_get(); - if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { /* No obvious pixelcount register. Only query vertical * scanout position from Display scan line register. 
*/ @@ -897,7 +878,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, else position += vtotal - vbl_end; - if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { *vpos = position; *hpos = 0; } else { @@ -955,9 +936,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, &crtc->hwmode); } -static void ironlake_rps_change_irq_handler(struct drm_device *dev) +static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 busy_up, busy_down, max_avg, min_avg; u8 new_delay; @@ -986,7 +966,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev) new_delay = dev_priv->ips.min_delay; } - if (ironlake_set_drps(dev, new_delay)) + if (ironlake_set_drps(dev_priv, new_delay)) dev_priv->ips.cur_delay = new_delay; spin_unlock(&mchdev_lock); @@ -1175,7 +1155,7 @@ static void gen6_pm_rps_work(struct work_struct *work) new_delay += adj; new_delay = clamp_t(int, new_delay, min, max); - intel_set_rps(dev_priv->dev, new_delay); + intel_set_rps(dev_priv, new_delay); mutex_unlock(&dev_priv->rps.hw_lock); out: @@ -1506,27 +1486,23 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, } -static void gmbus_irq_handler(struct drm_device *dev) +static void gmbus_irq_handler(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - wake_up_all(&dev_priv->gmbus_wait_queue); } -static void dp_aux_irq_handler(struct drm_device *dev) +static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - wake_up_all(&dev_priv->gmbus_wait_queue); } #if defined(CONFIG_DEBUG_FS) -static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, +static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe, uint32_t crc0, uint32_t crc1, uint32_t crc2, uint32_t crc3, uint32_t crc4) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; struct intel_pipe_crc_entry *entry; int head, tail; @@ -1550,7 +1526,8 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, entry = &pipe_crc->entries[head]; - entry->frame = dev->driver->get_vblank_counter(dev, pipe); + entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev, + pipe); entry->crc[0] = crc0; entry->crc[1] = crc1; entry->crc[2] = crc2; @@ -1566,27 +1543,26 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, } #else static inline void -display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, +display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe, uint32_t crc0, uint32_t crc1, uint32_t crc2, uint32_t crc3, uint32_t crc4) {} #endif -static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) +static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; - - display_pipe_crc_irq_handler(dev, pipe, + display_pipe_crc_irq_handler(dev_priv, pipe, I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 0, 0, 0, 0); } -static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) +static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; - - 
display_pipe_crc_irq_handler(dev, pipe, + display_pipe_crc_irq_handler(dev_priv, pipe, I915_READ(PIPE_CRC_RES_1_IVB(pipe)), I915_READ(PIPE_CRC_RES_2_IVB(pipe)), I915_READ(PIPE_CRC_RES_3_IVB(pipe)), @@ -1594,22 +1570,22 @@ static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) I915_READ(PIPE_CRC_RES_5_IVB(pipe))); } -static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) +static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, + enum pipe pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; uint32_t res1, res2; - if (INTEL_INFO(dev)->gen >= 3) + if (INTEL_GEN(dev_priv) >= 3) res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); else res1 = 0; - if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); else res2 = 0; - display_pipe_crc_irq_handler(dev, pipe, + display_pipe_crc_irq_handler(dev_priv, pipe, I915_READ(PIPE_CRC_RES_RED(pipe)), I915_READ(PIPE_CRC_RES_GREEN(pipe)), I915_READ(PIPE_CRC_RES_BLUE(pipe)), @@ -1643,18 +1619,21 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) } } -static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) +static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv, + enum pipe pipe) { - if (!drm_handle_vblank(dev, pipe)) - return false; + bool ret; - return true; + ret = drm_handle_vblank(dev_priv->dev, pipe); + if (ret) + intel_finish_page_flip_mmio(dev_priv, pipe); + + return ret; } -static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir, - u32 pipe_stats[I915_MAX_PIPES]) +static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv, + u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { - struct drm_i915_private *dev_priv = dev->dev_private; int pipe; spin_lock(&dev_priv->irq_lock); @@ -1710,31 +1689,28 @@ static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir, spin_unlock(&dev_priv->irq_lock); } -static void valleyview_pipestat_irq_handler(struct drm_device *dev, +static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, u32 pipe_stats[I915_MAX_PIPES]) { - struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe; for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && - intel_pipe_handle_vblank(dev, pipe)) - intel_check_page_flip(dev, pipe); + intel_pipe_handle_vblank(dev_priv, pipe)) + intel_check_page_flip(dev_priv, pipe); - if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip(dev, pipe); - } + if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) + intel_finish_page_flip_cs(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); } static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) @@ -1747,12 +1723,13 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) return hotplug_status; } -static void i9xx_hpd_irq_handler(struct drm_device *dev, +static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status) { u32 pin_mask = 0, long_mask = 0; - if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + if 
(IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv)) { u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; if (hotplug_trigger) { @@ -1760,11 +1737,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev, hotplug_trigger, hpd_status_g4x, i9xx_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); } else { u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; @@ -1772,7 +1749,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev, intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, hotplug_trigger, hpd_status_i915, i9xx_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } } } @@ -1831,7 +1808,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) /* Call regardless, as some status bits might not be * signalled in iir */ - valleyview_pipestat_irq_ack(dev, iir, pipe_stats); + valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); /* * VLV_IIR is single buffered, and reflects the level @@ -1850,9 +1827,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) gen6_rps_irq_handler(dev_priv, pm_iir); if (hotplug_status) - i9xx_hpd_irq_handler(dev, hotplug_status); + i9xx_hpd_irq_handler(dev_priv, hotplug_status); - valleyview_pipestat_irq_handler(dev, pipe_stats); + valleyview_pipestat_irq_handler(dev_priv, pipe_stats); } while (0); enable_rpm_wakeref_asserts(dev_priv); @@ -1911,7 +1888,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) /* Call regardless, as some status bits might not be * signalled in iir */ - valleyview_pipestat_irq_ack(dev, iir, pipe_stats); + valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); /* * VLV_IIR is single buffered, and reflects the level @@ -1927,9 +1904,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) gen8_gt_irq_handler(dev_priv, gt_iir); if (hotplug_status) - i9xx_hpd_irq_handler(dev, hotplug_status); + i9xx_hpd_irq_handler(dev_priv, hotplug_status); - valleyview_pipestat_irq_handler(dev, pipe_stats); + valleyview_pipestat_irq_handler(dev_priv, pipe_stats); } while (0); enable_rpm_wakeref_asserts(dev_priv); @@ -1937,10 +1914,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) return ret; } -static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, +static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, + u32 hotplug_trigger, const u32 hpd[HPD_NUM_PINS]) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; /* @@ -1966,16 +1943,15 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, dig_hotplug_reg, hpd, pch_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } -static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) +static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; - ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); + ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); if (pch_iir & SDE_AUDIO_POWER_MASK) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> @@ -1985,10 +1961,10 @@ static void 
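The valleyview/cherryview handlers above follow an ack-then-handle shape: snapshot IIR, write the snapshot back to clear it (the register is single buffered), then dispatch from the snapshot and loop for bits that latched meanwhile. A toy, runnable model with a fake write-1-to-clear register standing in for VLV_IIR:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_iir = 0x5;		/* pretend two status bits latched */

static uint32_t iir_read(void) { return fake_iir; }
static void iir_ack(uint32_t bits) { fake_iir &= ~bits; }	/* W1C */

static int irq_service_loop(void)
{
	int handled = 0;
	uint32_t snapshot;

	while ((snapshot = iir_read()) != 0) {
		iir_ack(snapshot);	/* ack before handling */
		/* ... decode hotplug/pipe/GT bits from 'snapshot' ... */
		printf("servicing bits 0x%x\n", snapshot);
		handled = 1;
	}
	return handled;
}

int main(void) { return !irq_service_loop(); }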
ibx_irq_handler(struct drm_device *dev, u32 pch_iir) } if (pch_iir & SDE_AUX_MASK) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); if (pch_iir & SDE_GMBUS) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); if (pch_iir & SDE_AUDIO_HDCP_MASK) DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); @@ -2018,9 +1994,8 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); } -static void ivb_err_int_handler(struct drm_device *dev) +static void ivb_err_int_handler(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 err_int = I915_READ(GEN7_ERR_INT); enum pipe pipe; @@ -2032,19 +2007,18 @@ static void ivb_err_int_handler(struct drm_device *dev) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { - if (IS_IVYBRIDGE(dev)) - ivb_pipe_crc_irq_handler(dev, pipe); + if (IS_IVYBRIDGE(dev_priv)) + ivb_pipe_crc_irq_handler(dev_priv, pipe); else - hsw_pipe_crc_irq_handler(dev, pipe); + hsw_pipe_crc_irq_handler(dev_priv, pipe); } } I915_WRITE(GEN7_ERR_INT, err_int); } -static void cpt_serr_int_handler(struct drm_device *dev) +static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 serr_int = I915_READ(SERR_INT); if (serr_int & SERR_INT_POISON) @@ -2062,13 +2036,12 @@ static void cpt_serr_int_handler(struct drm_device *dev) I915_WRITE(SERR_INT, serr_int); } -static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) +static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; int pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; - ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); + ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> @@ -2078,10 +2051,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) } if (pch_iir & SDE_AUX_MASK_CPT) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); if (pch_iir & SDE_GMBUS_CPT) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); if (pch_iir & SDE_AUDIO_CP_REQ_CPT) DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); @@ -2096,12 +2069,11 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) I915_READ(FDI_RX_IIR(pipe))); if (pch_iir & SDE_ERROR_CPT) - cpt_serr_int_handler(dev); + cpt_serr_int_handler(dev_priv); } -static void spt_irq_handler(struct drm_device *dev, u32 pch_iir) +static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & ~SDE_PORTE_HOTPLUG_SPT; u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; @@ -2130,16 +2102,16 @@ static void spt_irq_handler(struct drm_device *dev, u32 pch_iir) } if (pin_mask) - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); if (pch_iir & SDE_GMBUS_CPT) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); } -static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, +static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, + u32 hotplug_trigger, const u32 hpd[HPD_NUM_PINS]) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); @@ 
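ibx_irq_handler() and cpt_irq_handler() recover the audio-power port index from a grouped status field with ffs(): mask, shift down, find the first set bit. A standalone sketch; the mask and shift values are placeholders, not the real SDE_AUDIO_POWER_* constants:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define AUDIO_POWER_SHIFT 8
#define AUDIO_POWER_MASK  (0xf << AUDIO_POWER_SHIFT)

/* Returns the 0-based port whose audio-power bit is set, or -1. */
static int audio_power_port(uint32_t pch_iir)
{
	return ffs((pch_iir & AUDIO_POWER_MASK) >> AUDIO_POWER_SHIFT) - 1;
}

int main(void)
{
	printf("%d\n", audio_power_port(1u << (AUDIO_POWER_SHIFT + 2)));
	return 0;	/* prints 2 */
}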
-2149,97 +2121,93 @@ static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, dig_hotplug_reg, hpd, ilk_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } -static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) +static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, + u32 de_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; if (hotplug_trigger) - ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk); + ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); if (de_iir & DE_AUX_CHANNEL_A) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); if (de_iir & DE_GSE) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); if (de_iir & DE_POISON) DRM_ERROR("Poison interrupt\n"); for_each_pipe(dev_priv, pipe) { if (de_iir & DE_PIPE_VBLANK(pipe) && - intel_pipe_handle_vblank(dev, pipe)) - intel_check_page_flip(dev, pipe); + intel_pipe_handle_vblank(dev_priv, pipe)) + intel_check_page_flip(dev_priv, pipe); if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); if (de_iir & DE_PIPE_CRC_DONE(pipe)) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); /* plane/pipes map 1:1 on ilk+ */ - if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip_plane(dev, pipe); - } + if (de_iir & DE_PLANE_FLIP_DONE(pipe)) + intel_finish_page_flip_cs(dev_priv, pipe); } /* check event from PCH */ if (de_iir & DE_PCH_EVENT) { u32 pch_iir = I915_READ(SDEIIR); - if (HAS_PCH_CPT(dev)) - cpt_irq_handler(dev, pch_iir); + if (HAS_PCH_CPT(dev_priv)) + cpt_irq_handler(dev_priv, pch_iir); else - ibx_irq_handler(dev, pch_iir); + ibx_irq_handler(dev_priv, pch_iir); /* should clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); } - if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) - ironlake_rps_change_irq_handler(dev); + if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) + ironlake_rps_change_irq_handler(dev_priv); } -static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) +static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, + u32 de_iir) { - struct drm_i915_private *dev_priv = dev->dev_private; enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; if (hotplug_trigger) - ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb); + ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); if (de_iir & DE_ERR_INT_IVB) - ivb_err_int_handler(dev); + ivb_err_int_handler(dev_priv); if (de_iir & DE_AUX_CHANNEL_A_IVB) - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); if (de_iir & DE_GSE_IVB) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); for_each_pipe(dev_priv, pipe) { if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && - intel_pipe_handle_vblank(dev, pipe)) - intel_check_page_flip(dev, pipe); + intel_pipe_handle_vblank(dev_priv, pipe)) + intel_check_page_flip(dev_priv, pipe); /* plane/pipes map 1:1 on ilk+ */ - if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip_plane(dev, pipe); - } + if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) + intel_finish_page_flip_cs(dev_priv, pipe); } /* check event from PCH */ - if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { + if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { u32 pch_iir = I915_READ(SDEIIR); - 
cpt_irq_handler(dev, pch_iir); + cpt_irq_handler(dev_priv, pch_iir); /* clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); @@ -2277,7 +2245,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) * able to process them after we restore SDEIER (as soon as we restore * it, we'll get an interrupt if SDEIIR still has something to process * due to its back queue). */ - if (!HAS_PCH_NOP(dev)) { + if (!HAS_PCH_NOP(dev_priv)) { sde_ier = I915_READ(SDEIER); I915_WRITE(SDEIER, 0); POSTING_READ(SDEIER); @@ -2289,7 +2257,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) if (gt_iir) { I915_WRITE(GTIIR, gt_iir); ret = IRQ_HANDLED; - if (INTEL_INFO(dev)->gen >= 6) + if (INTEL_GEN(dev_priv) >= 6) snb_gt_irq_handler(dev_priv, gt_iir); else ilk_gt_irq_handler(dev_priv, gt_iir); @@ -2299,13 +2267,13 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) if (de_iir) { I915_WRITE(DEIIR, de_iir); ret = IRQ_HANDLED; - if (INTEL_INFO(dev)->gen >= 7) - ivb_display_irq_handler(dev, de_iir); + if (INTEL_GEN(dev_priv) >= 7) + ivb_display_irq_handler(dev_priv, de_iir); else - ilk_display_irq_handler(dev, de_iir); + ilk_display_irq_handler(dev_priv, de_iir); } - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_GEN(dev_priv) >= 6) { u32 pm_iir = I915_READ(GEN6_PMIIR); if (pm_iir) { I915_WRITE(GEN6_PMIIR, pm_iir); @@ -2316,7 +2284,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) I915_WRITE(DEIER, de_ier); POSTING_READ(DEIER); - if (!HAS_PCH_NOP(dev)) { + if (!HAS_PCH_NOP(dev_priv)) { I915_WRITE(SDEIER, sde_ier); POSTING_READ(SDEIER); } @@ -2327,10 +2295,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) return ret; } -static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, +static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, + u32 hotplug_trigger, const u32 hpd[HPD_NUM_PINS]) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); @@ -2340,13 +2308,12 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, dig_hotplug_reg, hpd, bxt_port_hotplug_long_detect); - intel_hpd_irq_handler(dev, pin_mask, long_mask); + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); } static irqreturn_t gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) { - struct drm_device *dev = dev_priv->dev; irqreturn_t ret = IRQ_NONE; u32 iir; enum pipe pipe; @@ -2357,7 +2324,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(GEN8_DE_MISC_IIR, iir); ret = IRQ_HANDLED; if (iir & GEN8_DE_MISC_GSE) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); else DRM_ERROR("Unexpected DE Misc interrupt\n"); } @@ -2381,26 +2348,28 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) GEN9_AUX_CHANNEL_D; if (iir & tmp_mask) { - dp_aux_irq_handler(dev); + dp_aux_irq_handler(dev_priv); found = true; } if (IS_BROXTON(dev_priv)) { tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; if (tmp_mask) { - bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt); + bxt_hpd_irq_handler(dev_priv, tmp_mask, + hpd_bxt); found = true; } } else if (IS_BROADWELL(dev_priv)) { tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; if (tmp_mask) { - ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw); + ilk_hpd_irq_handler(dev_priv, + tmp_mask, hpd_bdw); found = true; } } - if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) { - gmbus_irq_handler(dev); + if (IS_BROXTON(dev_priv) && (iir & 
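ironlake_irq_handler() above keeps its SDEIER bracket: the south (PCH) interrupt enables are cleared while servicing so nothing queues up behind the handler's back, then restored, letting a still-pending SDEIIR re-raise the interrupt. A compact model of that bracket, with a plain variable standing in for the mmio register:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_sdeier = 0xff;	/* stand-in for the SDEIER mmio */

static uint32_t reg_read(uint32_t *r) { return *r; }
static void reg_write(uint32_t *r, uint32_t v) { *r = v; }

static void service_pch(void (*service)(void))
{
	uint32_t saved = reg_read(&fake_sdeier);

	reg_write(&fake_sdeier, 0);	/* mask south interrupts */
	(void)reg_read(&fake_sdeier);	/* posting read: flush the write */
	service();
	reg_write(&fake_sdeier, saved);	/* pending SDEIIR re-raises now */
	(void)reg_read(&fake_sdeier);
}

static void do_service(void) { puts("PCH serviced"); }

int main(void) { service_pch(do_service); return 0; }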
BXT_DE_PORT_GMBUS)) { + gmbus_irq_handler(dev_priv); found = true; } @@ -2427,8 +2396,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); if (iir & GEN8_PIPE_VBLANK && - intel_pipe_handle_vblank(dev, pipe)) - intel_check_page_flip(dev, pipe); + intel_pipe_handle_vblank(dev_priv, pipe)) + intel_check_page_flip(dev_priv, pipe); flip_done = iir; if (INTEL_INFO(dev_priv)->gen >= 9) @@ -2436,13 +2405,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) else flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; - if (flip_done) { - intel_prepare_page_flip(dev, pipe); - intel_finish_page_flip_plane(dev, pipe); - } + if (flip_done) + intel_finish_page_flip_cs(dev_priv, pipe); if (iir & GEN8_PIPE_CDCLK_CRC_DONE) - hsw_pipe_crc_irq_handler(dev, pipe); + hsw_pipe_crc_irq_handler(dev_priv, pipe); if (iir & GEN8_PIPE_FIFO_UNDERRUN) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); @@ -2459,7 +2426,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) fault_errors); } - if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) && + if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && master_ctl & GEN8_DE_PCH_IRQ) { /* * FIXME(BDW): Assume for now that the new interrupt handling @@ -2472,9 +2439,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) ret = IRQ_HANDLED; if (HAS_PCH_SPT(dev_priv)) - spt_irq_handler(dev, iir); + spt_irq_handler(dev_priv, iir); else - cpt_irq_handler(dev, iir); + cpt_irq_handler(dev_priv, iir); } else { /* * Like on previous PCH there seems to be something @@ -2555,15 +2522,15 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, * Fire an error uevent so userspace can see that a hang or error * was detected. */ -static void i915_reset_and_wakeup(struct drm_device *dev) +static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj; char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; int ret; - kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); + kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); /* * Note that there's only one work item which does gpu resets, so we @@ -2577,8 +2544,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev) */ if (i915_reset_in_progress(&dev_priv->gpu_error)) { DRM_DEBUG_DRIVER("resetting chip\n"); - kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, - reset_event); + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); /* * In most cases it's guaranteed that we get here with an RPM @@ -2589,7 +2555,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev) */ intel_runtime_pm_get(dev_priv); - intel_prepare_reset(dev); + intel_prepare_reset(dev_priv); /* * All state reset _must_ be completed before we update the @@ -2597,14 +2563,14 @@ static void i915_reset_and_wakeup(struct drm_device *dev) * pending state and not properly drop locks, resulting in * deadlocks with the reset work. 
*/ - ret = i915_reset(dev); + ret = i915_reset(dev_priv); - intel_finish_reset(dev); + intel_finish_reset(dev_priv); intel_runtime_pm_put(dev_priv); if (ret == 0) - kobject_uevent_env(&dev->primary->kdev->kobj, + kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); /* @@ -2615,9 +2581,8 @@ static void i915_reset_and_wakeup(struct drm_device *dev) } } -static void i915_report_and_clear_eir(struct drm_device *dev) +static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; uint32_t instdone[I915_NUM_INSTDONE_REG]; u32 eir = I915_READ(EIR); int pipe, i; @@ -2627,9 +2592,9 @@ static void i915_report_and_clear_eir(struct drm_device *dev) pr_err("render error detected, EIR: 0x%08x\n", eir); - i915_get_extra_instdone(dev, instdone); + i915_get_extra_instdone(dev_priv, instdone); - if (IS_G4X(dev)) { + if (IS_G4X(dev_priv)) { if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { u32 ipeir = I915_READ(IPEIR_I965); @@ -2651,7 +2616,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) } } - if (!IS_GEN2(dev)) { + if (!IS_GEN2(dev_priv)) { if (eir & I915_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); pr_err("page table error\n"); @@ -2673,7 +2638,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); for (i = 0; i < ARRAY_SIZE(instdone); i++) pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_GEN(dev_priv) < 4) { u32 ipeir = I915_READ(IPEIR); pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); @@ -2717,10 +2682,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev) * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). */ -void i915_handle_error(struct drm_device *dev, u32 engine_mask, +void i915_handle_error(struct drm_i915_private *dev_priv, + u32 engine_mask, const char *fmt, ...) 
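The restructured i915_reset_and_wakeup() routes every uevent through one cached kobject and brackets the reset in a runtime-PM reference. A standalone ordering model; the stub names (uevent, rpm_get, do_reset, ...) are stand-ins for the kobject_uevent_env/intel_runtime_pm_*/i915_reset calls in the hunk:

#include <stdio.h>

struct gpu { int wedged; };

static void uevent(struct gpu *g, const char *e) { (void)g; puts(e); }
static int  reset_in_progress(struct gpu *g)     { return g->wedged; }
static void rpm_get(struct gpu *g)       { (void)g; }
static void rpm_put(struct gpu *g)       { (void)g; }
static void prepare_reset(struct gpu *g) { (void)g; }
static int  do_reset(struct gpu *g)      { g->wedged = 0; return 0; }
static void finish_reset(struct gpu *g)  { (void)g; }

static void reset_and_wakeup(struct gpu *g)
{
	uevent(g, "ERROR=1");			/* error_event */
	if (!reset_in_progress(g))
		return;
	uevent(g, "RESET=1");			/* reset_event */
	rpm_get(g);				/* keep the device awake */
	prepare_reset(g);			/* quiesce before touching state */
	int ret = do_reset(g);
	finish_reset(g);
	rpm_put(g);
	if (ret == 0)
		uevent(g, "ERROR=0");		/* reset_done_event */
}

int main(void)
{
	struct gpu g = { .wedged = 1 };

	reset_and_wakeup(&g);
	return 0;
}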
{ - struct drm_i915_private *dev_priv = dev->dev_private; va_list args; char error_msg[80]; @@ -2728,8 +2693,8 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask, vscnprintf(error_msg, sizeof(error_msg), fmt, args); va_end(args); - i915_capture_error_state(dev, engine_mask, error_msg); - i915_report_and_clear_eir(dev); + i915_capture_error_state(dev_priv, engine_mask, error_msg); + i915_report_and_clear_eir(dev_priv); if (engine_mask) { atomic_or(I915_RESET_IN_PROGRESS_FLAG, @@ -2751,7 +2716,7 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask, i915_error_wake_up(dev_priv, false); } - i915_reset_and_wakeup(dev); + i915_reset_and_wakeup(dev_priv); } /* Called from drm generic code, passed 'crtc' which @@ -2869,9 +2834,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno) } static bool -ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) +ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr) { - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { return (ipehr >> 23) == 0x1c; } else { ipehr &= ~MI_SEMAPHORE_SYNC_MASK; @@ -2884,10 +2849,10 @@ static struct intel_engine_cs * semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, u64 offset) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct intel_engine_cs *signaller; - if (INTEL_INFO(dev_priv)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { for_each_engine(signaller, dev_priv) { if (engine == signaller) continue; @@ -2916,7 +2881,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, static struct intel_engine_cs * semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 cmd, ipehr, head; u64 offset = 0; int i, backwards; @@ -2942,7 +2907,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) return NULL; ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); - if (!ipehr_is_semaphore_wait(engine->dev, ipehr)) + if (!ipehr_is_semaphore_wait(engine->i915, ipehr)) return NULL; /* @@ -2954,7 +2919,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) * ringbuffer itself. */ head = I915_READ_HEAD(engine) & HEAD_ADDR; - backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4; + backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4; for (i = backwards; i; --i) { /* @@ -2976,7 +2941,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) return NULL; *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1; - if (INTEL_INFO(engine->dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { offset = ioread32(engine->buffer->virtual_start + head + 12); offset <<= 32; offset = ioread32(engine->buffer->virtual_start + head + 8); @@ -2986,7 +2951,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) static int semaphore_passed(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct intel_engine_cs *signaller; u32 seqno; @@ -3028,7 +2993,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine) if (engine->id != RCS) return true; - i915_get_extra_instdone(engine->dev, instdone); + i915_get_extra_instdone(engine->i915, instdone); /* There might be unstable subunit states even when * actual head is not moving. 
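semaphore_waits_for() above scans backwards from HEAD because the MI_SEMAPHORE command sits a fixed number of dwords behind it: five on gen8+, four before. A sketch of that scan, assuming a power-of-two ring size in dwords; only the gen8+ opcode test from ipehr_is_semaphore_wait() ((ipehr >> 23) == 0x1c) is shown, and the pre-gen8 MI_SEMAPHORE_MBOX comparison is elided:

#include <stdint.h>

/* Returns the dword index of the semaphore command, or -1. */
static int find_semaphore_cmd(const uint32_t *ring, uint32_t head_dw,
			      uint32_t size_dw, int gen)
{
	int backwards = (gen >= 8) ? 5 : 4;
	int i;

	for (i = backwards; i; --i) {
		uint32_t idx = (head_dw - i) & (size_dw - 1);	/* ring wraps */

		if ((ring[idx] >> 23) == 0x1c)	/* gen8+ semaphore wait */
			return (int)idx;
	}
	return -1;
}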
Filter out the unstable ones by @@ -3069,8 +3034,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd) static enum intel_ring_hangcheck_action ring_stuck(struct intel_engine_cs *engine, u64 acthd) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; enum intel_ring_hangcheck_action ha; u32 tmp; @@ -3078,7 +3042,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) if (ha != HANGCHECK_HUNG) return ha; - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return HANGCHECK_HUNG; /* Is the chip hanging on a WAIT_FOR_EVENT? @@ -3088,19 +3052,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) */ tmp = I915_READ_CTL(engine); if (tmp & RING_WAIT) { - i915_handle_error(dev, 0, + i915_handle_error(dev_priv, 0, "Kicking stuck wait on %s", engine->name); I915_WRITE_CTL(engine, tmp); return HANGCHECK_KICK; } - if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { + if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) { switch (semaphore_passed(engine)) { default: return HANGCHECK_HUNG; case 1: - i915_handle_error(dev, 0, + i915_handle_error(dev_priv, 0, "Kicking stuck semaphore on %s", engine->name); I915_WRITE_CTL(engine, tmp); @@ -3115,7 +3079,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) static unsigned kick_waiters(struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = to_i915(engine->dev); + struct drm_i915_private *i915 = engine->i915; unsigned user_interrupts = READ_ONCE(engine->user_interrupts); if (engine->hangcheck.user_interrupts == user_interrupts && @@ -3144,7 +3108,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work) struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), gpu_error.hangcheck_work.work); - struct drm_device *dev = dev_priv->dev; struct intel_engine_cs *engine; enum intel_engine_id id; int busy_count = 0, rings_hung = 0; @@ -3272,22 +3235,22 @@ static void i915_hangcheck_elapsed(struct work_struct *work) } if (rings_hung) { - i915_handle_error(dev, rings_hung, "Engine(s) hung"); + i915_handle_error(dev_priv, rings_hung, "Engine(s) hung"); goto out; } if (busy_count) /* Reset timer case chip hangs without another request * being added */ - i915_queue_hangcheck(dev); + i915_queue_hangcheck(dev_priv); out: ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); } -void i915_queue_hangcheck(struct drm_device *dev) +void i915_queue_hangcheck(struct drm_i915_private *dev_priv) { - struct i915_gpu_error *e = &to_i915(dev)->gpu_error; + struct i915_gpu_error *e = &dev_priv->gpu_error; if (!i915.enable_hangcheck) return; @@ -3500,31 +3463,29 @@ static void cherryview_irq_preinstall(struct drm_device *dev) spin_unlock_irq(&dev_priv->irq_lock); } -static u32 intel_hpd_enabled_irqs(struct drm_device *dev, +static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, const u32 hpd[HPD_NUM_PINS]) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; u32 enabled_irqs = 0; - for_each_intel_encoder(dev, encoder) + for_each_intel_encoder(dev_priv->dev, encoder) if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) enabled_irqs |= hpd[encoder->hpd_pin]; return enabled_irqs; } -static void ibx_hpd_irq_setup(struct drm_device *dev) +static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_irqs, hotplug, enabled_irqs; - if (HAS_PCH_IBX(dev)) { + if (HAS_PCH_IBX(dev_priv)) { hotplug_irqs = SDE_HOTPLUG_MASK; - 
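intel_hpd_enabled_irqs() above folds the per-pin hotplug enable state into a single IRQ-enable mask. A standalone model; the driver walks encoders and maps each to its HPD pin, while this sketch iterates pins directly, and the pin count is an assumption for illustration:

#include <stdint.h>

#define HPD_NUM_PINS 7		/* assumed bound for the sketch */

enum hpd_pin_state { HPD_DISABLED, HPD_ENABLED };

static uint32_t hpd_enabled_irqs(const enum hpd_pin_state state[HPD_NUM_PINS],
				 const uint32_t hpd[HPD_NUM_PINS])
{
	uint32_t enabled_irqs = 0;
	int pin;

	for (pin = 0; pin < HPD_NUM_PINS; pin++)
		if (state[pin] == HPD_ENABLED)
			enabled_irqs |= hpd[pin];	/* fold pin into mask */
	return enabled_irqs;
}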
enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); } else { hotplug_irqs = SDE_HOTPLUG_MASK_CPT; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); } ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -3543,18 +3504,17 @@ static void ibx_hpd_irq_setup(struct drm_device *dev) * When CPU and PCH are on the same package, port A * HPD must be enabled in both north and south. */ - if (HAS_PCH_LPT_LP(dev)) + if (HAS_PCH_LPT_LP(dev_priv)) hotplug |= PORTA_HOTPLUG_ENABLE; I915_WRITE(PCH_PORT_HOTPLUG, hotplug); } -static void spt_hpd_irq_setup(struct drm_device *dev) +static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_irqs, hotplug, enabled_irqs; hotplug_irqs = SDE_HOTPLUG_MASK_SPT; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -3569,24 +3529,23 @@ static void spt_hpd_irq_setup(struct drm_device *dev) I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); } -static void ilk_hpd_irq_setup(struct drm_device *dev) +static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_irqs, hotplug, enabled_irqs; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); - } else if (INTEL_INFO(dev)->gen >= 7) { + } else if (INTEL_GEN(dev_priv) >= 7) { hotplug_irqs = DE_DP_A_HOTPLUG_IVB; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); } else { hotplug_irqs = DE_DP_A_HOTPLUG; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); } @@ -3601,15 +3560,14 @@ static void ilk_hpd_irq_setup(struct drm_device *dev) hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); - ibx_hpd_irq_setup(dev); + ibx_hpd_irq_setup(dev_priv); } -static void bxt_hpd_irq_setup(struct drm_device *dev) +static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_irqs, hotplug, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt); + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); @@ -3827,6 +3785,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) uint32_t de_pipe_enables; u32 de_port_masked = GEN8_AUX_CHANNEL_A; u32 de_port_enables; + u32 de_misc_masked = GEN8_DE_MISC_GSE; enum pipe pipe; if (INTEL_INFO(dev_priv)->gen >= 9) { @@ -3862,6 +3821,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) de_pipe_enables); GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); + GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); } static int gen8_irq_postinstall(struct drm_device *dev) @@ -4006,13 +3966,12 @@ static int 
i8xx_irq_postinstall(struct drm_device *dev) /* * Returns true when a page flip has completed. */ -static bool i8xx_handle_vblank(struct drm_device *dev, +static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv, int plane, int pipe, u32 iir) { - struct drm_i915_private *dev_priv = dev->dev_private; u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); - if (!intel_pipe_handle_vblank(dev, pipe)) + if (!intel_pipe_handle_vblank(dev_priv, pipe)) return false; if ((iir & flip_pending) == 0) @@ -4027,12 +3986,11 @@ static bool i8xx_handle_vblank(struct drm_device *dev, if (I915_READ16(ISR) & flip_pending) goto check_page_flip; - intel_prepare_page_flip(dev, plane); - intel_finish_page_flip(dev, pipe); + intel_finish_page_flip_cs(dev_priv, pipe); return true; check_page_flip: - intel_check_page_flip(dev, pipe); + intel_check_page_flip(dev_priv, pipe); return false; } @@ -4089,15 +4047,15 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) for_each_pipe(dev_priv, pipe) { int plane = pipe; - if (HAS_FBC(dev)) + if (HAS_FBC(dev_priv)) plane = !plane; if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && - i8xx_handle_vblank(dev, plane, pipe, iir)) + i8xx_handle_vblank(dev_priv, plane, pipe, iir)) flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, @@ -4182,7 +4140,7 @@ static int i915_irq_postinstall(struct drm_device *dev) I915_WRITE(IER, enable_mask); POSTING_READ(IER); - i915_enable_asle_pipestat(dev); + i915_enable_asle_pipestat(dev_priv); /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ @@ -4197,13 +4155,12 @@ static int i915_irq_postinstall(struct drm_device *dev) /* * Returns true when a page flip has completed. */ -static bool i915_handle_vblank(struct drm_device *dev, +static bool i915_handle_vblank(struct drm_i915_private *dev_priv, int plane, int pipe, u32 iir) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); - if (!intel_pipe_handle_vblank(dev, pipe)) + if (!intel_pipe_handle_vblank(dev_priv, pipe)) return false; if ((iir & flip_pending) == 0) @@ -4218,12 +4175,11 @@ static bool i915_handle_vblank(struct drm_device *dev, if (I915_READ(ISR) & flip_pending) goto check_page_flip; - intel_prepare_page_flip(dev, plane); - intel_finish_page_flip(dev, pipe); + intel_finish_page_flip_cs(dev_priv, pipe); return true; check_page_flip: - intel_check_page_flip(dev, pipe); + intel_check_page_flip(dev_priv, pipe); return false; } @@ -4273,11 +4229,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) break; /* Consume port. 
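i8xx_handle_vblank() keeps its race check when completing a CS flip: the flip-pending bit must have been set in the IIR snapshot and must since have cleared in ISR, otherwise the flip may not have landed yet and the code re-arms via intel_check_page_flip(). The decision distilled into a pure function, with register reads modeled as plain values:

#include <stdint.h>

/* Returns 1 when it is safe to run intel_finish_page_flip_cs(). */
static int try_finish_flip(uint32_t iir_snapshot, uint16_t isr_now,
			   uint16_t flip_pending)
{
	if (!(iir_snapshot & flip_pending))
		return 0;	/* no flip was signalled */
	if (isr_now & flip_pending)
		return 0;	/* still in flight: check again later */
	return 1;		/* flip landed between IIR and ISR reads */
}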
Then clear IIR or we'll miss events */ - if (I915_HAS_HOTPLUG(dev) && + if (I915_HAS_HOTPLUG(dev_priv) && iir & I915_DISPLAY_PORT_INTERRUPT) { u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); if (hotplug_status) - i9xx_hpd_irq_handler(dev, hotplug_status); + i9xx_hpd_irq_handler(dev_priv, hotplug_status); } I915_WRITE(IIR, iir & ~flip_mask); @@ -4288,18 +4244,18 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) for_each_pipe(dev_priv, pipe) { int plane = pipe; - if (HAS_FBC(dev)) + if (HAS_FBC(dev_priv)) plane = !plane; if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && - i915_handle_vblank(dev, plane, pipe, iir)) + i915_handle_vblank(dev_priv, plane, pipe, iir)) flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, @@ -4307,7 +4263,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) } if (blc_event || (iir & I915_ASLE_INTERRUPT)) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); /* With MSI, interrupts are only generated when iir * transitions from zero to nonzero. If another bit got @@ -4391,7 +4347,7 @@ static int i965_irq_postinstall(struct drm_device *dev) I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); enable_mask |= I915_USER_INTERRUPT; - if (IS_G4X(dev)) + if (IS_G4X(dev_priv)) enable_mask |= I915_BSD_USER_INTERRUPT; /* Interrupt setup is already guaranteed to be single-threaded, this is @@ -4406,7 +4362,7 @@ static int i965_irq_postinstall(struct drm_device *dev) * Enable some error detection, note the instruction error mask * bit is reserved, so we leave it masked. */ - if (IS_G4X(dev)) { + if (IS_G4X(dev_priv)) { error_mask = ~(GM45_ERROR_PAGE_TABLE | GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV | @@ -4424,26 +4380,25 @@ static int i965_irq_postinstall(struct drm_device *dev) i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); POSTING_READ(PORT_HOTPLUG_EN); - i915_enable_asle_pipestat(dev); + i915_enable_asle_pipestat(dev_priv); return 0; } -static void i915_hpd_irq_setup(struct drm_device *dev) +static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 hotplug_en; assert_spin_locked(&dev_priv->irq_lock); /* Note HDMI and DP share hotplug bits */ /* enable bits are the same for all generations */ - hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915); + hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); /* Programming the CRT detection parameters tends to generate a spurious hotplug event about three seconds later. So just do it once. 
*/ - if (IS_G4X(dev)) + if (IS_G4X(dev_priv)) hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; @@ -4510,7 +4465,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) if (iir & I915_DISPLAY_PORT_INTERRUPT) { u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); if (hotplug_status) - i9xx_hpd_irq_handler(dev, hotplug_status); + i9xx_hpd_irq_handler(dev_priv, hotplug_status); } I915_WRITE(IIR, iir & ~flip_mask); @@ -4523,24 +4478,24 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && - i915_handle_vblank(dev, pipe, pipe, iir)) + i915_handle_vblank(dev_priv, pipe, pipe, iir)) flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev, pipe); + i9xx_pipe_crc_irq_handler(dev_priv, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); } if (blc_event || (iir & I915_ASLE_INTERRUPT)) - intel_opregion_asle_intr(dev); + intel_opregion_asle_intr(dev_priv); if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - gmbus_irq_handler(dev); + gmbus_irq_handler(dev_priv); /* With MSI, interrupts are only generated when iir * transitions from zero to nonzero. If another bit got @@ -4611,6 +4566,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv) else dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; + dev_priv->rps.pm_intr_keep = 0; + + /* + * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer + * if GEN6_PM_UP_EI_EXPIRED is masked. + * + * TODO: verify if this can be reproduced on VLV,CHV. + */ + if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) + dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED; + + if (INTEL_INFO(dev_priv)->gen >= 8) + dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP; + INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work, i915_hangcheck_elapsed); @@ -4674,12 +4643,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->disable_vblank = ironlake_disable_vblank; dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; } else { - if (INTEL_INFO(dev_priv)->gen == 2) { + if (IS_GEN2(dev_priv)) { dev->driver->irq_preinstall = i8xx_irq_preinstall; dev->driver->irq_postinstall = i8xx_irq_postinstall; dev->driver->irq_handler = i8xx_irq_handler; dev->driver->irq_uninstall = i8xx_irq_uninstall; - } else if (INTEL_INFO(dev_priv)->gen == 3) { + } else if (IS_GEN3(dev_priv)) { dev->driver->irq_preinstall = i915_irq_preinstall; dev->driver->irq_postinstall = i915_irq_postinstall; dev->driver->irq_uninstall = i915_irq_uninstall; diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 1779f02e6df8..5e18cf9f754d 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -54,10 +54,12 @@ struct i915_params i915 __read_mostly = { .verbose_state_checks = 1, .nuclear_pageflip = 0, .edp_vswing = 0, - .enable_guc_submission = false, + .enable_guc_loading = 0, + .enable_guc_submission = 0, .guc_log_level = -1, .enable_dp_mst = true, .inject_load_failure = 0, + .enable_dpcd_backlight = false, }; module_param_named(modeset, i915.modeset, int, 0400); @@ -197,8 +199,15 @@ MODULE_PARM_DESC(edp_vswing, "(0=use value from vbt [default], 1=low power swing(200mV)," "2=default swing(400mV))"); -module_param_named_unsafe(enable_guc_submission, 
i915.enable_guc_submission, bool, 0400); -MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)"); +module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400); +MODULE_PARM_DESC(enable_guc_loading, + "Enable GuC firmware loading " + "(-1=auto, 0=never [default], 1=if available, 2=required)"); + +module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400); +MODULE_PARM_DESC(enable_guc_submission, + "Enable GuC submission " + "(-1=auto, 0=never [default], 1=if available, 2=required)"); module_param_named(guc_log_level, i915.guc_log_level, int, 0400); MODULE_PARM_DESC(guc_log_level, @@ -210,3 +219,6 @@ MODULE_PARM_DESC(enable_dp_mst, module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); MODULE_PARM_DESC(inject_load_failure, "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); +module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600); +MODULE_PARM_DESC(enable_dpcd_backlight, + "Enable support for DPCD backlight control (default:false)"); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 02bc27804291..1323261a0cdd 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -45,6 +45,8 @@ struct i915_params { int enable_ips; int invert_brightness; int enable_cmd_parser; + int enable_guc_loading; + int enable_guc_submission; int guc_log_level; int use_mmio_flip; int mmio_debug; @@ -57,10 +59,10 @@ struct i915_params { bool load_detect_test; bool reset; bool disable_display; - bool enable_guc_submission; bool verbose_state_checks; bool nuclear_pageflip; bool enable_dp_mst; + bool enable_dpcd_backlight; }; extern struct i915_params i915 __read_mostly; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index b407411e31ba..dfb4c7a88de3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -886,7 +886,7 @@ enum skl_disp_power_wells { * PLLs can be routed to any transcoder A/B/C. * * Note: DDI0 is digital port B, DD1 is digital port C, and DDI2 is - * digital port D (CHV) or port A (BXT). + * digital port D (CHV) or port A (BXT). :: * * * Dual channel PHY (VLV/CHV/BXT) @@ -2449,6 +2449,8 @@ enum skl_disp_power_wells { #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 +#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024) + #define _FPA0 0x6040 #define _FPA1 0x6044 #define _FPB0 0x6048 @@ -6031,6 +6033,7 @@ enum skl_disp_power_wells { #define CHICKEN_PAR1_1 _MMIO(0x42080) #define DPA_MASK_VBLANK_SRD (1 << 15) #define FORCE_ARB_IDLE_PLANES (1 << 14) +#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3) #define _CHICKEN_PIPESL_1_A 0x420b0 #define _CHICKEN_PIPESL_1_B 0x420b4 @@ -6089,7 +6092,14 @@ enum skl_disp_power_wells { #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 #define GEN8_L3SQCREG1 _MMIO(0xB100) -#define BDW_WA_L3SQCREG1_DEFAULT 0x784000 +/* + * Note that on CHV the following has an off-by-one error wrt. to BSpec. + * Using the formula in BSpec leads to a hang, while the formula here works + * fine and matches the formulas for all other platforms. A BSpec change + * request has been filed to clarify this. 
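enable_guc_loading and enable_guc_submission change from bool to int so they can carry a four-way policy: -1=auto, 0=never (default), 1=if available, 2=required. A sketch of how such a knob could be resolved; this policy function is invented for illustration and is not the driver's actual resolution logic:

#include <stdio.h>

/* -1=auto, 0=never, 1=if available, 2=required (per the parameter
 * descriptions above). Returns 1 to enable, 0 to skip, -1 to fail. */
static int resolve_tristate(int param, int hw_has_feature, int auto_default)
{
	if (param < 0)
		param = auto_default;	/* -1: driver picks a default */
	if (param == 2 && !hw_has_feature)
		return -1;		/* required but unavailable */
	return (param >= 1 && hw_has_feature) ? 1 : 0;
}

int main(void)
{
	printf("%d\n", resolve_tristate(-1, 1, 1));	/* auto on capable hw: 1 */
	return 0;
}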
+ */ +#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) +#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) #define GEN7_L3CNTLREG1 _MMIO(0xB01C) #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C @@ -7021,7 +7031,7 @@ enum skl_disp_power_wells { #define VLV_RCEDATA _MMIO(0xA0BC) #define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0) #define GEN6_PMINTRMSK _MMIO(0xA168) -#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) +#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) #define VLV_PWRDWNUPCTL _MMIO(0xA294) #define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4) #define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8) @@ -7557,14 +7567,15 @@ enum skl_disp_power_wells { #define CDCLK_FREQ_540 (1<<26) #define CDCLK_FREQ_337_308 (2<<26) #define CDCLK_FREQ_675_617 (3<<26) -#define CDCLK_FREQ_DECIMAL_MASK (0x7ff) - #define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22) #define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22) #define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22) #define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) #define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) +#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20) +#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) +#define CDCLK_FREQ_DECIMAL_MASK (0x7ff) /* LCPLL_CTL */ #define LCPLL1_CTL _MMIO(0x46010) diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 2d576b7ff299..02507bfc8def 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -43,7 +43,7 @@ static u32 calc_residency(struct drm_device *dev, u64 units = 128ULL, div = 100000ULL; u32 ret; - if (!intel_enable_rc6(dev)) + if (!intel_enable_rc6()) return 0; intel_runtime_pm_get(dev_priv); @@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev, static ssize_t show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) { - struct drm_minor *dminor = dev_to_drm_minor(kdev); - return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev)); + return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6()); } static ssize_t @@ -204,7 +203,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj, struct drm_minor *dminor = dev_to_drm_minor(dev); struct drm_device *drm_dev = dminor->dev; struct drm_i915_private *dev_priv = drm_dev->dev_private; - struct intel_context *ctx; + struct i915_gem_context *ctx; u32 *temp = NULL; /* Just here to make handling failures easy */ int slice = (int)(uintptr_t)attr->private; int ret; @@ -397,7 +396,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, /* We still need *_set_rps to process the new max_delay and * update the interrupt limits and PMINTRMSK even though * frequency request may be unchanged. */ - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); @@ -461,7 +460,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, /* We still need *_set_rps to process the new min_delay and * update the interrupt limits and PMINTRMSK even though * frequency request may be unchanged. 
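The new credit macros store the credit count in units of two (hence the >> 1) at bit 19 and bit 14. They reproduce the hardcoded BDW_WA_L3SQCREG1_DEFAULT value (0x784000) that this hunk removes, which is a quick way to sanity-check them:

#include <stdint.h>
#include <stdio.h>

#define L3_GENERAL_PRIO_CREDITS(x) (((uint32_t)(x) >> 1) << 19)
#define L3_HIGH_PRIO_CREDITS(x)    (((uint32_t)(x) >> 1) << 14)

int main(void)
{
	/* 30 general + 2 high-priority credits decode to the old
	 * BDW default register value. */
	uint32_t v = L3_GENERAL_PRIO_CREDITS(30) | L3_HIGH_PRIO_CREDITS(2);

	printf("0x%06x\n", (unsigned)v);	/* prints 0x784000 */
	return 0;
}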
*/ - intel_set_rps(dev, val); + intel_set_rps(dev_priv, val); mutex_unlock(&dev_priv->rps.hw_lock); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index dc0def210097..6768db032f84 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to, ), TP_fast_assign( - __entry->dev = from->dev->primary->index; + __entry->dev = from->i915->dev->primary->index; __entry->sync_from = from->id; __entry->sync_to = to_req->engine->id; __entry->seqno = i915_gem_request_get_seqno(req); @@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch, ), TP_fast_assign( - struct intel_engine_cs *engine = - i915_gem_request_get_engine(req); - __entry->dev = engine->dev->primary->index; - __entry->ring = engine->id; - __entry->seqno = i915_gem_request_get_seqno(req); + __entry->dev = req->i915->dev->primary->index; + __entry->ring = req->engine->id; + __entry->seqno = req->seqno; __entry->flags = flags; - i915_trace_irq_get(engine, req); + i915_trace_irq_get(req->engine, req); ), TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", @@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush, ), TP_fast_assign( - __entry->dev = req->engine->dev->primary->index; + __entry->dev = req->i915->dev->primary->index; __entry->ring = req->engine->id; __entry->invalidate = invalidate; __entry->flush = flush; @@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request, ), TP_fast_assign( - struct intel_engine_cs *engine = - i915_gem_request_get_engine(req); - __entry->dev = engine->dev->primary->index; - __entry->ring = engine->id; - __entry->seqno = i915_gem_request_get_seqno(req); + __entry->dev = req->i915->dev->primary->index; + __entry->ring = req->engine->id; + __entry->seqno = req->seqno; ), TP_printk("dev=%u, ring=%u, seqno=%u", @@ -560,7 +556,7 @@ TRACE_EVENT(i915_gem_request_notify, ), TP_fast_assign( - __entry->dev = engine->dev->primary->index; + __entry->dev = engine->i915->dev->primary->index; __entry->ring = engine->id; __entry->seqno = engine->get_seqno(engine); ), @@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin, * less desirable. */ TP_fast_assign( - struct intel_engine_cs *engine = - i915_gem_request_get_engine(req); - __entry->dev = engine->dev->primary->index; - __entry->ring = engine->id; - __entry->seqno = i915_gem_request_get_seqno(req); + __entry->dev = req->i915->dev->primary->index; + __entry->ring = req->engine->id; + __entry->seqno = req->seqno; __entry->blocking = - mutex_is_locked(&engine->dev->struct_mutex); + mutex_is_locked(&req->i915->dev->struct_mutex); ), TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", @@ -740,12 +734,12 @@ DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release, * the context. */ DECLARE_EVENT_CLASS(i915_context, - TP_PROTO(struct intel_context *ctx), + TP_PROTO(struct i915_gem_context *ctx), TP_ARGS(ctx), TP_STRUCT__entry( __field(u32, dev) - __field(struct intel_context *, ctx) + __field(struct i915_gem_context *, ctx) __field(struct i915_address_space *, vm) ), @@ -760,12 +754,12 @@ DECLARE_EVENT_CLASS(i915_context, ) DEFINE_EVENT(i915_context, i915_context_create, - TP_PROTO(struct intel_context *ctx), + TP_PROTO(struct i915_gem_context *ctx), TP_ARGS(ctx) ); DEFINE_EVENT(i915_context, i915_context_free, - TP_PROTO(struct intel_context *ctx), + TP_PROTO(struct i915_gem_context *ctx), TP_ARGS(ctx) ); @@ -777,13 +771,13 @@ DEFINE_EVENT(i915_context, i915_context_free, * called only if full ppgtt is enabled. 
*/ TRACE_EVENT(switch_mm, - TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to), + TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to), TP_ARGS(engine, to), TP_STRUCT__entry( __field(u32, ring) - __field(struct intel_context *, to) + __field(struct i915_gem_context *, to) __field(struct i915_address_space *, vm) __field(u32, dev) ), @@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm, __entry->ring = engine->id; __entry->to = to; __entry->vm = to->ppgtt? &to->ppgtt->base : NULL; - __entry->dev = engine->dev->primary->index; + __entry->dev = engine->i915->dev->primary->index; ), TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index d02efb8cad4d..004326291854 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -58,15 +58,14 @@ * This function is called at the initialization stage, to detect whether * running on a vGPU. */ -void i915_check_vgpu(struct drm_device *dev) +void i915_check_vgpu(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); uint64_t magic; uint32_t version; BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); - if (!IS_HASWELL(dev)) + if (!IS_HASWELL(dev_priv)) return; magic = __raw_i915_read64(dev_priv, vgtif_reg(magic)); @@ -136,7 +135,7 @@ static int vgt_balloon_space(struct drm_mm *mm, /** * intel_vgt_balloon - balloon out reserved graphics address trunks - * @dev: drm device + * @dev_priv: i915 device * * This function is called at the initialization stage, to balloon out the * graphic address space allocated to other vGPUs, by marking these spaces as @@ -151,28 +150,28 @@ static int vgt_balloon_space(struct drm_mm *mm, * of its graphic space being zero. Yet there are some portions ballooned out( * the shadow part, which are marked as reserved by drm allocator). From the * host point of view, the graphic address space is partitioned by multiple - * vGPUs in different VMs. + * vGPUs in different VMs. 
:: * * vGPU1 view Host view * 0 ------> +-----------+ +-----------+ - * ^ |///////////| | vGPU3 | - * | |///////////| +-----------+ - * | |///////////| | vGPU2 | + * ^ |###########| | vGPU3 | + * | |###########| +-----------+ + * | |###########| | vGPU2 | * | +-----------+ +-----------+ * mappable GM | available | ==> | vGPU1 | * | +-----------+ +-----------+ - * | |///////////| | | - * v |///////////| | Host | + * | |###########| | | + * v |###########| | Host | * +=======+===========+ +===========+ - * ^ |///////////| | vGPU3 | - * | |///////////| +-----------+ - * | |///////////| | vGPU2 | + * ^ |###########| | vGPU3 | + * | |###########| +-----------+ + * | |###########| | vGPU2 | * | +-----------+ +-----------+ * unmappable GM | available | ==> | vGPU1 | * | +-----------+ +-----------+ - * | |///////////| | | - * | |///////////| | Host | - * v |///////////| | | + * | |###########| | | + * | |###########| | Host | + * v |###########| | | * total GM size ------> +-----------+ +-----------+ * * Returns: diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index 3c83b47b5f69..21ffcfea5f5d 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h @@ -110,7 +110,7 @@ struct vgt_if { #define VGT_DRV_DISPLAY_NOT_READY 0 #define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */ -extern void i915_check_vgpu(struct drm_device *dev); +extern void i915_check_vgpu(struct drm_i915_private *dev_priv); extern int intel_vgt_balloon(struct drm_device *dev); extern void intel_vgt_deballoon(void); diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 50ff90aea721..c5a166752eda 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -191,7 +191,7 @@ int intel_atomic_setup_scalers(struct drm_device *dev, /* plane scaler case: assign as a plane scaler */ /* find the plane that set the bit as scaler_user */ - plane = drm_state->planes[i]; + plane = drm_state->planes[i].ptr; /* * to enable/disable hq mode, add planes that are using scaler @@ -223,7 +223,8 @@ int intel_atomic_setup_scalers(struct drm_device *dev, continue; } - plane_state = to_intel_plane_state(drm_state->plane_states[i]); + plane_state = intel_atomic_get_existing_plane_state(drm_state, + intel_plane); scaler_id = &plane_state->scaler_id; } diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 02a7527ce7bb..b9329c2a670a 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -621,17 +621,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev, static int i915_audio_component_get_cdclk_freq(struct device *dev) { struct drm_i915_private *dev_priv = dev_to_i915(dev); - int ret; if (WARN_ON_ONCE(!HAS_DDI(dev_priv))) return -ENODEV; - intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); - ret = dev_priv->display.get_display_clock_speed(dev_priv->dev); - - intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO); - - return ret; + return dev_priv->cdclk_freq; } static int i915_audio_component_sync_audio_rate(struct device *dev, diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b235b6e88ead..713a02db378a 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, else panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; + panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 
8) | + dvo_timing->himage_lo; + panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) | + dvo_timing->vimage_lo; + /* Some VBTs have bogus h/vtotal values */ if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; @@ -213,7 +218,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, dev_priv->vbt.lvds_dither = lvds_options->pixel_dither; - ret = intel_opregion_get_panel_type(dev_priv->dev); + ret = intel_opregion_get_panel_type(dev_priv); if (ret >= 0) { WARN_ON(ret > 0xf); panel_type = ret; @@ -318,6 +323,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, return; } + dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI; + if (bdb->version >= 191 && + get_blocksize(backlight_data) >= sizeof(*backlight_data)) { + const struct bdb_lfp_backlight_control_method *method; + + method = &backlight_data->backlight_control[panel_type]; + dev_priv->vbt.backlight.type = method->type; + } + dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; dev_priv->vbt.backlight.min_brightness = entry->min_brightness; @@ -763,6 +777,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv, return; } + /* + * These fields were introduced in VBT version 197; make sure they + * are cleared to zero for earlier versions, where the bits were + * still reserved. + */ + if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) { + dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0; + dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0; + } + /* We have mandatory mipi config blocks. Initialize as generic panel */ dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; } @@ -1187,7 +1211,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, } if (bdb->version < 106) { expected_size = 22; - } else if (bdb->version < 109) { + } else if (bdb->version < 111) { expected_size = 27; } else if (bdb->version < 195) { BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index ab0ea315eddb..8405b5a367d7 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -30,6 +30,14 @@ #ifndef _INTEL_BIOS_H_ #define _INTEL_BIOS_H_ +enum intel_backlight_type { + INTEL_BACKLIGHT_PMIC, + INTEL_BACKLIGHT_LPSS, + INTEL_BACKLIGHT_DISPLAY_DDI, + INTEL_BACKLIGHT_DSI_DCS, + INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE, +}; + struct edp_power_seq { u16 t1_t3; u16 t8; @@ -113,7 +121,13 @@ struct mipi_config { u16 dual_link:2; u16 lane_cnt:2; u16 pixel_overlap:3; - u16 rsvd3:9; + u16 rgb_flip:1; +#define DL_DCS_PORT_A 0x00 +#define DL_DCS_PORT_C 0x01 +#define DL_DCS_PORT_A_AND_C 0x02 + u16 dl_dcs_cabc_ports:2; + u16 dl_dcs_backlight_ports:2; + u16 rsvd3:4; u16 rsvd4; diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 1b3f97449395..522f5a2de015 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -547,7 +547,8 @@ void intel_color_init(struct drm_crtc *crtc) /* Enable color management support when we have degamma & gamma LUTs. 
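 *
 * The drm_crtc_enable_color_mgmt() helper takes a bool between the two
 * LUT sizes that advertises CTM (color transform matrix) support; a
 * sketch of the full call, with the sizes coming from the per-platform
 * device info:
 *
 *	drm_crtc_enable_color_mgmt(crtc,
 *				   INTEL_INFO(dev)->color.degamma_lut_size,
 *				   true,
 *				   INTEL_INFO(dev)->color.gamma_lut_size);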
*/ if (INTEL_INFO(dev)->color.degamma_lut_size != 0 && INTEL_INFO(dev)->color.gamma_lut_size != 0) - drm_helper_crtc_enable_color_mgmt(crtc, + drm_crtc_enable_color_mgmt(crtc, INTEL_INFO(dev)->color.degamma_lut_size, + true, INTEL_INFO(dev)->color.gamma_lut_size); } diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 3fbb6fc66451..9465de4135aa 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -753,7 +753,6 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { .mode_valid = intel_crt_mode_valid, .get_modes = intel_crt_get_modes, - .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_crt_enc_funcs = { @@ -839,7 +838,7 @@ void intel_crt_init(struct drm_device *dev) &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, - DRM_MODE_ENCODER_DAC, NULL); + DRM_MODE_ENCODER_DAC, "CRT"); intel_connector_attach_encoder(intel_connector, &crt->base); diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index a34c23eceba0..2b3b428d9cd2 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -41,16 +41,22 @@ * be moved to FW_FAILED. */ +#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin" +MODULE_FIRMWARE(I915_CSR_KBL); +#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) + #define I915_CSR_SKL "i915/skl_dmc_ver1.bin" +MODULE_FIRMWARE(I915_CSR_SKL); +#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) + #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin" +MODULE_FIRMWARE(I915_CSR_BXT); +#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) #define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares" -MODULE_FIRMWARE(I915_CSR_SKL); -MODULE_FIRMWARE(I915_CSR_BXT); -#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) -#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) + #define CSR_MAX_FW_SIZE 0x2FFF #define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF @@ -169,12 +175,10 @@ struct stepping_info { char substepping; }; -/* - * Kabylake derivated from Skylake H0, so SKL H0 - * is the right firmware for KBL A0 (revid 0). 
- */ static const struct stepping_info kbl_stepping_info[] = { - {'H', '0'}, {'I', '0'} + {'A', '0'}, {'B', '0'}, {'C', '0'}, + {'D', '0'}, {'E', '0'}, {'F', '0'}, + {'G', '0'}, {'H', '0'}, {'I', '0'}, }; static const struct stepping_info skl_stepping_info[] = { @@ -298,7 +302,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, csr->version = css_header->version; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + if (IS_KABYLAKE(dev_priv)) { + required_min_version = KBL_CSR_VERSION_REQUIRED; + } else if (IS_SKYLAKE(dev_priv)) { required_min_version = SKL_CSR_VERSION_REQUIRED; } else if (IS_BROXTON(dev_priv)) { required_min_version = BXT_CSR_VERSION_REQUIRED; @@ -446,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) if (!HAS_CSR(dev_priv)) return; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + if (IS_KABYLAKE(dev_priv)) + csr->fw_path = I915_CSR_KBL; + else if (IS_SKYLAKE(dev_priv)) csr->fw_path = I915_CSR_SKL; else if (IS_BROXTON(dev_priv)) csr->fw_path = I915_CSR_BXT; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 01e523df363b..022b41d422dc 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -948,7 +948,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv, { struct intel_shared_dpll *pll; struct intel_dpll_hw_state *state; - intel_clock_t clock; + struct dpll clock; /* For DDI ports we always use a shared PLL. */ if (WARN_ON(dpll == DPLL_ID_PRIVATE)) @@ -2347,7 +2347,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) encoder = &intel_encoder->base; drm_encoder_init(dev, encoder, &intel_ddi_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port)); intel_encoder->compute_config = intel_ddi_compute_config; intel_encoder->enable = intel_enable_ddi; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2113f401f0ba..49322f6cfa2b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -48,6 +48,11 @@ #include <linux/reservation.h> #include <linux/dma-buf.h> +static bool is_mmio_work(struct intel_flip_work *work) +{ + return work->mmio_work.func; +} + /* Primary plane formats for gen <= 3 */ static const uint32_t i8xx_primary_formats[] = { DRM_FORMAT_C8, @@ -117,20 +122,18 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); static void ironlake_pfit_enable(struct intel_crtc *crtc); static void intel_modeset_setup_hw_state(struct drm_device *dev); static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); +static int ilk_max_pixel_rate(struct drm_atomic_state *state); +static int broxton_calc_cdclk(int max_pixclk); -typedef struct { - int min, max; -} intel_range_t; - -typedef struct { - int dot_limit; - int p2_slow, p2_fast; -} intel_p2_t; - -typedef struct intel_limit intel_limit_t; struct intel_limit { - intel_range_t dot, vco, n, m, m1, m2, p, p1; - intel_p2_t p2; + struct { + int min, max; + } dot, vco, n, m, m1, m2, p, p1; + + struct { + int dot_limit; + int p2_slow, p2_fast; + } p2; }; /* returns HPLL frequency in kHz */ @@ -185,6 +188,7 @@ intel_pch_rawclk(struct drm_i915_private *dev_priv) static int intel_vlv_hrawclk(struct drm_i915_private *dev_priv) { + /* RAWCLK_FREQ_VLV register updated from power well code */ return vlv_get_cck_clock_hpll(dev_priv, "hrawclk", CCK_DISPLAY_REF_CLOCK_CONTROL); } @@ -218,7 +222,7 @@ intel_g4x_hrawclk(struct drm_i915_private *dev_priv) } } -static 
void intel_update_rawclk(struct drm_i915_private *dev_priv) +void intel_update_rawclk(struct drm_i915_private *dev_priv) { if (HAS_PCH_SPLIT(dev_priv)) dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv); @@ -255,7 +259,7 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv, return 270000; } -static const intel_limit_t intel_limits_i8xx_dac = { +static const struct intel_limit intel_limits_i8xx_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 908000, .max = 1512000 }, .n = { .min = 2, .max = 16 }, @@ -268,7 +272,7 @@ static const intel_limit_t intel_limits_i8xx_dac = { .p2_slow = 4, .p2_fast = 2 }, }; -static const intel_limit_t intel_limits_i8xx_dvo = { +static const struct intel_limit intel_limits_i8xx_dvo = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 908000, .max = 1512000 }, .n = { .min = 2, .max = 16 }, @@ -281,7 +285,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = { .p2_slow = 4, .p2_fast = 4 }, }; -static const intel_limit_t intel_limits_i8xx_lvds = { +static const struct intel_limit intel_limits_i8xx_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 908000, .max = 1512000 }, .n = { .min = 2, .max = 16 }, @@ -294,7 +298,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = { .p2_slow = 14, .p2_fast = 7 }, }; -static const intel_limit_t intel_limits_i9xx_sdvo = { +static const struct intel_limit intel_limits_i9xx_sdvo = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1400000, .max = 2800000 }, .n = { .min = 1, .max = 6 }, @@ -307,7 +311,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = { .p2_slow = 10, .p2_fast = 5 }, }; -static const intel_limit_t intel_limits_i9xx_lvds = { +static const struct intel_limit intel_limits_i9xx_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1400000, .max = 2800000 }, .n = { .min = 1, .max = 6 }, @@ -321,7 +325,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = { }; -static const intel_limit_t intel_limits_g4x_sdvo = { +static const struct intel_limit intel_limits_g4x_sdvo = { .dot = { .min = 25000, .max = 270000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 4 }, @@ -336,7 +340,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = { }, }; -static const intel_limit_t intel_limits_g4x_hdmi = { +static const struct intel_limit intel_limits_g4x_hdmi = { .dot = { .min = 22000, .max = 400000 }, .vco = { .min = 1750000, .max = 3500000}, .n = { .min = 1, .max = 4 }, @@ -349,7 +353,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = { .p2_slow = 10, .p2_fast = 5 }, }; -static const intel_limit_t intel_limits_g4x_single_channel_lvds = { +static const struct intel_limit intel_limits_g4x_single_channel_lvds = { .dot = { .min = 20000, .max = 115000 }, .vco = { .min = 1750000, .max = 3500000 }, .n = { .min = 1, .max = 3 }, @@ -363,7 +367,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = { }, }; -static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { +static const struct intel_limit intel_limits_g4x_dual_channel_lvds = { .dot = { .min = 80000, .max = 224000 }, .vco = { .min = 1750000, .max = 3500000 }, .n = { .min = 1, .max = 3 }, @@ -377,7 +381,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { }, }; -static const intel_limit_t intel_limits_pineview_sdvo = { +static const struct intel_limit intel_limits_pineview_sdvo = { .dot = { .min = 20000, .max = 400000}, .vco = { .min = 1700000, .max = 3500000 }, /* Pineview's Ncounter is a ring counter */ @@ -392,7 +396,7 @@ static const intel_limit_t 
intel_limits_pineview_sdvo = { .p2_slow = 10, .p2_fast = 5 }, }; -static const intel_limit_t intel_limits_pineview_lvds = { +static const struct intel_limit intel_limits_pineview_lvds = { .dot = { .min = 20000, .max = 400000 }, .vco = { .min = 1700000, .max = 3500000 }, .n = { .min = 3, .max = 6 }, @@ -410,7 +414,7 @@ static const intel_limit_t intel_limits_pineview_lvds = { * We calculate clock using (register_value + 2) for N/M1/M2, so here * the range value for them is (actual_value - 2). */ -static const intel_limit_t intel_limits_ironlake_dac = { +static const struct intel_limit intel_limits_ironlake_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 5 }, @@ -423,7 +427,7 @@ static const intel_limit_t intel_limits_ironlake_dac = { .p2_slow = 10, .p2_fast = 5 }, }; -static const intel_limit_t intel_limits_ironlake_single_lvds = { +static const struct intel_limit intel_limits_ironlake_single_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -436,7 +440,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds = { .p2_slow = 14, .p2_fast = 14 }, }; -static const intel_limit_t intel_limits_ironlake_dual_lvds = { +static const struct intel_limit intel_limits_ironlake_dual_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -450,7 +454,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds = { }; /* LVDS 100mhz refclk limits. */ -static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { +static const struct intel_limit intel_limits_ironlake_single_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 2 }, @@ -463,7 +467,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { .p2_slow = 14, .p2_fast = 14 }, }; -static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { +static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 1760000, .max = 3510000 }, .n = { .min = 1, .max = 3 }, @@ -476,7 +480,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { .p2_slow = 7, .p2_fast = 7 }, }; -static const intel_limit_t intel_limits_vlv = { +static const struct intel_limit intel_limits_vlv = { /* * These are the data rate limits (measured in fast clocks) * since those are the strictest limits we have. The fast @@ -492,7 +496,7 @@ static const intel_limit_t intel_limits_vlv = { .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ }; -static const intel_limit_t intel_limits_chv = { +static const struct intel_limit intel_limits_chv = { /* * These are the data rate limits (measured in fast clocks) * since those are the strictest limits we have. The fast @@ -508,7 +512,7 @@ static const intel_limit_t intel_limits_chv = { .p2 = { .p2_slow = 1, .p2_fast = 14 }, }; -static const intel_limit_t intel_limits_bxt = { +static const struct intel_limit intel_limits_bxt = { /* FIXME: find real dot limits */ .dot = { .min = 0, .max = INT_MAX }, .vco = { .min = 4800000, .max = 6700000 }, @@ -581,7 +585,7 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state, * divided-down version of it. 
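 *
 * Worked example of the i9xx divider math implemented below (values
 * hypothetical but within the i9xx limits): refclk = 96000 kHz, n = 1,
 * m1 = 10, m2 = 5 gives m = 5 * (10 + 2) + (5 + 2) = 67 and
 * vco = 96000 * 67 / (1 + 2) = 2144000 kHz; with p1 = 2, p2 = 5 the
 * dot clock is 2144000 / 10 = 214400 kHz.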
*/ /* m1 is reserved as 0 in Pineview, n is a ring counter */ -static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock) +static int pnv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m2 + 2; clock->p = clock->p1 * clock->p2; @@ -598,7 +602,7 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); } -static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) +static int i9xx_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = i9xx_dpll_compute_m(clock); clock->p = clock->p1 * clock->p2; @@ -610,7 +614,7 @@ static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) return clock->dot; } -static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) +static int vlv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m1 * clock->m2; clock->p = clock->p1 * clock->p2; @@ -622,7 +626,7 @@ static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) return clock->dot / 5; } -int chv_calc_dpll_params(int refclk, intel_clock_t *clock) +int chv_calc_dpll_params(int refclk, struct dpll *clock) { clock->m = clock->m1 * clock->m2; clock->p = clock->p1 * clock->p2; @@ -642,8 +646,8 @@ int chv_calc_dpll_params(int refclk, intel_clock_t *clock) */ static bool intel_PLL_is_valid(struct drm_device *dev, - const intel_limit_t *limit, - const intel_clock_t *clock) + const struct intel_limit *limit, + const struct dpll *clock) { if (clock->n < limit->n.min || limit->n.max < clock->n) INTELPllInvalid("n out of range\n"); @@ -678,7 +682,7 @@ static bool intel_PLL_is_valid(struct drm_device *dev, } static int -i9xx_select_p2_div(const intel_limit_t *limit, +i9xx_select_p2_div(const struct intel_limit *limit, const struct intel_crtc_state *crtc_state, int target) { @@ -713,13 +717,13 @@ i9xx_select_p2_div(const intel_limit_t *limit, * divider from @match_clock used for LVDS downclocking. */ static bool -i9xx_find_best_dpll(const intel_limit_t *limit, +i9xx_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct drm_device *dev = crtc_state->base.crtc->dev; - intel_clock_t clock; + struct dpll clock; int err = target; memset(best_clock, 0, sizeof(*best_clock)); @@ -770,13 +774,13 @@ i9xx_find_best_dpll(const intel_limit_t *limit, * divider from @match_clock used for LVDS downclocking. */ static bool -pnv_find_best_dpll(const intel_limit_t *limit, +pnv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct drm_device *dev = crtc_state->base.crtc->dev; - intel_clock_t clock; + struct dpll clock; int err = target; memset(best_clock, 0, sizeof(*best_clock)); @@ -825,13 +829,13 @@ pnv_find_best_dpll(const intel_limit_t *limit, * divider from @match_clock used for LVDS downclocking. 
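 *
 * All the find_best_dpll() variants below share roughly the same
 * candidate bookkeeping (a sketch, variable names as in the i9xx/pnv
 * flavours):
 *
 *	this_err = abs(clock.dot - target);
 *	if (this_err < err) {
 *		*best_clock = clock;
 *		err = this_err;
 *	}
 *
 * The g4x flavour seeds its error bound with approximately
 * target * 0.00585, so wildly off-frequency candidates are rejected
 * outright.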
*/ static bool -g4x_find_best_dpll(const intel_limit_t *limit, +g4x_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct drm_device *dev = crtc_state->base.crtc->dev; - intel_clock_t clock; + struct dpll clock; int max_n; bool found = false; /* approximately equals target * 0.00585 */ @@ -877,8 +881,8 @@ g4x_find_best_dpll(const intel_limit_t *limit, * best configuration and error found so far. Return the calculated error. */ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, - const intel_clock_t *calculated_clock, - const intel_clock_t *best_clock, + const struct dpll *calculated_clock, + const struct dpll *best_clock, unsigned int best_error_ppm, unsigned int *error_ppm) { @@ -918,14 +922,14 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ static bool -vlv_find_best_dpll(const intel_limit_t *limit, +vlv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; - intel_clock_t clock; + struct dpll clock; unsigned int bestppm = 1000000; /* min update 19.2 MHz */ int max_n = min(limit->n.max, refclk / 19200); @@ -977,15 +981,15 @@ vlv_find_best_dpll(const intel_limit_t *limit, * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ static bool -chv_find_best_dpll(const intel_limit_t *limit, +chv_find_best_dpll(const struct intel_limit *limit, struct intel_crtc_state *crtc_state, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) + int target, int refclk, struct dpll *match_clock, + struct dpll *best_clock) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; unsigned int best_error_ppm; - intel_clock_t clock; + struct dpll clock; uint64_t m2; int found = false; @@ -1035,10 +1039,10 @@ chv_find_best_dpll(const intel_limit_t *limit, } bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, - intel_clock_t *best_clock) + struct dpll *best_clock) { int refclk = 100000; - const intel_limit_t *limit = &intel_limits_bxt; + const struct intel_limit *limit = &intel_limits_bxt; return chv_find_best_dpll(limit, crtc_state, target_clock, refclk, NULL, best_clock); @@ -1203,7 +1207,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, u32 val; /* ILK FDI PLL is always enabled */ - if (INTEL_INFO(dev_priv)->gen == 5) + if (IS_GEN5(dev_priv)) return; /* On Haswell, DDI ports are responsible for the FDI PLL setup */ @@ -2309,7 +2313,7 @@ err_pm: return ret; } -static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) +void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) { struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; @@ -3110,17 +3114,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, return -ENODEV; } -static void intel_complete_page_flips(struct drm_device *dev) +static void intel_complete_page_flips(struct drm_i915_private *dev_priv) { - struct drm_crtc *crtc; 
- - for_each_crtc(dev, crtc) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum plane plane = intel_crtc->plane; + struct intel_crtc *crtc; - intel_prepare_page_flip(dev, plane); - intel_finish_page_flip_plane(dev, plane); - } + for_each_intel_crtc(dev_priv->dev, crtc) + intel_finish_page_flip_cs(dev_priv, crtc->pipe); } static void intel_update_primary_planes(struct drm_device *dev) @@ -3143,41 +3142,39 @@ static void intel_update_primary_planes(struct drm_device *dev) } } -void intel_prepare_reset(struct drm_device *dev) +void intel_prepare_reset(struct drm_i915_private *dev_priv) { /* no reset support for gen2 */ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return; /* reset doesn't touch the display */ - if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) return; - drm_modeset_lock_all(dev); + drm_modeset_lock_all(dev_priv->dev); /* * Disabling the crtcs gracefully seems nicer. Also the * g33 docs say we should at least disable all the planes. */ - intel_display_suspend(dev); + intel_display_suspend(dev_priv->dev); } -void intel_finish_reset(struct drm_device *dev) +void intel_finish_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - /* * Flips in the rings will be nuked by the reset, * so complete all pending flips so that user space * will get its events and not get stuck. */ - intel_complete_page_flips(dev); + intel_complete_page_flips(dev_priv); /* no reset support for gen2 */ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return; /* reset doesn't touch the display */ - if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) { + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { /* * Flips in the rings have been nuked by the reset, * so update the base address of all primary @@ -3187,7 +3184,7 @@ void intel_finish_reset(struct drm_device *dev) * FIXME: Atomic will make this obsolete since we won't schedule * CS-based flips (which might get lost in gpu resets) any more. 
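 *
 * Only CS-based flips need completing here; MMIO flips are driven by
 * CPU register writes and survive a GPU reset. The two are told apart
 * by whether the work has an MMIO callback attached, as in the
 * is_mmio_work() helper introduced above:
 *
 *	work = intel_crtc->flip_work;
 *	if (work && !is_mmio_work(work))
 *		page_flip_completed(intel_crtc);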
*/ - intel_update_primary_planes(dev); + intel_update_primary_planes(dev_priv->dev); return; } @@ -3198,18 +3195,18 @@ void intel_finish_reset(struct drm_device *dev) intel_runtime_pm_disable_interrupts(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); - intel_modeset_init_hw(dev); + intel_modeset_init_hw(dev_priv->dev); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - intel_display_resume(dev); + intel_display_resume(dev_priv->dev); intel_hpd_init(dev_priv); - drm_modeset_unlock_all(dev); + drm_modeset_unlock_all(dev_priv->dev); } static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) @@ -3224,7 +3221,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) return false; spin_lock_irq(&dev->event_lock); - pending = to_intel_crtc(crtc)->unpin_work != NULL; + pending = to_intel_crtc(crtc)->flip_work != NULL; spin_unlock_irq(&dev->event_lock); return pending; @@ -3803,7 +3800,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev) if (atomic_read(&crtc->unpin_work_count) == 0) continue; - if (crtc->unpin_work) + if (crtc->flip_work) intel_wait_for_vblank(dev, crtc->pipe); return true; @@ -3815,11 +3812,9 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev) static void page_flip_completed(struct intel_crtc *intel_crtc) { struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - struct intel_unpin_work *work = intel_crtc->unpin_work; + struct intel_flip_work *work = intel_crtc->flip_work; - /* ensure that the unpin work is consistent wrt ->pending. */ - smp_rmb(); - intel_crtc->unpin_work = NULL; + intel_crtc->flip_work = NULL; if (work->event) drm_crtc_send_vblank_event(&intel_crtc->base, work->event); @@ -3827,7 +3822,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc) drm_crtc_vblank_put(&intel_crtc->base); wake_up_all(&dev_priv->pending_flip_queue); - queue_work(dev_priv->wq, &work->work); + queue_work(dev_priv->wq, &work->unpin_work); trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); @@ -3851,9 +3846,11 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) if (ret == 0) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_flip_work *work; spin_lock_irq(&dev->event_lock); - if (intel_crtc->unpin_work) { + work = intel_crtc->flip_work; + if (work && !is_mmio_work(work)) { WARN_ONCE(1, "Removing stuck page flip\n"); page_flip_completed(intel_crtc); } @@ -4281,8 +4278,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state) struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; - DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n", - intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX); + DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n", + intel_crtc->base.base.id, intel_crtc->base.name, + intel_crtc->pipe, SKL_CRTC_INDEX); return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0), @@ -4312,9 +4310,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, bool force_detach = !fb || !plane_state->visible; - DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n", - intel_plane->base.base.id, intel_crtc->pipe, - drm_plane_index(&intel_plane->base)); + DRM_DEBUG_KMS("Updating scaler for 
[PLANE:%d:%s] scaler_user index %u.%u\n", + intel_plane->base.base.id, intel_plane->base.name, + intel_crtc->pipe, drm_plane_index(&intel_plane->base)); ret = skl_update_scaler(crtc_state, force_detach, drm_plane_index(&intel_plane->base), @@ -4330,8 +4328,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, /* check colorkey */ if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) { - DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed", - intel_plane->base.base.id); + DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed", + intel_plane->base.base.id, + intel_plane->base.name); return -EINVAL; } @@ -4350,8 +4349,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, case DRM_FORMAT_VYUY: break; default: - DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n", - intel_plane->base.base.id, fb->base.id, fb->pixel_format); + DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", + intel_plane->base.base.id, intel_plane->base.name, + fb->base.id, fb->pixel_format); return -EINVAL; } @@ -5269,21 +5269,34 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) return max_cdclk_freq*90/100; } +static int skl_calc_cdclk(int max_pixclk, int vco); + static void intel_update_max_cdclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; + int max_cdclk, vco; + + vco = dev_priv->skl_preferred_vco_freq; + WARN_ON(vco != 8100000 && vco != 8640000); + /* + * Use the lower (vco 8640) cdclk values as a + * first guess. skl_calc_cdclk() will correct it + * if the preferred vco is 8100 instead. + */ if (limit == SKL_DFSM_CDCLK_LIMIT_675) - dev_priv->max_cdclk_freq = 675000; + max_cdclk = 617143; else if (limit == SKL_DFSM_CDCLK_LIMIT_540) - dev_priv->max_cdclk_freq = 540000; + max_cdclk = 540000; else if (limit == SKL_DFSM_CDCLK_LIMIT_450) - dev_priv->max_cdclk_freq = 450000; + max_cdclk = 432000; else - dev_priv->max_cdclk_freq = 337500; + max_cdclk = 308571; + + dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); } else if (IS_BROXTON(dev)) { dev_priv->max_cdclk_freq = 624000; } else if (IS_BROADWELL(dev)) { @@ -5324,264 +5337,313 @@ static void intel_update_cdclk(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev); - DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", - dev_priv->cdclk_freq); + + if (INTEL_GEN(dev_priv) >= 9) + DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n", + dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco, + dev_priv->cdclk_pll.ref); + else + DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", + dev_priv->cdclk_freq); /* - * Program the gmbus_freq based on the cdclk frequency. - * BSpec erroneously claims we should aim for 4MHz, but - * in fact 1MHz is the correct frequency. + * 9:0 CMBUS [sic] CDCLK frequency (cdfreq): + * Programmng [sic] note: bit[9:2] should be programmed to the number + * of cdclk that generates 4MHz reference clock freq which is used to + * generate GMBus clock. This will vary with the cdclk freq. */ - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { - /* - * Program the gmbus_freq based on the cdclk frequency. - * BSpec erroneously claims we should aim for 4MHz, but - * in fact 1MHz is the correct frequency. 
- */ + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000)); - } +} - if (dev_priv->max_cdclk_freq == 0) - intel_update_max_cdclk(dev); +/* convert from kHz to .1 fixpoint MHz with -1MHz offset */ +static int skl_cdclk_decimal(int cdclk) +{ + return DIV_ROUND_CLOSEST(cdclk - 1000, 500); } -static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency) +static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk) { - uint32_t divider; - uint32_t ratio; - uint32_t current_freq; - int ret; + int ratio; + + if (cdclk == dev_priv->cdclk_pll.ref) + return 0; - /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */ - switch (frequency) { + switch (cdclk) { + default: + MISSING_CASE(cdclk); case 144000: + case 288000: + case 384000: + case 576000: + ratio = 60; + break; + case 624000: + ratio = 65; + break; + } + + return dev_priv->cdclk_pll.ref * ratio; +} + +static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(BXT_DE_PLL_ENABLE, 0); + + /* Timeout 200us */ + if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1)) + DRM_ERROR("timeout waiting for DE PLL unlock\n"); + + dev_priv->cdclk_pll.vco = 0; +} + +static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) +{ + int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref); + u32 val; + + val = I915_READ(BXT_DE_PLL_CTL); + val &= ~BXT_DE_PLL_RATIO_MASK; + val |= BXT_DE_PLL_RATIO(ratio); + I915_WRITE(BXT_DE_PLL_CTL, val); + + I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); + + /* Timeout 200us */ + if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1)) + DRM_ERROR("timeout waiting for DE PLL lock\n"); + + dev_priv->cdclk_pll.vco = vco; +} + +static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int cdclk) +{ + u32 val, divider; + int vco, ret; + + vco = bxt_de_pll_vco(dev_priv, cdclk); + + DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); + + /* cdclk = vco / 2 / div{1,1.5,2,4} */ + switch (DIV_ROUND_CLOSEST(vco, cdclk)) { + case 8: divider = BXT_CDCLK_CD2X_DIV_SEL_4; - ratio = BXT_DE_PLL_RATIO(60); break; - case 288000: + case 4: divider = BXT_CDCLK_CD2X_DIV_SEL_2; - ratio = BXT_DE_PLL_RATIO(60); break; - case 384000: + case 3: divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; - ratio = BXT_DE_PLL_RATIO(60); - break; - case 576000: - divider = BXT_CDCLK_CD2X_DIV_SEL_1; - ratio = BXT_DE_PLL_RATIO(60); break; - case 624000: + case 2: divider = BXT_CDCLK_CD2X_DIV_SEL_1; - ratio = BXT_DE_PLL_RATIO(65); - break; - case 19200: - /* - * Bypass frequency with DE PLL disabled. Init ratio, divider - * to suppress GCC warning. 
- */ - ratio = 0; - divider = 0; break; default: - DRM_ERROR("unsupported CDCLK freq %d", frequency); + WARN_ON(cdclk != dev_priv->cdclk_pll.ref); + WARN_ON(vco != 0); - return; + divider = BXT_CDCLK_CD2X_DIV_SEL_1; + break; } - mutex_lock(&dev_priv->rps.hw_lock); /* Inform power controller of upcoming frequency change */ + mutex_lock(&dev_priv->rps.hw_lock); ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 0x80000000); mutex_unlock(&dev_priv->rps.hw_lock); if (ret) { DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", - ret, frequency); + ret, cdclk); return; } - current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK; - /* convert from .1 fixpoint MHz with -1MHz offset to kHz */ - current_freq = current_freq * 500 + 1000; + if (dev_priv->cdclk_pll.vco != 0 && + dev_priv->cdclk_pll.vco != vco) + bxt_de_pll_disable(dev_priv); - /* - * DE PLL has to be disabled when - * - setting to 19.2MHz (bypass, PLL isn't used) - * - before setting to 624MHz (PLL needs toggling) - * - before setting to any frequency from 624MHz (PLL needs toggling) - */ - if (frequency == 19200 || frequency == 624000 || - current_freq == 624000) { - I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE); - /* Timeout 200us */ - if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK), - 1)) - DRM_ERROR("timout waiting for DE PLL unlock\n"); - } - - if (frequency != 19200) { - uint32_t val; - - val = I915_READ(BXT_DE_PLL_CTL); - val &= ~BXT_DE_PLL_RATIO_MASK; - val |= ratio; - I915_WRITE(BXT_DE_PLL_CTL, val); - - I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); - /* Timeout 200us */ - if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1)) - DRM_ERROR("timeout waiting for DE PLL lock\n"); - - val = I915_READ(CDCLK_CTL); - val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK; - val |= divider; - /* - * Disable SSA Precharge when CD clock frequency < 500 MHz, - * enable otherwise. - */ - val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE; - if (frequency >= 500000) - val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; + if (dev_priv->cdclk_pll.vco != vco) + bxt_de_pll_enable(dev_priv, vco); - val &= ~CDCLK_FREQ_DECIMAL_MASK; - /* convert from kHz to .1 fixpoint MHz with -1MHz offset */ - val |= (frequency - 1000) / 500; - I915_WRITE(CDCLK_CTL, val); - } + val = divider | skl_cdclk_decimal(cdclk); + /* + * FIXME if only the cd2x divider needs changing, it could be done + * without shutting off the pipe (if only one pipe is active). + */ + val |= BXT_CDCLK_CD2X_PIPE_NONE; + /* + * Disable SSA Precharge when CD clock frequency < 500 MHz, + * enable otherwise. 
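+ *
+ * Worked example with the bins above: cdclk = 288000 kHz implies a
+ * ratio of 60, so vco = 19200 * 60 = 1152000 kHz and
+ * DIV_ROUND_CLOSEST(1152000, 288000) = 4 selects
+ * BXT_CDCLK_CD2X_DIV_SEL_2; the decimal field becomes
+ * (288000 - 1000) / 500 = 574, and SSA Precharge stays disabled
+ * since 288000 < 500000.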
+ */ + if (cdclk >= 500000) + val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; + I915_WRITE(CDCLK_CTL, val); mutex_lock(&dev_priv->rps.hw_lock); ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, - DIV_ROUND_UP(frequency, 25000)); + DIV_ROUND_UP(cdclk, 25000)); mutex_unlock(&dev_priv->rps.hw_lock); if (ret) { DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", - ret, frequency); + ret, cdclk); return; } intel_update_cdclk(dev_priv->dev); } -static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv) +static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) { - if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE)) - return false; + u32 cdctl, expected; - /* TODO: Check for a valid CDCLK rate */ + intel_update_cdclk(dev_priv->dev); - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) { - DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n"); + if (dev_priv->cdclk_pll.vco == 0 || + dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref) + goto sanitize; - return false; - } + /* DPLL okay; verify the cdclock + * + * Some BIOS versions leave an incorrect decimal frequency value and + * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4, + * so sanitize this register. + */ + cdctl = I915_READ(CDCLK_CTL); + /* + * Let's ignore the pipe field, since BIOS could have configured the + * dividers both synching to an active pipe, or asynchronously + * (PIPE_NONE). + */ + cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE; - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) { - DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n"); + expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) | + skl_cdclk_decimal(dev_priv->cdclk_freq); + /* + * Disable SSA Precharge when CD clock frequency < 500 MHz, + * enable otherwise. + */ + if (dev_priv->cdclk_freq >= 500000) + expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; - return false; - } + if (cdctl == expected) + /* All well; nothing to sanitize */ + return; - return true; -} +sanitize: + DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); -bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv) -{ - return broxton_cdclk_is_enabled(dev_priv); + /* force cdclk programming */ + dev_priv->cdclk_freq = 0; + + /* force full PLL disable + enable */ + dev_priv->cdclk_pll.vco = -1; } void broxton_init_cdclk(struct drm_i915_private *dev_priv) { - /* check if cd clock is enabled */ - if (broxton_cdclk_is_enabled(dev_priv)) { - DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n"); - return; - } + bxt_sanitize_cdclk(dev_priv); - DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n"); + if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) + return; /* * FIXME: * - The initial CDCLK needs to be read from VBT. * Need to make this change after VBT has changes for BXT. 
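 *
 * (Until then the call below programs the minimum: broxton_calc_cdclk(0)
 * falls through every pixel-clock bin to 144000 kHz, rather than the
 * 624000 kHz maximum that used to be set here.)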
- * - check if setting the max (or any) cdclk freq is really necessary - * here, it belongs to modeset time */ - broxton_set_cdclk(dev_priv, 624000); - - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); + broxton_set_cdclk(dev_priv, broxton_calc_cdclk(0)); +} - udelay(10); +void broxton_uninit_cdclk(struct drm_i915_private *dev_priv) +{ + broxton_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref); +} - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) - DRM_ERROR("DBuf power enable timeout!\n"); +static int skl_calc_cdclk(int max_pixclk, int vco) +{ + if (vco == 8640000) { + if (max_pixclk > 540000) + return 617143; + else if (max_pixclk > 432000) + return 540000; + else if (max_pixclk > 308571) + return 432000; + else + return 308571; + } else { + if (max_pixclk > 540000) + return 675000; + else if (max_pixclk > 450000) + return 540000; + else if (max_pixclk > 337500) + return 450000; + else + return 337500; + } } -void broxton_uninit_cdclk(struct drm_i915_private *dev_priv) +static void +skl_dpll0_update(struct drm_i915_private *dev_priv) { - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); + u32 val; - udelay(10); + dev_priv->cdclk_pll.ref = 24000; + dev_priv->cdclk_pll.vco = 0; - if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) - DRM_ERROR("DBuf power disable timeout!\n"); + val = I915_READ(LCPLL1_CTL); + if ((val & LCPLL_PLL_ENABLE) == 0) + return; - /* Set minimum (bypass) frequency, in effect turning off the DE PLL */ - broxton_set_cdclk(dev_priv, 19200); -} + if (WARN_ON((val & LCPLL_PLL_LOCK) == 0)) + return; -static const struct skl_cdclk_entry { - unsigned int freq; - unsigned int vco; -} skl_cdclk_frequencies[] = { - { .freq = 308570, .vco = 8640 }, - { .freq = 337500, .vco = 8100 }, - { .freq = 432000, .vco = 8640 }, - { .freq = 450000, .vco = 8100 }, - { .freq = 540000, .vco = 8100 }, - { .freq = 617140, .vco = 8640 }, - { .freq = 675000, .vco = 8100 }, -}; + val = I915_READ(DPLL_CTRL1); -static unsigned int skl_cdclk_decimal(unsigned int freq) -{ - return (freq - 1000) / 500; + if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | + DPLL_CTRL1_SSC(SKL_DPLL0) | + DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) != + DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) + return; + + switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) { + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0): + dev_priv->cdclk_pll.vco = 8100000; + break; + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0): + case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0): + dev_priv->cdclk_pll.vco = 8640000; + break; + default: + MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); + break; + } } -static unsigned int skl_cdclk_get_vco(unsigned int freq) +void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco) { - unsigned int i; + bool changed = dev_priv->skl_preferred_vco_freq != vco; - for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) { - const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i]; - - if (e->freq == freq) - return e->vco; - } + dev_priv->skl_preferred_vco_freq = vco; - return 8100; + if (changed) + intel_update_max_cdclk(dev_priv->dev); } static void -skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) +skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) { 
- unsigned int min_freq; + int min_cdclk = skl_calc_cdclk(0, vco); u32 val; - /* select the minimum CDCLK before enabling DPLL 0 */ - val = I915_READ(CDCLK_CTL); - val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK; - val |= CDCLK_FREQ_337_308; - - if (required_vco == 8640) - min_freq = 308570; - else - min_freq = 337500; - - val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq); + WARN_ON(vco != 8100000 && vco != 8640000); + /* select the minimum CDCLK before enabling DPLL 0 */ + val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk); I915_WRITE(CDCLK_CTL, val); POSTING_READ(CDCLK_CTL); @@ -5592,14 +5654,14 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640. * The modeset code is responsible for the selection of the exact link * rate later on, with the constraint of choosing a frequency that - * works with required_vco. + * works with vco. */ val = I915_READ(DPLL_CTRL1); val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0); - if (required_vco == 8640) + if (vco == 8640000) val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0); else @@ -5613,6 +5675,21 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5)) DRM_ERROR("DPLL0 not locked\n"); + + dev_priv->cdclk_pll.vco = vco; + + /* We'll want to keep using the current vco from now on. */ + skl_set_preferred_cdclk_vco(dev_priv, vco); +} + +static void +skl_dpll0_disable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); + if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) + DRM_ERROR("Couldn't disable DPLL0\n"); + + dev_priv->cdclk_pll.vco = 0; } static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) @@ -5642,12 +5719,14 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv) return false; } -static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) +static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco) { struct drm_device *dev = dev_priv->dev; u32 freq_select, pcu_ack; - DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq); + WARN_ON((cdclk == 24000) != (vco == 0)); + + DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) { DRM_ERROR("failed to inform PCU about cdclk change\n"); @@ -5655,7 +5734,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) } /* set CDCLK_CTL */ - switch(freq) { + switch (cdclk) { case 450000: case 432000: freq_select = CDCLK_FREQ_450_432; @@ -5665,20 +5744,27 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) freq_select = CDCLK_FREQ_540; pcu_ack = 2; break; - case 308570: + case 308571: case 337500: default: freq_select = CDCLK_FREQ_337_308; pcu_ack = 0; break; - case 617140: + case 617143: case 675000: freq_select = CDCLK_FREQ_675_617; pcu_ack = 3; break; } - I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq)); + if (dev_priv->cdclk_pll.vco != 0 && + dev_priv->cdclk_pll.vco != vco) + skl_dpll0_disable(dev_priv); + + if (dev_priv->cdclk_pll.vco != vco) + skl_dpll0_enable(dev_priv, vco); + + I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk)); POSTING_READ(CDCLK_CTL); /* inform PCU of the change */ @@ -5689,52 +5775,41 @@ static void 
skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) intel_update_cdclk(dev); } +static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv); + void skl_uninit_cdclk(struct drm_i915_private *dev_priv) { - /* disable DBUF power */ - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); - - udelay(10); - - if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) - DRM_ERROR("DBuf power disable timeout\n"); - - /* disable DPLL0 */ - I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); - if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) - DRM_ERROR("Couldn't disable DPLL0\n"); + skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0); } void skl_init_cdclk(struct drm_i915_private *dev_priv) { - unsigned int required_vco; - - /* DPLL0 not enabled (happens on early BIOS versions) */ - if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) { - /* enable DPLL0 */ - required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk); - skl_dpll0_enable(dev_priv, required_vco); - } + int cdclk, vco; - /* set CDCLK to the frequency the BIOS chose */ - skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk); + skl_sanitize_cdclk(dev_priv); - /* enable DBUF power */ - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); - POSTING_READ(DBUF_CTL); + if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) { + /* + * Use the current vco as our initial + * guess as to what the preferred vco is. + */ + if (dev_priv->skl_preferred_vco_freq == 0) + skl_set_preferred_cdclk_vco(dev_priv, + dev_priv->cdclk_pll.vco); + return; + } - udelay(10); + vco = dev_priv->skl_preferred_vco_freq; + if (vco == 0) + vco = 8100000; + cdclk = skl_calc_cdclk(0, vco); - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) - DRM_ERROR("DBuf power enable timeout\n"); + skl_set_cdclk(dev_priv, cdclk, vco); } -int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) +static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) { - uint32_t lcpll1 = I915_READ(LCPLL1_CTL); - uint32_t cdctl = I915_READ(CDCLK_CTL); - int freq = dev_priv->skl_boot_cdclk; + uint32_t cdctl, expected; /* * check if the pre-os initialized the display @@ -5744,8 +5819,10 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0) goto sanitize; + intel_update_cdclk(dev_priv->dev); /* Is PLL enabled and locked? */ - if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK))) + if (dev_priv->cdclk_pll.vco == 0 || + dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref) goto sanitize; /* DPLL okay; verify the cdclock @@ -5754,19 +5831,20 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) * decimal part is programmed wrong from BIOS where pre-os does not * enable display. Verify the same as well. 
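 *
 * Worked check (hypothetical BIOS value): for a 337500 kHz cdclk the
 * decimal field must read (337500 - 1000) / 500 = 673; if CDCLK_CTL
 * disagrees with that, or with the expected CDCLK_FREQ_SEL_MASK bits,
 * we fall through to sanitize below.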
*/ - if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq))) + cdctl = I915_READ(CDCLK_CTL); + expected = (cdctl & CDCLK_FREQ_SEL_MASK) | + skl_cdclk_decimal(dev_priv->cdclk_freq); + if (cdctl == expected) /* All well; nothing to sanitize */ - return false; + return; + sanitize: - /* - * As of now initialize with max cdclk till - * we get dynamic cdclk support - * */ - dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq; - skl_init_cdclk(dev_priv); + DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); - /* we did have to sanitize */ - return true; + /* force cdclk programming */ + dev_priv->cdclk_freq = 0; + /* force full PLL disable + enable */ + dev_priv->cdclk_pll.vco = -1; } /* Adjust CDclk dividers to allow high res or save power if possible */ @@ -5906,21 +5984,15 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, return 200000; } -static int broxton_calc_cdclk(struct drm_i915_private *dev_priv, - int max_pixclk) +static int broxton_calc_cdclk(int max_pixclk) { - /* - * FIXME: - * - remove the guardband, it's not needed on BXT - * - set 19.2MHz bypass frequency if there are no active pipes - */ - if (max_pixclk > 576000*9/10) + if (max_pixclk > 576000) return 624000; - else if (max_pixclk > 384000*9/10) + else if (max_pixclk > 384000) return 576000; - else if (max_pixclk > 288000*9/10) + else if (max_pixclk > 288000) return 384000; - else if (max_pixclk > 144000*9/10) + else if (max_pixclk > 144000) return 288000; else return 144000; @@ -5963,9 +6035,6 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - if (max_pixclk < 0) - return max_pixclk; - intel_state->cdclk = intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); @@ -5977,20 +6046,15 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state) { - struct drm_device *dev = state->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int max_pixclk = intel_mode_max_pixclk(dev, state); + int max_pixclk = ilk_max_pixel_rate(state); struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - if (max_pixclk < 0) - return max_pixclk; - intel_state->cdclk = intel_state->dev_cdclk = - broxton_calc_cdclk(dev_priv, max_pixclk); + broxton_calc_cdclk(max_pixclk); if (!intel_state->active_crtcs) - intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0); + intel_state->dev_cdclk = broxton_calc_cdclk(0); return 0; } @@ -6252,7 +6316,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) return; if (to_intel_plane_state(crtc->primary->state)->visible) { - WARN_ON(intel_crtc->unpin_work); + WARN_ON(intel_crtc->flip_work); intel_pre_disable_primary_noatomic(crtc); @@ -6262,8 +6326,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) dev_priv->display.crtc_disable(crtc); - DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n", - crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", + crtc->base.id, crtc->name); WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); crtc->state->active = false; @@ -6563,10 +6627,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; + int clock_limit = dev_priv->max_dotclk_freq; 
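+	/*
+	 * Illustration (numbers hypothetical): on a gen3 part with
+	 * max_cdclk_freq = 320000 the single-wide limit computed below is
+	 * 320000 * 9 / 10 = 288000 kHz; a mode faster than that enables
+	 * double_wide and is validated against max_dotclk_freq instead.
+	 */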
- /* FIXME should check pixel clock limits on all platforms */ if (INTEL_INFO(dev)->gen < 4) { - int clock_limit = dev_priv->max_cdclk_freq * 9 / 10; + clock_limit = dev_priv->max_cdclk_freq * 9 / 10; /* * Enable double wide mode when the dot clock @@ -6574,16 +6638,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, */ if (intel_crtc_supports_double_wide(crtc) && adjusted_mode->crtc_clock > clock_limit) { - clock_limit *= 2; + clock_limit = dev_priv->max_dotclk_freq; pipe_config->double_wide = true; } + } - if (adjusted_mode->crtc_clock > clock_limit) { - DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", - adjusted_mode->crtc_clock, clock_limit, - yesno(pipe_config->double_wide)); - return -EINVAL; - } + if (adjusted_mode->crtc_clock > clock_limit) { + DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", + adjusted_mode->crtc_clock, clock_limit, + yesno(pipe_config->double_wide)); + return -EINVAL; } /* @@ -6615,76 +6679,98 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, static int skylake_get_display_clock_speed(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t lcpll1 = I915_READ(LCPLL1_CTL); - uint32_t cdctl = I915_READ(CDCLK_CTL); - uint32_t linkrate; + uint32_t cdctl; - if (!(lcpll1 & LCPLL_PLL_ENABLE)) - return 24000; /* 24MHz is the cd freq with NSSC ref */ + skl_dpll0_update(dev_priv); - if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540) - return 540000; + if (dev_priv->cdclk_pll.vco == 0) + return dev_priv->cdclk_pll.ref; - linkrate = (I915_READ(DPLL_CTRL1) & - DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1; + cdctl = I915_READ(CDCLK_CTL); - if (linkrate == DPLL_CTRL1_LINK_RATE_2160 || - linkrate == DPLL_CTRL1_LINK_RATE_1080) { - /* vco 8640 */ + if (dev_priv->cdclk_pll.vco == 8640000) { switch (cdctl & CDCLK_FREQ_SEL_MASK) { case CDCLK_FREQ_450_432: return 432000; case CDCLK_FREQ_337_308: - return 308570; + return 308571; + case CDCLK_FREQ_540: + return 540000; case CDCLK_FREQ_675_617: - return 617140; + return 617143; default: - WARN(1, "Unknown cd freq selection\n"); + MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); } } else { - /* vco 8100 */ switch (cdctl & CDCLK_FREQ_SEL_MASK) { case CDCLK_FREQ_450_432: return 450000; case CDCLK_FREQ_337_308: return 337500; + case CDCLK_FREQ_540: + return 540000; case CDCLK_FREQ_675_617: return 675000; default: - WARN(1, "Unknown cd freq selection\n"); + MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); } } - /* error case, do as if DPLL0 isn't enabled */ - return 24000; + return dev_priv->cdclk_pll.ref; +} + +static void bxt_de_pll_update(struct drm_i915_private *dev_priv) +{ + u32 val; + + dev_priv->cdclk_pll.ref = 19200; + dev_priv->cdclk_pll.vco = 0; + + val = I915_READ(BXT_DE_PLL_ENABLE); + if ((val & BXT_DE_PLL_PLL_ENABLE) == 0) + return; + + if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0)) + return; + + val = I915_READ(BXT_DE_PLL_CTL); + dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) * + dev_priv->cdclk_pll.ref; } static int broxton_get_display_clock_speed(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t cdctl = I915_READ(CDCLK_CTL); - uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; - uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE); - int cdclk; + u32 divider; + int div, vco; - if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE)) - return 19200; + bxt_de_pll_update(dev_priv); - cdclk = 19200 * pll_ratio / 2; + vco = dev_priv->cdclk_pll.vco; + if (vco 
== 0) + return dev_priv->cdclk_pll.ref; + + divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; - switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) { + switch (divider) { case BXT_CDCLK_CD2X_DIV_SEL_1: - return cdclk; /* 576MHz or 624MHz */ + div = 2; + break; case BXT_CDCLK_CD2X_DIV_SEL_1_5: - return cdclk * 2 / 3; /* 384MHz */ + div = 3; + break; case BXT_CDCLK_CD2X_DIV_SEL_2: - return cdclk / 2; /* 288MHz */ + div = 4; + break; case BXT_CDCLK_CD2X_DIV_SEL_4: - return cdclk / 4; /* 144MHz */ + div = 8; + break; + default: + MISSING_CASE(divider); + return dev_priv->cdclk_pll.ref; } - /* error case, do as if DE PLL isn't enabled */ - return 19200; + return DIV_ROUND_CLOSEST(vco, div); } static int broadwell_get_display_clock_speed(struct drm_device *dev) @@ -7063,7 +7149,7 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) static void i9xx_update_pll_dividers(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_device *dev = crtc->base.dev; u32 fp, fp2 = 0; @@ -7487,7 +7573,7 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe) static void i9xx_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -7563,7 +7649,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, static void i8xx_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -7817,7 +7903,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 48000; memset(&crtc_state->dpll_hw_state, 0, @@ -7853,7 +7939,7 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, @@ -7896,7 +7982,7 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, @@ -7930,7 +8016,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 96000; memset(&crtc_state->dpll_hw_state, 0, @@ -7963,7 +8049,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { int refclk = 100000; - const intel_limit_t *limit = &intel_limits_chv; + const struct intel_limit *limit = &intel_limits_chv; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -7984,7 +8070,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { int refclk = 100000; - const intel_limit_t *limit = &intel_limits_vlv; + const struct intel_limit *limit = &intel_limits_vlv; memset(&crtc_state->dpll_hw_state, 0, 
sizeof(crtc_state->dpll_hw_state)); @@ -8034,7 +8120,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int pipe = pipe_config->cpu_transcoder; - intel_clock_t clock; + struct dpll clock; u32 mdiv; int refclk = 100000; @@ -8131,7 +8217,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; int pipe = pipe_config->cpu_transcoder; enum dpio_channel port = vlv_pipe_to_channel(pipe); - intel_clock_t clock; + struct dpll clock; u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; int refclk = 100000; @@ -8275,12 +8361,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *encoder; + int i; u32 val, final; bool has_lvds = false; bool has_cpu_edp = false; bool has_panel = false; bool has_ck505 = false; bool can_ssc = false; + bool using_ssc_source = false; /* We need to take the global config into account */ for_each_intel_encoder(dev, encoder) { @@ -8307,8 +8395,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) can_ssc = true; } - DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", - has_panel, has_lvds, has_ck505); + /* Check if any DPLLs are using the SSC source */ + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + u32 temp = I915_READ(PCH_DPLL(i)); + + if (!(temp & DPLL_VCO_ENABLE)) + continue; + + if ((temp & PLL_REF_INPUT_MASK) == + PLLB_REF_INPUT_SPREADSPECTRUMIN) { + using_ssc_source = true; + break; + } + } + + DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", + has_panel, has_lvds, has_ck505, using_ssc_source); /* Ironlake: try to setup display ref clock before DPLL * enabling. 
This is only under driver's control after @@ -8328,9 +8430,12 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) else final |= DREF_NONSPREAD_SOURCE_ENABLE; - final &= ~DREF_SSC_SOURCE_MASK; final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - final &= ~DREF_SSC1_ENABLE; + + if (!using_ssc_source) { + final &= ~DREF_SSC_SOURCE_MASK; + final &= ~DREF_SSC1_ENABLE; + } if (has_panel) { final |= DREF_SSC_SOURCE_ENABLE; @@ -8393,7 +8498,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) POSTING_READ(PCH_DREF_CONTROL); udelay(200); } else { - DRM_DEBUG_KMS("Disabling SSC entirely\n"); + DRM_DEBUG_KMS("Disabling CPU source output\n"); val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; @@ -8404,16 +8509,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) POSTING_READ(PCH_DREF_CONTROL); udelay(200); - /* Turn off the SSC source */ - val &= ~DREF_SSC_SOURCE_MASK; - val |= DREF_SSC_SOURCE_DISABLE; + if (!using_ssc_source) { + DRM_DEBUG_KMS("Disabling SSC source\n"); - /* Turn off SSC1 */ - val &= ~DREF_SSC1_ENABLE; + /* Turn off the SSC source */ + val &= ~DREF_SSC_SOURCE_MASK; + val |= DREF_SSC_SOURCE_DISABLE; - I915_WRITE(PCH_DREF_CONTROL, val); - POSTING_READ(PCH_DREF_CONTROL); - udelay(200); + /* Turn off SSC1 */ + val &= ~DREF_SSC1_ENABLE; + + I915_WRITE(PCH_DREF_CONTROL, val); + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); + } } BUG_ON(val != final); @@ -8794,7 +8903,7 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock) + struct dpll *reduced_clock) { struct drm_crtc *crtc = &intel_crtc->base; struct drm_device *dev = crtc->dev; @@ -8902,10 +9011,10 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - intel_clock_t reduced_clock; + struct dpll reduced_clock; bool has_reduced_clock = false; struct intel_shared_dpll *pll; - const intel_limit_t *limit; + const struct intel_limit *limit; int refclk = 120000; memset(&crtc_state->dpll_hw_state, 0, @@ -9300,6 +9409,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, ironlake_get_fdi_m_n_config(crtc, pipe_config); if (HAS_PCH_IBX(dev_priv)) { + /* + * The pipe->pch transcoder and pch transcoder->pll + * mapping is fixed. + */ pll_id = (enum intel_dpll_id) crtc->pipe; } else { tmp = I915_READ(PCH_DPLL_SEL); @@ -9687,6 +9800,18 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) cdclk, dev_priv->cdclk_freq); } +static int broadwell_calc_cdclk(int max_pixclk) +{ + if (max_pixclk > 540000) + return 675000; + else if (max_pixclk > 450000) + return 540000; + else if (max_pixclk > 337500) + return 450000; + else + return 337500; +} + static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->dev); @@ -9698,14 +9823,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) * FIXME should also account for plane ratio * once 64bpp pixel formats are supported. 
*/ - if (max_pixclk > 540000) - cdclk = 675000; - else if (max_pixclk > 450000) - cdclk = 540000; - else if (max_pixclk > 337500) - cdclk = 450000; - else - cdclk = 337500; + cdclk = broadwell_calc_cdclk(max_pixclk); if (cdclk > dev_priv->max_cdclk_freq) { DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", @@ -9715,7 +9833,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) intel_state->cdclk = intel_state->dev_cdclk = cdclk; if (!intel_state->active_crtcs) - intel_state->dev_cdclk = 337500; + intel_state->dev_cdclk = broadwell_calc_cdclk(0); return 0; } @@ -9730,6 +9848,47 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state) broadwell_set_cdclk(dev, req_cdclk); } +static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) +{ + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = to_i915(state->dev); + const int max_pixclk = ilk_max_pixel_rate(state); + int vco = intel_state->cdclk_pll_vco; + int cdclk; + + /* + * FIXME should also account for plane ratio + * once 64bpp pixel formats are supported. + */ + cdclk = skl_calc_cdclk(max_pixclk, vco); + + /* + * FIXME move the cdclk calculation to + * compute_config() so we can fail gracefully. + */ + if (cdclk > dev_priv->max_cdclk_freq) { + DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n", + cdclk, dev_priv->max_cdclk_freq); + cdclk = dev_priv->max_cdclk_freq; + } + + intel_state->cdclk = intel_state->dev_cdclk = cdclk; + if (!intel_state->active_crtcs) + intel_state->dev_cdclk = skl_calc_cdclk(0, vco); + + return 0; +} + +static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state) +{ + struct drm_i915_private *dev_priv = to_i915(old_state->dev); + struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state); + unsigned int req_cdclk = intel_state->dev_cdclk; + unsigned int req_vco = intel_state->cdclk_pll_vco; + + skl_set_cdclk(dev_priv, req_cdclk, req_vco); +} + static int haswell_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { @@ -9850,6 +10009,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, enum intel_display_power_domain power_domain; u32 tmp; + /* + * The pipe->transcoder mapping is fixed with the exception of the eDP + * transcoder handled below.
+ */ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; /* @@ -10317,10 +10480,10 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, struct drm_i915_gem_object *obj; struct drm_mode_fb_cmd2 mode_cmd = { 0 }; - obj = i915_gem_alloc_object(dev, + obj = i915_gem_object_create(dev, intel_framebuffer_size_for_mode(mode, bpp)); - if (obj == NULL) - return ERR_PTR(-ENOMEM); + if (IS_ERR(obj)) + return ERR_CAST(obj); mode_cmd.width = mode->hdisplay; mode_cmd.height = mode->vdisplay; @@ -10632,7 +10795,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, int pipe = pipe_config->cpu_transcoder; u32 dpll = pipe_config->dpll_hw_state.dpll; u32 fp; - intel_clock_t clock; + struct dpll clock; int port_clock; int refclk = i9xx_pll_refclk(dev, pipe_config); @@ -10806,31 +10969,27 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, return mode; } -void intel_mark_busy(struct drm_device *dev) +void intel_mark_busy(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->mm.busy) return; intel_runtime_pm_get(dev_priv); i915_update_gfx_val(dev_priv); - if (INTEL_INFO(dev)->gen >= 6) + if (INTEL_GEN(dev_priv) >= 6) gen6_rps_busy(dev_priv); dev_priv->mm.busy = true; } -void intel_mark_idle(struct drm_device *dev) +void intel_mark_idle(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (!dev_priv->mm.busy) return; dev_priv->mm.busy = false; - if (INTEL_INFO(dev)->gen >= 6) - gen6_rps_idle(dev->dev_private); + if (INTEL_GEN(dev_priv) >= 6) + gen6_rps_idle(dev_priv); intel_runtime_pm_put(dev_priv); } @@ -10839,15 +10998,16 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_device *dev = crtc->dev; - struct intel_unpin_work *work; + struct intel_flip_work *work; spin_lock_irq(&dev->event_lock); - work = intel_crtc->unpin_work; - intel_crtc->unpin_work = NULL; + work = intel_crtc->flip_work; + intel_crtc->flip_work = NULL; spin_unlock_irq(&dev->event_lock); if (work) { - cancel_work_sync(&work->work); + cancel_work_sync(&work->mmio_work); + cancel_work_sync(&work->unpin_work); kfree(work); } @@ -10858,12 +11018,15 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) static void intel_unpin_work_fn(struct work_struct *__work) { - struct intel_unpin_work *work = - container_of(__work, struct intel_unpin_work, work); + struct intel_flip_work *work = + container_of(__work, struct intel_flip_work, unpin_work); struct intel_crtc *crtc = to_intel_crtc(work->crtc); struct drm_device *dev = crtc->base.dev; struct drm_plane *primary = crtc->base.primary; + if (is_mmio_work(work)) + flush_work(&work->mmio_work); + mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(work->old_fb, primary->state->rotation); drm_gem_object_unreference(&work->pending_flip_obj->base); @@ -10882,60 +11045,14 @@ static void intel_unpin_work_fn(struct work_struct *__work) kfree(work); } -static void do_intel_finish_page_flip(struct drm_device *dev, - struct drm_crtc *crtc) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_unpin_work *work; - unsigned long flags; - - /* Ignore early vblank irqs */ - if (intel_crtc == NULL) - return; - - /* - * This is called both by irq handlers and the reset code (to complete - * lost pageflips) so needs the full irqsave spinlocks. - */ - spin_lock_irqsave(&dev->event_lock, flags); - work = intel_crtc->unpin_work; - - /* Ensure we don't miss a work->pending update ... 
*/ - smp_rmb(); - - if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { - spin_unlock_irqrestore(&dev->event_lock, flags); - return; - } - - page_flip_completed(intel_crtc); - - spin_unlock_irqrestore(&dev->event_lock, flags); -} - -void intel_finish_page_flip(struct drm_device *dev, int pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - - do_intel_finish_page_flip(dev, crtc); -} - -void intel_finish_page_flip_plane(struct drm_device *dev, int plane) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; - - do_intel_finish_page_flip(dev, crtc); -} - /* Is 'a' after or equal to 'b'? */ static bool g4x_flip_count_after_eq(u32 a, u32 b) { return !((a - b) & 0x80000000); } -static bool page_flip_finished(struct intel_crtc *crtc) +static bool __pageflip_finished_cs(struct intel_crtc *crtc, + struct intel_flip_work *work) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -10977,40 +11094,103 @@ static bool page_flip_finished(struct intel_crtc *crtc) * anyway, we don't really care. */ return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == - crtc->unpin_work->gtt_offset && + crtc->flip_work->gtt_offset && g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)), - crtc->unpin_work->flip_count); + crtc->flip_work->flip_count); } -void intel_prepare_page_flip(struct drm_device *dev, int plane) +static bool +__pageflip_finished_mmio(struct intel_crtc *crtc, + struct intel_flip_work *work) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); + /* + * MMIO work completes when vblank is different from + * flip_queued_vblank. + * + * Reset counter value doesn't matter, this is handled by + * i915_wait_request finishing early, so no need to handle + * reset here. + */ + return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank; +} + + +static bool pageflip_finished(struct intel_crtc *crtc, + struct intel_flip_work *work) +{ + if (!atomic_read(&work->pending)) + return false; + + smp_rmb(); + + if (is_mmio_work(work)) + return __pageflip_finished_mmio(crtc, work); + else + return __pageflip_finished_cs(crtc, work); +} + +void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe) +{ + struct drm_device *dev = dev_priv->dev; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_flip_work *work; + unsigned long flags; + + /* Ignore early vblank irqs */ + if (!crtc) + return; + + /* + * This is called both by irq handlers and the reset code (to complete + * lost pageflips) so needs the full irqsave spinlocks. 
+ */ + spin_lock_irqsave(&dev->event_lock, flags); + work = intel_crtc->flip_work; + + if (work != NULL && + !is_mmio_work(work) && + pageflip_finished(intel_crtc, work)) + page_flip_completed(intel_crtc); + + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe) +{ + struct drm_device *dev = dev_priv->dev; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_flip_work *work; unsigned long flags; + /* Ignore early vblank irqs */ + if (!crtc) + return; /* * This is called both by irq handlers and the reset code (to complete * lost pageflips) so needs the full irqsave spinlocks. - * - * NB: An MMIO update of the plane base pointer will also - * generate a page-flip completion irq, i.e. every modeset - * is also accompanied by a spurious intel_prepare_page_flip(). */ spin_lock_irqsave(&dev->event_lock, flags); - if (intel_crtc->unpin_work && page_flip_finished(intel_crtc)) - atomic_inc_not_zero(&intel_crtc->unpin_work->pending); + work = intel_crtc->flip_work; + + if (work != NULL && + is_mmio_work(work) && + pageflip_finished(intel_crtc, work)) + page_flip_completed(intel_crtc); + spin_unlock_irqrestore(&dev->event_lock, flags); } -static inline void intel_mark_page_flip_active(struct intel_unpin_work *work) +static inline void intel_mark_page_flip_active(struct intel_crtc *crtc, + struct intel_flip_work *work) { + work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc); + /* Ensure that the work item is consistent when activating it ... */ - smp_wmb(); - atomic_set(&work->pending, INTEL_FLIP_PENDING); - /* and that it is marked active as soon as the irq could fire. */ - smp_wmb(); + smp_mb__before_atomic(); + atomic_set(&work->pending, 1); } static int intel_gen2_queue_flip(struct drm_device *dev, @@ -11041,10 +11221,9 @@ static int intel_gen2_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); intel_ring_emit(engine, 0); /* aux display base address, unused */ - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11073,10 +11252,9 @@ static int intel_gen3_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); intel_ring_emit(engine, MI_NOOP); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11104,7 +11282,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset | + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset | obj->tiling_mode); /* XXX Enabling the panel-fitter across page-flip is so far @@ -11115,7 +11293,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev, pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; intel_ring_emit(engine, pf | pipesrc); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11139,7 +11316,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, 
intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); /* Contrary to the suggestions in the documentation, * "Enable Panel Fitter" does not seem to be required when page @@ -11151,7 +11328,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev, pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; intel_ring_emit(engine, pf | pipesrc); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11243,10 +11419,9 @@ static int intel_gen7_queue_flip(struct drm_device *dev, intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit); intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode)); - intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); intel_ring_emit(engine, (MI_NOOP)); - intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } @@ -11264,7 +11439,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine, if (engine == NULL) return true; - if (INTEL_INFO(engine->dev)->gen < 5) + if (INTEL_GEN(engine->i915) < 5) return false; if (i915.use_mmio_flip < 0) @@ -11283,7 +11458,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine, static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, unsigned int rotation, - struct intel_unpin_work *work) + struct intel_flip_work *work) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -11335,7 +11510,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, } static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, - struct intel_unpin_work *work) + struct intel_flip_work *work) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -11358,48 +11533,20 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, POSTING_READ(DSPSURF(intel_crtc->plane)); } -/* - * XXX: This is the temporary way to update the plane registers until we get - * around to using the usual plane update functions for MMIO flips - */ -static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip) -{ - struct intel_crtc *crtc = mmio_flip->crtc; - struct intel_unpin_work *work; - - spin_lock_irq(&crtc->base.dev->event_lock); - work = crtc->unpin_work; - spin_unlock_irq(&crtc->base.dev->event_lock); - if (work == NULL) - return; - - intel_mark_page_flip_active(work); - - intel_pipe_update_start(crtc); - - if (INTEL_INFO(mmio_flip->i915)->gen >= 9) - skl_do_mmio_flip(crtc, mmio_flip->rotation, work); - else - /* use_mmio_flip() retricts MMIO flips to ilk+ */ - ilk_do_mmio_flip(crtc, work); - - intel_pipe_update_end(crtc); -} - -static void intel_mmio_flip_work_func(struct work_struct *work) +static void intel_mmio_flip_work_func(struct work_struct *w) { - struct intel_mmio_flip *mmio_flip = - container_of(work, struct intel_mmio_flip, work); + struct intel_flip_work *work = + container_of(w, struct intel_flip_work, mmio_work); + struct intel_crtc *crtc = to_intel_crtc(work->crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_framebuffer *intel_fb = - to_intel_framebuffer(mmio_flip->crtc->base.primary->fb); + to_intel_framebuffer(crtc->base.primary->fb); struct drm_i915_gem_object *obj = intel_fb->obj; - if (mmio_flip->req) { - WARN_ON(__i915_wait_request(mmio_flip->req, + if (work->flip_queued_req) + 
WARN_ON(__i915_wait_request(work->flip_queued_req, false, NULL, - &mmio_flip->i915->rps.mmioflips)); - i915_gem_request_unreference__unlocked(mmio_flip->req); - } + &dev_priv->rps.mmioflips)); /* For framebuffer backed by dmabuf, wait for fence */ if (obj->base.dma_buf) @@ -11407,29 +11554,15 @@ static void intel_mmio_flip_work_func(struct work_struct *work) false, false, MAX_SCHEDULE_TIMEOUT) < 0); - intel_do_mmio_flip(mmio_flip); - kfree(mmio_flip); -} - -static int intel_queue_mmio_flip(struct drm_device *dev, - struct drm_crtc *crtc, - struct drm_i915_gem_object *obj) -{ - struct intel_mmio_flip *mmio_flip; - - mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL); - if (mmio_flip == NULL) - return -ENOMEM; - - mmio_flip->i915 = to_i915(dev); - mmio_flip->req = i915_gem_request_reference(obj->last_write_req); - mmio_flip->crtc = to_intel_crtc(crtc); - mmio_flip->rotation = crtc->primary->state->rotation; + intel_pipe_update_start(crtc); - INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func); - schedule_work(&mmio_flip->work); + if (INTEL_GEN(dev_priv) >= 9) + skl_do_mmio_flip(crtc, work->rotation, work); + else + /* use_mmio_flip() restricts MMIO flips to ilk+ */ + ilk_do_mmio_flip(crtc, work); - return 0; + intel_pipe_update_end(crtc, work); } static int intel_default_queue_flip(struct drm_device *dev, @@ -11442,37 +11575,32 @@ return -ENODEV; } -static bool __intel_pageflip_stall_check(struct drm_device *dev, - struct drm_crtc *crtc) +static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv, + struct intel_crtc *intel_crtc, + struct intel_flip_work *work) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_unpin_work *work = intel_crtc->unpin_work; - u32 addr; - - if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) - return true; + u32 addr, vblank; - if (atomic_read(&work->pending) < INTEL_FLIP_PENDING) + if (!atomic_read(&work->pending)) return false; - if (!work->enable_stall_check) - return false; + smp_rmb(); + vblank = intel_crtc_get_vblank_counter(intel_crtc); if (work->flip_ready_vblank == 0) { if (work->flip_queued_req && !i915_gem_request_completed(work->flip_queued_req, true)) return false; - work->flip_ready_vblank = drm_crtc_vblank_count(crtc); + work->flip_ready_vblank = vblank; } - if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3) + if (vblank - work->flip_ready_vblank < 3) return false; /* Potential stall - if we see that the flip has happened, * assume a missed interrupt.
*/ - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane))); else addr = I915_READ(DSPADDR(intel_crtc->plane)); @@ -11484,12 +11612,12 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev, return addr == work->gtt_offset; } -void intel_check_page_flip(struct drm_device *dev, int pipe) +void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = dev_priv->dev; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_unpin_work *work; + struct intel_flip_work *work; WARN_ON(!in_interrupt()); @@ -11497,16 +11625,20 @@ void intel_check_page_flip(struct drm_device *dev, int pipe) return; spin_lock(&dev->event_lock); - work = intel_crtc->unpin_work; - if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) { - WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", - work->flip_queued_vblank, drm_vblank_count(dev, pipe)); + work = intel_crtc->flip_work; + + if (work != NULL && !is_mmio_work(work) && + __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) { + WARN_ONCE(1, + "Kicking stuck page flip: queued at %d, now %d\n", + work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc)); page_flip_completed(intel_crtc); work = NULL; } - if (work != NULL && - drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1) - intel_queue_rps_boost_for_request(dev, work->flip_queued_req); + + if (work != NULL && !is_mmio_work(work) && + intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1) + intel_queue_rps_boost_for_request(work->flip_queued_req); spin_unlock(&dev->event_lock); } @@ -11522,7 +11654,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_plane *primary = crtc->primary; enum pipe pipe = intel_crtc->pipe; - struct intel_unpin_work *work; + struct intel_flip_work *work; struct intel_engine_cs *engine; bool mmio_flip; struct drm_i915_gem_request *request = NULL; @@ -11559,19 +11691,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->event = event; work->crtc = crtc; work->old_fb = old_fb; - INIT_WORK(&work->work, intel_unpin_work_fn); + INIT_WORK(&work->unpin_work, intel_unpin_work_fn); ret = drm_crtc_vblank_get(crtc); if (ret) goto free_work; - /* We borrow the event spin lock for protecting unpin_work */ + /* We borrow the event spin lock for protecting flip_work */ spin_lock_irq(&dev->event_lock); - if (intel_crtc->unpin_work) { + if (intel_crtc->flip_work) { /* Before declaring the flip queue wedged, check if * the hardware completed the operation behind our backs. 
*/ - if (__intel_pageflip_stall_check(dev, crtc)) { + if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) { DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n"); page_flip_completed(intel_crtc); } else { @@ -11583,7 +11715,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return -EBUSY; } } - intel_crtc->unpin_work = work; + intel_crtc->flip_work = work; spin_unlock_irq(&dev->event_lock); if (atomic_read(&intel_crtc->unpin_work_count) >= 2) @@ -11638,6 +11770,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, */ if (!mmio_flip) { ret = i915_gem_object_sync(obj, engine, &request); + if (!ret && !request) { + request = i915_gem_request_alloc(engine, NULL); + ret = PTR_ERR_OR_ZERO(request); + } + if (ret) goto cleanup_pending; } @@ -11649,38 +11786,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj, 0); work->gtt_offset += intel_crtc->dspaddr_offset; + work->rotation = crtc->primary->state->rotation; if (mmio_flip) { - ret = intel_queue_mmio_flip(dev, crtc, obj); - if (ret) - goto cleanup_unpin; + INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func); i915_gem_request_assign(&work->flip_queued_req, obj->last_write_req); - } else { - if (!request) { - request = i915_gem_request_alloc(engine, NULL); - if (IS_ERR(request)) { - ret = PTR_ERR(request); - goto cleanup_unpin; - } - } + schedule_work(&work->mmio_work); + } else { + i915_gem_request_assign(&work->flip_queued_req, request); ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, page_flip_flags); if (ret) goto cleanup_unpin; - i915_gem_request_assign(&work->flip_queued_req, request); - } + intel_mark_page_flip_active(intel_crtc, work); - if (request) i915_add_request_no_flush(request); + } - work->flip_queued_vblank = drm_crtc_vblank_count(crtc); - work->enable_stall_check = true; - - i915_gem_track_fb(intel_fb_obj(work->old_fb), obj, + i915_gem_track_fb(intel_fb_obj(old_fb), obj, to_intel_plane(primary)->frontbuffer_bit); mutex_unlock(&dev->struct_mutex); @@ -11706,7 +11833,7 @@ cleanup: drm_framebuffer_unreference(work->old_fb); spin_lock_irq(&dev->event_lock); - intel_crtc->unpin_work = NULL; + intel_crtc->flip_work = NULL; spin_unlock_irq(&dev->event_lock); drm_crtc_vblank_put(crtc); @@ -11808,12 +11935,12 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane_state *old_plane_state = to_intel_plane_state(plane->state); - int idx = intel_crtc->base.base.id, ret; bool mode_changed = needs_modeset(crtc_state); bool was_crtc_enabled = crtc->state->active; bool is_crtc_enabled = crtc_state->active; bool turn_off, turn_on, visible, was_visible; struct drm_framebuffer *fb = plane_state->fb; + int ret; if (crtc_state && INTEL_INFO(dev)->gen >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) { @@ -11834,6 +11961,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, * Visibility is calculated as if the crtc was on, but * after scaler setup everything depends on it being off * when the crtc isn't active. + * + * FIXME this is wrong for watermarks. Watermarks should also + * be computed as if the pipe would be active. Perhaps move + * per-plane wm computation to the .check_plane() hook, and + * only combine the results from all planes in the current place? 
*/ if (!is_crtc_enabled) to_intel_plane_state(plane_state)->visible = visible = false; @@ -11847,11 +11979,15 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, turn_off = was_visible && (!visible || mode_changed); turn_on = visible && (!was_visible || mode_changed); - DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx, - plane->base.id, fb ? fb->base.id : -1); + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n", + intel_crtc->base.base.id, + intel_crtc->base.name, + plane->base.id, plane->name, + fb ? fb->base.id : -1); - DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n", - plane->base.id, was_visible, visible, + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", + plane->base.id, plane->name, + was_visible, visible, turn_off, turn_on, mode_changed); if (turn_on) { @@ -12007,7 +12143,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, } } else if (dev_priv->display.compute_intermediate_wm) { if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) - pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk; + pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal; } if (INTEL_INFO(dev)->gen >= 9) { @@ -12142,7 +12278,8 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, struct intel_plane_state *state; struct drm_framebuffer *fb; - DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id, + DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n", + crtc->base.base.id, crtc->base.name, context, pipe_config, pipe_name(crtc->pipe)); DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder)); @@ -12243,29 +12380,24 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, state = to_intel_plane_state(plane->state); fb = state->base.fb; if (!fb) { - DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d " - "disabled, scaler_id = %d\n", - plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD", - plane->base.id, intel_plane->pipe, - (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1, - drm_plane_index(plane), state->scaler_id); + DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n", + plane->base.id, plane->name, state->scaler_id); continue; } - DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled", - plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD", - plane->base.id, intel_plane->pipe, - crtc->base.primary == plane ? 
0 : intel_plane->plane + 1, - drm_plane_index(plane)); - DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x", - fb->base.id, fb->width, fb->height, fb->pixel_format); - DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n", - state->scaler_id, - state->src.x1 >> 16, state->src.y1 >> 16, - drm_rect_width(&state->src) >> 16, - drm_rect_height(&state->src) >> 16, - state->dst.x1, state->dst.y1, - drm_rect_width(&state->dst), drm_rect_height(&state->dst)); + DRM_DEBUG_KMS("[PLANE:%d:%s] enabled", + plane->base.id, plane->name); + DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s", + fb->base.id, fb->width, fb->height, + drm_get_format_name(fb->pixel_format)); + DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n", + state->scaler_id, + state->src.x1 >> 16, state->src.y1 >> 16, + drm_rect_width(&state->src) >> 16, + drm_rect_height(&state->src) >> 16, + state->dst.x1, state->dst.y1, + drm_rect_width(&state->dst), + drm_rect_height(&state->dst)); } } @@ -12932,7 +13064,7 @@ verify_crtc_state(struct drm_crtc *crtc, pipe_config->base.crtc = crtc; pipe_config->base.state = old_state; - DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); @@ -13280,6 +13412,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state) intel_state->active_crtcs |= 1 << i; else intel_state->active_crtcs &= ~(1 << i); + + if (crtc_state->active != crtc->state->active) + intel_state->active_pipe_changes |= drm_crtc_mask(crtc); } /* @@ -13290,9 +13425,17 @@ static int intel_modeset_checks(struct drm_atomic_state *state) * adjusted_mode bits in the crtc directly. */ if (dev_priv->display.modeset_calc_cdclk) { + if (!intel_state->cdclk_pll_vco) + intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco; + if (!intel_state->cdclk_pll_vco) + intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq; + ret = dev_priv->display.modeset_calc_cdclk(state); + if (ret < 0) + return ret; - if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq) + if (intel_state->dev_cdclk != dev_priv->cdclk_freq || + intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco) ret = intel_modeset_all_pipes(state); if (ret < 0) @@ -13316,38 +13459,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state) * phase. The code here should be run after the per-crtc and per-plane 'check' * handlers to ensure that all derived state has been updated. */ -static void calc_watermark_data(struct drm_atomic_state *state) +static int calc_watermark_data(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_crtc *crtc; - struct drm_crtc_state *cstate; - struct drm_plane *plane; - struct drm_plane_state *pstate; - - /* - * Calculate watermark configuration details now that derived - * plane/crtc state is all properly updated. - */ - drm_for_each_crtc(crtc, dev) { - cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?: - crtc->state; - - if (cstate->active) - intel_state->wm_config.num_pipes_active++; - } - drm_for_each_legacy_plane(plane, dev) { - pstate = drm_atomic_get_existing_plane_state(state, plane) ?: - plane->state; + struct drm_i915_private *dev_priv = to_i915(dev); - if (!to_intel_plane_state(pstate)->visible) - continue; + /* Is there platform-specific watermark information to calculate? 
*/ + if (dev_priv->display.compute_global_watermarks) + return dev_priv->display.compute_global_watermarks(state); - intel_state->wm_config.sprites_enabled = true; - if (pstate->crtc_w != pstate->src_w >> 16 || - pstate->crtc_h != pstate->src_h >> 16) - intel_state->wm_config.sprites_scaled = true; - } + return 0; } /** @@ -13377,14 +13498,13 @@ static int intel_atomic_check(struct drm_device *dev, if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) crtc_state->mode_changed = true; - if (!crtc_state->enable) { - if (needs_modeset(crtc_state)) - any_ms = true; + if (!needs_modeset(crtc_state)) continue; - } - if (!needs_modeset(crtc_state)) + if (!crtc_state->enable) { + any_ms = true; continue; + } /* FIXME: For only active_changed we shouldn't need to do any * state recomputation at all. */ @@ -13394,8 +13514,11 @@ static int intel_atomic_check(struct drm_device *dev, return ret; ret = intel_modeset_pipe_config(crtc, pipe_config); - if (ret) + if (ret) { + intel_dump_pipe_config(to_intel_crtc(crtc), + pipe_config, "[failed]"); return ret; + } if (i915.fastboot && intel_pipe_config_compare(dev, @@ -13405,13 +13528,12 @@ static int intel_atomic_check(struct drm_device *dev, to_intel_crtc_state(crtc_state)->update_pipe = true; } - if (needs_modeset(crtc_state)) { + if (needs_modeset(crtc_state)) any_ms = true; - ret = drm_atomic_add_affected_planes(state, crtc); - if (ret) - return ret; - } + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret) + return ret; intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, needs_modeset(crtc_state) ? @@ -13431,9 +13553,7 @@ static int intel_atomic_check(struct drm_device *dev, return ret; intel_fbc_choose_crtc(dev_priv, state); - calc_watermark_data(state); - - return 0; + return calc_watermark_data(state); } static int intel_atomic_prepare_commit(struct drm_device *dev, @@ -13495,6 +13615,16 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, return ret; } +u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + + if (!dev->max_vblank_count) + return drm_accurate_vblank_count(&crtc->base); + + return dev->driver->get_vblank_counter(dev, crtc->pipe); +} + static void intel_atomic_wait_for_vblanks(struct drm_device *dev, struct drm_i915_private *dev_priv, unsigned crtc_mask) @@ -13596,8 +13726,9 @@ static int intel_atomic_commit(struct drm_device *dev, return ret; } - drm_atomic_helper_swap_state(dev, state); - dev_priv->wm.config = intel_state->wm_config; + drm_atomic_helper_swap_state(state, true); + dev_priv->wm.distrust_bios_wm = false; + dev_priv->wm.skl_results = intel_state->wm_results; intel_shared_dpll_commit(state); if (intel_state->modeset) { @@ -13653,7 +13784,8 @@ static int intel_atomic_commit(struct drm_device *dev, drm_atomic_helper_update_legacy_modeset_state(state->dev, state); if (dev_priv->display.modeset_commit_cdclk && - intel_state->dev_cdclk != dev_priv->cdclk_freq) + (intel_state->dev_cdclk != dev_priv->cdclk_freq || + intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)) dev_priv->display.modeset_commit_cdclk(state); intel_modeset_verify_disabled(dev); @@ -13749,8 +13881,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc) state = drm_atomic_state_alloc(dev); if (!state) { - DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory", - crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory", + crtc->base.id, crtc->name); return; } @@ -14006,7 +14138,7 @@ static void intel_finish_crtc_commit(struct 
drm_crtc *crtc, { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - intel_pipe_update_end(intel_crtc); + intel_pipe_update_end(intel_crtc, NULL); } /** @@ -14018,9 +14150,11 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc, */ void intel_plane_destroy(struct drm_plane *plane) { - struct intel_plane *intel_plane = to_intel_plane(plane); + if (!plane) + return; + drm_plane_cleanup(plane); - kfree(intel_plane); + kfree(to_intel_plane(plane)); } const struct drm_plane_funcs intel_plane_funcs = { @@ -14092,10 +14226,24 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, primary->disable_plane = i9xx_disable_primary_plane; } - ret = drm_universal_plane_init(dev, &primary->base, 0, - &intel_plane_funcs, - intel_primary_formats, num_formats, - DRM_PLANE_TYPE_PRIMARY, NULL); + if (INTEL_INFO(dev)->gen >= 9) + ret = drm_universal_plane_init(dev, &primary->base, 0, + &intel_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY, + "plane 1%c", pipe_name(pipe)); + else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + ret = drm_universal_plane_init(dev, &primary->base, 0, + &intel_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY, + "primary %c", pipe_name(pipe)); + else + ret = drm_universal_plane_init(dev, &primary->base, 0, + &intel_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY, + "plane %c", plane_name(primary->plane)); if (ret) goto fail; @@ -14253,7 +14401,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, &intel_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), - DRM_PLANE_TYPE_CURSOR, NULL); + DRM_PLANE_TYPE_CURSOR, + "cursor %c", pipe_name(pipe)); if (ret) goto fail; @@ -14338,7 +14487,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) goto fail; ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, - cursor, &intel_crtc_funcs, NULL); + cursor, &intel_crtc_funcs, + "pipe %c", pipe_name(pipe)); if (ret) goto fail; @@ -14372,10 +14522,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) return; fail: - if (primary) - drm_plane_cleanup(primary); - if (cursor) - drm_plane_cleanup(cursor); + intel_plane_destroy(primary); + intel_plane_destroy(cursor); kfree(crtc_state); kfree(intel_crtc); } @@ -14554,6 +14702,8 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(PCH_DP_D) & DP_DETECTED) intel_dp_init(dev, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + bool has_edp; + /* * The DP_DETECTED bit is the latched state of the DDC * SDA pin at boot. However since eDP doesn't require DDC @@ -14563,19 +14713,17 @@ static void intel_setup_outputs(struct drm_device *dev) * eDP ports. Consult the VBT as well as DP_DETECTED to * detect eDP ports. 
*/ - if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && - !intel_dp_is_edp(dev, PORT_B)) + has_edp = intel_dp_is_edp(dev, PORT_B); + if (I915_READ(VLV_DP_B) & DP_DETECTED || has_edp) + has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); + if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && !has_edp) intel_hdmi_init(dev, VLV_HDMIB, PORT_B); - if (I915_READ(VLV_DP_B) & DP_DETECTED || - intel_dp_is_edp(dev, PORT_B)) - intel_dp_init(dev, VLV_DP_B, PORT_B); - if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && - !intel_dp_is_edp(dev, PORT_C)) + has_edp = intel_dp_is_edp(dev, PORT_C); + if (I915_READ(VLV_DP_C) & DP_DETECTED || has_edp) + has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); + if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && !has_edp) intel_hdmi_init(dev, VLV_HDMIC, PORT_C); - if (I915_READ(VLV_DP_C) & DP_DETECTED || - intel_dp_is_edp(dev, PORT_C)) - intel_dp_init(dev, VLV_DP_C, PORT_C); if (IS_CHERRYVIEW(dev)) { /* eDP not supported on port D, so don't check VBT */ @@ -15050,12 +15198,13 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { dev_priv->display.fdi_link_train = hsw_fdi_link_train; - if (IS_BROADWELL(dev_priv)) { - dev_priv->display.modeset_commit_cdclk = - broadwell_modeset_commit_cdclk; - dev_priv->display.modeset_calc_cdclk = - broadwell_modeset_calc_cdclk; - } + } + + if (IS_BROADWELL(dev_priv)) { + dev_priv->display.modeset_commit_cdclk = + broadwell_modeset_commit_cdclk; + dev_priv->display.modeset_calc_cdclk = + broadwell_modeset_calc_cdclk; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { dev_priv->display.modeset_commit_cdclk = valleyview_modeset_commit_cdclk; @@ -15066,6 +15215,11 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) broxton_modeset_commit_cdclk; dev_priv->display.modeset_calc_cdclk = broxton_modeset_calc_cdclk; + } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + dev_priv->display.modeset_commit_cdclk = + skl_modeset_commit_cdclk; + dev_priv->display.modeset_calc_cdclk = + skl_modeset_calc_cdclk; } switch (INTEL_INFO(dev_priv)->gen) { @@ -15293,7 +15447,7 @@ void intel_modeset_init_hw(struct drm_device *dev) dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; intel_init_clock_gating(dev); - intel_enable_gt_powersave(dev); + intel_enable_gt_powersave(dev_priv); } /* @@ -15363,7 +15517,6 @@ retry: } /* Write calculated watermark values back */ - to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config; for_each_crtc_in_state(state, crtc, cstate, i) { struct intel_crtc_state *cs = to_intel_crtc_state(cstate); @@ -15461,11 +15614,13 @@ void intel_modeset_init(struct drm_device *dev) } intel_update_czclk(dev_priv); - intel_update_rawclk(dev_priv); intel_update_cdclk(dev); intel_shared_dpll_init(dev); + if (dev_priv->max_cdclk_freq == 0) + intel_update_max_cdclk(dev); + /* Just disable it once at startup */ i915_disable_vga(dev); intel_setup_outputs(dev); @@ -15606,8 +15761,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { bool plane; - DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", - crtc->base.base.id); + DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n", + crtc->base.base.id, crtc->base.name); /* Pipe has the wrong plane attached and the plane is active. 
* Temporarily change the plane mapping and disable everything @@ -15775,26 +15930,24 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (crtc_state->base.active) { dev_priv->active_crtcs |= 1 << crtc->pipe; - if (IS_BROADWELL(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) pixclk = ilk_pipe_pixel_rate(crtc_state); - - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (crtc_state->ips_enabled) - pixclk = DIV_ROUND_UP(pixclk * 100, 95); - } else if (IS_VALLEYVIEW(dev_priv) || - IS_CHERRYVIEW(dev_priv) || - IS_BROXTON(dev_priv)) + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) pixclk = crtc_state->base.adjusted_mode.crtc_clock; else WARN_ON(dev_priv->display.modeset_calc_cdclk); + + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ + if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) + pixclk = DIV_ROUND_UP(pixclk * 100, 95); } dev_priv->min_pixclk[crtc->pipe] = pixclk; readout_plane_state(crtc); - DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", - crtc->base.base.id, + DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", + crtc->base.base.id, crtc->base.name, crtc->active ? "enabled" : "disabled"); } @@ -16025,15 +16178,16 @@ retry: void intel_modeset_gem_init(struct drm_device *dev) { + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *c; struct drm_i915_gem_object *obj; int ret; - intel_init_gt_powersave(dev); + intel_init_gt_powersave(dev_priv); intel_modeset_init_hw(dev); - intel_setup_overlay(dev); + intel_setup_overlay(dev_priv); /* * Make sure any fbs we allocated at startup are properly @@ -16076,7 +16230,7 @@ void intel_modeset_cleanup(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_connector *connector; - intel_disable_gt_powersave(dev); + intel_disable_gt_powersave(dev_priv); intel_backlight_unregister(dev); @@ -16106,21 +16260,13 @@ void intel_modeset_cleanup(struct drm_device *dev) drm_mode_config_cleanup(dev); - intel_cleanup_overlay(dev); + intel_cleanup_overlay(dev_priv); - intel_cleanup_gt_powersave(dev); + intel_cleanup_gt_powersave(dev_priv); intel_teardown_gmbus(dev); } -/* - * Return which encoder is currently attached for connector. 
- */ -struct drm_encoder *intel_best_encoder(struct drm_connector *connector) -{ - return &intel_attached_encoder(connector)->base; -} - void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder) { @@ -16204,9 +16350,8 @@ struct intel_display_error_state { }; struct intel_display_error_state * -intel_display_capture_error_state(struct drm_device *dev) +intel_display_capture_error_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_display_error_state *error; int transcoders[] = { TRANSCODER_A, @@ -16216,14 +16361,14 @@ intel_display_capture_error_state(struct drm_device *dev) }; int i; - if (INTEL_INFO(dev)->num_pipes == 0) + if (INTEL_INFO(dev_priv)->num_pipes == 0) return NULL; error = kzalloc(sizeof(*error), GFP_ATOMIC); if (error == NULL) return NULL; - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); for_each_pipe(dev_priv, i) { @@ -16239,25 +16384,25 @@ intel_display_capture_error_state(struct drm_device *dev) error->plane[i].control = I915_READ(DSPCNTR(i)); error->plane[i].stride = I915_READ(DSPSTRIDE(i)); - if (INTEL_INFO(dev)->gen <= 3) { + if (INTEL_GEN(dev_priv) <= 3) { error->plane[i].size = I915_READ(DSPSIZE(i)); error->plane[i].pos = I915_READ(DSPPOS(i)); } - if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) + if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) error->plane[i].addr = I915_READ(DSPADDR(i)); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { error->plane[i].surface = I915_READ(DSPSURF(i)); error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); } error->pipe[i].source = I915_READ(PIPESRC(i)); - if (HAS_GMCH_DISPLAY(dev)) + if (HAS_GMCH_DISPLAY(dev_priv)) error->pipe[i].stat = I915_READ(PIPESTAT(i)); } /* Note: this does not include DSI transcoders. */ - error->num_transcoders = INTEL_INFO(dev)->num_pipes; + error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes; if (HAS_DDI(dev_priv)) error->num_transcoders++; /* Account for eDP. */ diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f192f58708c2..be083519dac9 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -131,11 +131,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev, enum pipe pipe); static void intel_dp_unset_edid(struct intel_dp *intel_dp); -static unsigned int intel_dp_unused_lane_mask(int lane_count) -{ - return ~((1 << lane_count) - 1) & 0xf; -} - static int intel_dp_max_link_bw(struct intel_dp *intel_dp) { @@ -775,6 +770,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp, DP_AUX_CH_CTL_TIME_OUT_1600us | DP_AUX_CH_CTL_RECEIVE_ERROR | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); } @@ -1582,6 +1578,27 @@ found: &pipe_config->dp_m2_n2); } + /* + * DPLL0 VCO may need to be adjusted to get the correct + * clock for eDP. This will affect cdclk as well. 
+ */ + if (is_edp(intel_dp) && + (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) { + int vco; + + switch (pipe_config->port_clock / 2) { + case 108000: + case 216000: + vco = 8640000; + break; + default: + vco = 8100000; + break; + } + + to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco; + } + if (!HAS_DDI(dev)) intel_dp_set_clock(encoder, pipe_config); @@ -2460,50 +2477,6 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder) intel_dp_link_down(intel_dp); } -static void chv_data_lane_soft_reset(struct intel_encoder *encoder, - bool reset) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); - enum pipe pipe = crtc->pipe; - uint32_t val; - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); - if (reset) - val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); - else - val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); - - if (crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); - if (reset) - val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); - else - val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); - } - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); - val |= CHV_PCS_REQ_SOFTRESET_EN; - if (reset) - val &= ~DPIO_PCS_CLK_SOFT_RESET; - else - val |= DPIO_PCS_CLK_SOFT_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); - - if (crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); - val |= CHV_PCS_REQ_SOFTRESET_EN; - if (reset) - val &= ~DPIO_PCS_CLK_SOFT_RESET; - else - val |= DPIO_PCS_CLK_SOFT_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); - } -} - static void chv_post_disable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); @@ -2811,266 +2784,38 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp) static void vlv_pre_enable_dp(struct intel_encoder *encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - u32 val; - - mutex_lock(&dev_priv->sb_lock); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); - val = 0; - if (pipe) - val |= (1<<21); - else - val &= ~(1<<21); - val |= 0x001000c4; - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); - - mutex_unlock(&dev_priv->sb_lock); + vlv_phy_pre_encoder_enable(encoder); intel_enable_dp(encoder); } static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - intel_dp_prepare(encoder); - /* Program Tx lane resets to default 
*/ - mutex_lock(&dev_priv->sb_lock); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), - DPIO_PCS_TX_LANE2_RESET | - DPIO_PCS_TX_LANE1_RESET); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), - DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | - DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | - (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | - DPIO_PCS_CLK_SOFT_RESET); - - /* Fix up inter-pair skew failure */ - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); - mutex_unlock(&dev_priv->sb_lock); + vlv_phy_pre_pll_enable(encoder); } static void chv_pre_enable_dp(struct intel_encoder *encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel ch = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - int data, i, stagger; - u32 val; - - mutex_lock(&dev_priv->sb_lock); - - /* allow hardware to manage TX FIFO reset source */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); - val &= ~DPIO_LANEDESKEW_STRAP_OVRD; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); - val &= ~DPIO_LANEDESKEW_STRAP_OVRD; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); - } - - /* Program Tx lane latency optimal setting*/ - for (i = 0; i < intel_crtc->config->lane_count; i++) { - /* Set the upar bit */ - if (intel_crtc->config->lane_count == 1) - data = 0x0; - else - data = (i == 1) ? 
0x0 : 0x1; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), - data << DPIO_UPAR_SHIFT); - } - - /* Data lane stagger programming */ - if (intel_crtc->config->port_clock > 270000) - stagger = 0x18; - else if (intel_crtc->config->port_clock > 135000) - stagger = 0xd; - else if (intel_crtc->config->port_clock > 67500) - stagger = 0x7; - else if (intel_crtc->config->port_clock > 33750) - stagger = 0x4; - else - stagger = 0x2; - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); - val |= DPIO_TX2_STAGGER_MASK(0x1f); - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); - val |= DPIO_TX2_STAGGER_MASK(0x1f); - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); - } - - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), - DPIO_LANESTAGGER_STRAP(stagger) | - DPIO_LANESTAGGER_STRAP_OVRD | - DPIO_TX1_STAGGER_MASK(0x1f) | - DPIO_TX1_STAGGER_MULT(6) | - DPIO_TX2_STAGGER_MULT(0)); - - if (intel_crtc->config->lane_count > 2) { - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), - DPIO_LANESTAGGER_STRAP(stagger) | - DPIO_LANESTAGGER_STRAP_OVRD | - DPIO_TX1_STAGGER_MASK(0x1f) | - DPIO_TX1_STAGGER_MULT(7) | - DPIO_TX2_STAGGER_MULT(5)); - } - - /* Deassert data lane reset */ - chv_data_lane_soft_reset(encoder, false); - - mutex_unlock(&dev_priv->sb_lock); + chv_phy_pre_encoder_enable(encoder); intel_enable_dp(encoder); /* Second common lane will stay alive on its own now */ - if (dport->release_cl2_override) { - chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false); - dport->release_cl2_override = false; - } + chv_phy_release_cl2_override(encoder); } static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel ch = vlv_dport_to_channel(dport); - enum pipe pipe = intel_crtc->pipe; - unsigned int lane_mask = - intel_dp_unused_lane_mask(intel_crtc->config->lane_count); - u32 val; - intel_dp_prepare(encoder); - /* - * Must trick the second common lane into life. - * Otherwise we can't even access the PLL. 
- */ - if (ch == DPIO_CH0 && pipe == PIPE_B) - dport->release_cl2_override = - !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true); - - chv_phy_powergate_lanes(encoder, true, lane_mask); - - mutex_lock(&dev_priv->sb_lock); - - /* Assert data lane reset */ - chv_data_lane_soft_reset(encoder, true); - - /* program left/right clock distribution */ - if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); - val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); - if (ch == DPIO_CH0) - val |= CHV_BUFLEFTENA1_FORCE; - if (ch == DPIO_CH1) - val |= CHV_BUFRIGHTENA1_FORCE; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); - } else { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); - val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); - if (ch == DPIO_CH0) - val |= CHV_BUFLEFTENA2_FORCE; - if (ch == DPIO_CH1) - val |= CHV_BUFRIGHTENA2_FORCE; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); - } - - /* program clock channel usage */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); - val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; - if (pipe != PIPE_B) - val &= ~CHV_PCS_USEDCLKCHANNEL; - else - val |= CHV_PCS_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); - val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; - if (pipe != PIPE_B) - val &= ~CHV_PCS_USEDCLKCHANNEL; - else - val |= CHV_PCS_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); - } - - /* - * This a a bit weird since generally CL - * matches the pipe, but here we need to - * pick the CL based on the port. - */ - val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); - if (pipe != PIPE_B) - val &= ~CHV_CMN_USEDCLKCHANNEL; - else - val |= CHV_CMN_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); - - mutex_unlock(&dev_priv->sb_lock); + chv_phy_pre_pll_enable(encoder); } static void chv_dp_post_pll_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe; - u32 val; - - mutex_lock(&dev_priv->sb_lock); - - /* disable left/right clock distribution */ - if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); - val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); - } else { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); - val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); - } - - mutex_unlock(&dev_priv->sb_lock); - - /* - * Leave the power down bit cleared for at least one - * lane so that chv_powergate_phy_ch() will power - * on something when the channel is otherwise unused. - * When the port is off and the override is removed - * the lanes power down anyway, so otherwise it doesn't - * really matter what the state of power down bits is - * after this. 
- */ - chv_phy_powergate_lanes(encoder, false, 0x0); + chv_phy_post_pll_disable(encoder); } /* @@ -3178,16 +2923,10 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct intel_crtc *intel_crtc = - to_intel_crtc(dport->base.base.crtc); + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; unsigned long demph_reg_value, preemph_reg_value, uniqtranscale_reg_value; uint8_t train_set = intel_dp->train_set[0]; - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPH_LEVEL_0: @@ -3262,37 +3001,18 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) return 0; } - mutex_lock(&dev_priv->sb_lock); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), - uniqtranscale_reg_value); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000); - mutex_unlock(&dev_priv->sb_lock); + vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value, + uniqtranscale_reg_value, 0); return 0; } -static bool chv_need_uniq_trans_scale(uint8_t train_set) -{ - return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 && - (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3; -} - static uint32_t chv_signal_levels(struct intel_dp *intel_dp) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc); - u32 deemph_reg_value, margin_reg_value, val; + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + u32 deemph_reg_value, margin_reg_value; + bool uniq_trans_scale = false; uint8_t train_set = intel_dp->train_set[0]; - enum dpio_channel ch = vlv_dport_to_channel(dport); - enum pipe pipe = intel_crtc->pipe; - int i; switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPH_LEVEL_0: @@ -3312,7 +3032,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp) case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: deemph_reg_value = 128; margin_reg_value = 154; - /* FIXME extra to set for 1200 */ + uniq_trans_scale = true; break; default: return 0; @@ -3364,88 +3084,8 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp) return 0; } - mutex_lock(&dev_priv->sb_lock); - - /* Clear calc init */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); - val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); - val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); - val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); - val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); - val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); - val |= 
DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); - } - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); - val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); - val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); - val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); - val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); - } - - /* Program swing deemph */ - for (i = 0; i < intel_crtc->config->lane_count; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); - val &= ~DPIO_SWING_DEEMPH9P5_MASK; - val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val); - } - - /* Program swing margin */ - for (i = 0; i < intel_crtc->config->lane_count; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); - - val &= ~DPIO_SWING_MARGIN000_MASK; - val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT; - - /* - * Supposedly this value shouldn't matter when unique transition - * scale is disabled, but in fact it does matter. Let's just - * always program the same value and hope it's OK. - */ - val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); - val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; - - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); - } - - /* - * The document said it needs to set bit 27 for ch0 and bit 26 - * for ch1. Might be a typo in the doc. - * For now, for this unique transition scale selection, set bit - * 27 for ch0 and ch1. - */ - for (i = 0; i < intel_crtc->config->lane_count; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); - if (chv_need_uniq_trans_scale(train_set)) - val |= DPIO_TX_UNIQ_TRANS_SCALE_EN; - else - val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); - } - - /* Start swing calculation */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); - val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); - - if (intel_crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); - val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); - } - - mutex_unlock(&dev_priv->sb_lock); + chv_set_phy_signal_level(encoder, deemph_reg_value, + margin_reg_value, uniq_trans_scale); return 0; } @@ -3714,7 +3354,6 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint8_t rev; if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, sizeof(intel_dp->dpcd)) < 0) @@ -3771,6 +3410,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) DRM_DEBUG_KMS("PSR2 %s on sink", dev_priv->psr.psr2_support ? 
"supported" : "not supported"); } + + /* Read the eDP Display control capabilities registers */ + memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd)); + if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && + (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, + intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == + sizeof(intel_dp->edp_dpcd))) + DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd), + intel_dp->edp_dpcd); } DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n", @@ -3778,10 +3426,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) yesno(drm_dp_tps3_supported(intel_dp->dpcd))); /* Intermediate frequency support */ - if (is_edp(intel_dp) && - (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && - (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) && - (rev >= 0x03)) { /* eDp v1.4 or higher */ + if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDp v1.4 or higher */ __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; int i; @@ -4935,7 +4580,6 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { .get_modes = intel_dp_get_modes, .mode_valid = intel_dp_mode_valid, - .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_dp_enc_funcs = { @@ -5590,14 +5234,14 @@ void intel_edp_drrs_flush(struct drm_device *dev, * * DRRS saves power by switching to low RR based on usage scenarios. * - * eDP DRRS:- - * The implementation is based on frontbuffer tracking implementation. - * When there is a disturbance on the screen triggered by user activity or a - * periodic system activity, DRRS is disabled (RR is changed to high RR). - * When there is no movement on screen, after a timeout of 1 second, a switch - * to low RR is made. - * For integration with frontbuffer tracking code, - * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called. + * The implementation is based on frontbuffer tracking implementation. When + * there is a disturbance on the screen triggered by user activity or a periodic + * system activity, DRRS is disabled (RR is changed to high RR). When there is + * no movement on screen, after a timeout of 1 second, a switch to low RR is + * made. + * + * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate() + * and intel_edp_drrs_flush() are called. 
* * DRRS can be further extended to support other internal panels and also * the scenario of video playback wherein RR is set based on the rate @@ -5725,8 +5369,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); - if (fixed_mode) + if (fixed_mode) { fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = fixed_mode->width_mm; + connector->display_info.height_mm = fixed_mode->height_mm; + } } mutex_unlock(&dev->mode_config.mutex); @@ -5923,9 +5570,9 @@ fail: return false; } -void -intel_dp_init(struct drm_device *dev, - i915_reg_t output_reg, enum port port) +bool intel_dp_init(struct drm_device *dev, + i915_reg_t output_reg, + enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_digital_port *intel_dig_port; @@ -5935,7 +5582,7 @@ intel_dp_init(struct drm_device *dev, intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); if (!intel_dig_port) - return; + return false; intel_connector = intel_connector_alloc(); if (!intel_connector) @@ -5945,7 +5592,7 @@ intel_dp_init(struct drm_device *dev, encoder = &intel_encoder->base; if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, - DRM_MODE_ENCODER_TMDS, NULL)) + DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port))) goto err_encoder_init; intel_encoder->compute_config = intel_dp_compute_config; @@ -5992,7 +5639,7 @@ intel_dp_init(struct drm_device *dev, if (!intel_dp_init_connector(intel_dig_port, intel_connector)) goto err_init_connector; - return; + return true; err_init_connector: drm_encoder_cleanup(encoder); @@ -6000,8 +5647,7 @@ err_encoder_init: kfree(intel_connector); err_connector_alloc: kfree(intel_dig_port); - - return; + return false; } void intel_dp_mst_suspend(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c new file mode 100644 index 000000000000..6532e226db29 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c @@ -0,0 +1,172 @@ +/* + * Copyright © 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include "intel_drv.h" + +static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) +{ + uint8_t reg_val = 0; + + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + &reg_val) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_DISPLAY_CONTROL_REGISTER); + return; + } + if (enable) + reg_val |= DP_EDP_BACKLIGHT_ENABLE; + else + reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE); + + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + reg_val) != 1) { + DRM_DEBUG_KMS("Failed to %s aux backlight\n", + enable ? "enable" : "disable"); + } +} + +/* + * Read the current backlight value from DPCD register(s) based + * on whether 8-bit (MSB) or 16-bit (MSB and LSB) values are supported + */ +static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + uint8_t read_val[2] = { 0x0 }; + uint16_t level = 0; + + if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + &read_val, sizeof(read_val)) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_BRIGHTNESS_MSB); + return 0; + } + level = read_val[0]; + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + level = (read_val[0] << 8 | read_val[1]); + + return level; +} + +/* + * Send the current backlight level over the aux channel, checking whether an + * 8-bit or a 16-bit (MSB and LSB) value is in use + */ +static void +intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + uint8_t vals[2] = { 0x0 }; + + vals[0] = level; + + /* Write the MSB and/or LSB */ + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) { + vals[0] = (level & 0xFF00) >> 8; + vals[1] = (level & 0xFF); + } + if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + vals, sizeof(vals)) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight level\n"); + return; + } +} + +static void intel_dp_aux_enable_backlight(struct intel_connector *connector) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + uint8_t dpcd_buf = 0; + + set_aux_backlight_enable(intel_dp, true); + + if ((drm_dp_dpcd_readb(&intel_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) && + ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) == + DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET)) + drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + (dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD)); +} + +static void intel_dp_aux_disable_backlight(struct intel_connector *connector) +{ + set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false); +} + +static int intel_dp_aux_setup_backlight(struct intel_connector *connector, + enum pipe pipe) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + struct intel_panel *panel = &connector->panel; + + intel_dp_aux_enable_backlight(connector); + + if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + panel->backlight.max = 0xFFFF; + else + panel->backlight.max = 0xFF; + + panel->backlight.min = 0; + panel->backlight.level = intel_dp_aux_get_backlight(connector); + + panel->backlight.enabled = panel->backlight.level != 0; + + return 0; +} + +static bool +intel_dp_aux_display_control_capable(struct intel_connector *connector) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); + + /* Check the eDP Display control
capabilities registers to determine if + * the panel can support backlight control over the aux channel + */ + if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && + (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) && + !((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) || + (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) { + DRM_DEBUG_KMS("AUX Backlight Control Supported!\n"); + return true; + } + return false; +} + +int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) +{ + struct intel_panel *panel = &intel_connector->panel; + + if (!i915.enable_dpcd_backlight) + return -ENODEV; + + if (!intel_dp_aux_display_control_capable(intel_connector)) + return -ENODEV; + + panel->backlight.setup = intel_dp_aux_setup_backlight; + panel->backlight.enable = intel_dp_aux_enable_backlight; + panel->backlight.disable = intel_dp_aux_disable_backlight; + panel->backlight.set = intel_dp_aux_set_backlight; + panel->backlight.get = intel_dp_aux_get_backlight; + + return 0; +} diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 7a34090cef34..f62ca9a126b3 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -534,7 +534,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum intel_mst->primary = intel_dig_port; drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, - DRM_MODE_ENCODER_DPMST, NULL); + DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe)); intel_encoder->type = INTEL_OUTPUT_DP_MST; intel_encoder->crtc_mask = 0x7; diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c new file mode 100644 index 000000000000..288da35572b4 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c @@ -0,0 +1,470 @@ +/* + * Copyright © 2014-2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "intel_drv.h" + +void chv_set_phy_signal_level(struct intel_encoder *encoder, + u32 deemph_reg_value, u32 margin_reg_value, + bool uniq_trans_scale) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc); + enum dpio_channel ch = vlv_dport_to_channel(dport); + enum pipe pipe = intel_crtc->pipe; + u32 val; + int i; + + mutex_lock(&dev_priv->sb_lock); + + /* Clear calc init */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); + val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); + val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); + val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); + val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); + val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); + val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); + } + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); + val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); + val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); + val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); + val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); + } + + /* Program swing deemph */ + for (i = 0; i < intel_crtc->config->lane_count; i++) { + val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); + val &= ~DPIO_SWING_DEEMPH9P5_MASK; + val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; + vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val); + } + + /* Program swing margin */ + for (i = 0; i < intel_crtc->config->lane_count; i++) { + val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); + + val &= ~DPIO_SWING_MARGIN000_MASK; + val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT; + + /* + * Supposedly this value shouldn't matter when unique transition + * scale is disabled, but in fact it does matter. Let's just + * always program the same value and hope it's OK. + */ + val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); + val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; + + vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); + } + + /* + * The document said it needs to set bit 27 for ch0 and bit 26 + * for ch1. Might be a typo in the doc. + * For now, for this unique transition scale selection, set bit + * 27 for ch0 and ch1. 
+ */ + for (i = 0; i < intel_crtc->config->lane_count; i++) { + val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); + if (uniq_trans_scale) + val |= DPIO_TX_UNIQ_TRANS_SCALE_EN; + else + val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; + vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); + } + + /* Start swing calculation */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); + val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); + val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); + } + + mutex_unlock(&dev_priv->sb_lock); + +} + +void chv_data_lane_soft_reset(struct intel_encoder *encoder, + bool reset) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + enum pipe pipe = crtc->pipe; + uint32_t val; + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); + if (reset) + val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); + else + val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); + + if (crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); + if (reset) + val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); + else + val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); + } + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); + val |= CHV_PCS_REQ_SOFTRESET_EN; + if (reset) + val &= ~DPIO_PCS_CLK_SOFT_RESET; + else + val |= DPIO_PCS_CLK_SOFT_RESET; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); + + if (crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); + val |= CHV_PCS_REQ_SOFTRESET_EN; + if (reset) + val &= ~DPIO_PCS_CLK_SOFT_RESET; + else + val |= DPIO_PCS_CLK_SOFT_RESET; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); + } +} + +void chv_phy_pre_pll_enable(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel ch = vlv_dport_to_channel(dport); + enum pipe pipe = intel_crtc->pipe; + unsigned int lane_mask = + intel_dp_unused_lane_mask(intel_crtc->config->lane_count); + u32 val; + + /* + * Must trick the second common lane into life. + * Otherwise we can't even access the PLL. 
+ */ + if (ch == DPIO_CH0 && pipe == PIPE_B) + dport->release_cl2_override = + !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true); + + chv_phy_powergate_lanes(encoder, true, lane_mask); + + mutex_lock(&dev_priv->sb_lock); + + /* Assert data lane reset */ + chv_data_lane_soft_reset(encoder, true); + + /* program left/right clock distribution */ + if (pipe != PIPE_B) { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); + val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); + if (ch == DPIO_CH0) + val |= CHV_BUFLEFTENA1_FORCE; + if (ch == DPIO_CH1) + val |= CHV_BUFRIGHTENA1_FORCE; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); + } else { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); + val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); + if (ch == DPIO_CH0) + val |= CHV_BUFLEFTENA2_FORCE; + if (ch == DPIO_CH1) + val |= CHV_BUFRIGHTENA2_FORCE; + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); + } + + /* program clock channel usage */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); + val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe != PIPE_B) + val &= ~CHV_PCS_USEDCLKCHANNEL; + else + val |= CHV_PCS_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); + val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; + if (pipe != PIPE_B) + val &= ~CHV_PCS_USEDCLKCHANNEL; + else + val |= CHV_PCS_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); + } + + /* + * This is a bit weird since generally CL + * matches the pipe, but here we need to + * pick the CL based on the port. + */ + val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); + if (pipe != PIPE_B) + val &= ~CHV_CMN_USEDCLKCHANNEL; + else + val |= CHV_CMN_USEDCLKCHANNEL; + vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); + + mutex_unlock(&dev_priv->sb_lock); +} + +void chv_phy_pre_encoder_enable(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel ch = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + int data, i, stagger; + u32 val; + + mutex_lock(&dev_priv->sb_lock); + + /* allow hardware to manage TX FIFO reset source */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); + val &= ~DPIO_LANEDESKEW_STRAP_OVRD; + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); + val &= ~DPIO_LANEDESKEW_STRAP_OVRD; + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); + } + + /* Program Tx lane latency optimal setting */ + for (i = 0; i < intel_crtc->config->lane_count; i++) { + /* Set the upar bit */ + if (intel_crtc->config->lane_count == 1) + data = 0x0; + else + data = (i == 1) ?
0x0 : 0x1; + vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), + data << DPIO_UPAR_SHIFT); + } + + /* Data lane stagger programming */ + if (intel_crtc->config->port_clock > 270000) + stagger = 0x18; + else if (intel_crtc->config->port_clock > 135000) + stagger = 0xd; + else if (intel_crtc->config->port_clock > 67500) + stagger = 0x7; + else if (intel_crtc->config->port_clock > 33750) + stagger = 0x4; + else + stagger = 0x2; + + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); + val |= DPIO_TX2_STAGGER_MASK(0x1f); + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); + + if (intel_crtc->config->lane_count > 2) { + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); + val |= DPIO_TX2_STAGGER_MASK(0x1f); + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); + } + + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), + DPIO_LANESTAGGER_STRAP(stagger) | + DPIO_LANESTAGGER_STRAP_OVRD | + DPIO_TX1_STAGGER_MASK(0x1f) | + DPIO_TX1_STAGGER_MULT(6) | + DPIO_TX2_STAGGER_MULT(0)); + + if (intel_crtc->config->lane_count > 2) { + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), + DPIO_LANESTAGGER_STRAP(stagger) | + DPIO_LANESTAGGER_STRAP_OVRD | + DPIO_TX1_STAGGER_MASK(0x1f) | + DPIO_TX1_STAGGER_MULT(7) | + DPIO_TX2_STAGGER_MULT(5)); + } + + /* Deassert data lane reset */ + chv_data_lane_soft_reset(encoder, false); + + mutex_unlock(&dev_priv->sb_lock); +} + +void chv_phy_release_cl2_override(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (dport->release_cl2_override) { + chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false); + dport->release_cl2_override = false; + } +} + +void chv_phy_post_pll_disable(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe; + u32 val; + + mutex_lock(&dev_priv->sb_lock); + + /* disable left/right clock distribution */ + if (pipe != PIPE_B) { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); + val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); + } else { + val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); + val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); + vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); + } + + mutex_unlock(&dev_priv->sb_lock); + + /* + * Leave the power down bit cleared for at least one + * lane so that chv_powergate_phy_ch() will power + * on something when the channel is otherwise unused. + * When the port is off and the override is removed + * the lanes power down anyway, so otherwise it doesn't + * really matter what the state of power down bits is + * after this. 
+ */ + chv_phy_powergate_lanes(encoder, false, 0x0); +} + +void vlv_set_phy_signal_level(struct intel_encoder *encoder, + u32 demph_reg_value, u32 preemph_reg_value, + u32 uniqtranscale_reg_value, u32 tx3_demph) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + enum dpio_channel port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + + mutex_lock(&dev_priv->sb_lock); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), + uniqtranscale_reg_value); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040); + + if (tx3_demph) + vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph); + + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); + mutex_unlock(&dev_priv->sb_lock); +} + +void vlv_phy_pre_pll_enable(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + + /* Program Tx lane resets to default */ + mutex_lock(&dev_priv->sb_lock); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), + DPIO_PCS_TX_LANE2_RESET | + DPIO_PCS_TX_LANE1_RESET); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), + DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | + DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | + (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | + DPIO_PCS_CLK_SOFT_RESET); + + /* Fix up inter-pair skew failure */ + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); + vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); + mutex_unlock(&dev_priv->sb_lock); +} + +void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + enum dpio_channel port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + u32 val; + + mutex_lock(&dev_priv->sb_lock); + + /* Enable clock channels for this port */ + val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); + val = 0; + if (pipe) + val |= (1<<21); + else + val &= ~(1<<21); + val |= 0x001000c4; + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); + + /* Program lane clock */ + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); + + mutex_unlock(&dev_priv->sb_lock); +} + +void vlv_phy_reset_lanes(struct intel_encoder *encoder) +{ + struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); + enum dpio_channel port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + + mutex_lock(&dev_priv->sb_lock); + 
vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000); + vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060); + mutex_unlock(&dev_priv->sb_lock); +} diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 3ac705936b04..c0eff1571731 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -208,8 +208,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc, if (memcmp(&crtc_state->dpll_hw_state, &shared_dpll[i].hw_state, sizeof(crtc_state->dpll_hw_state)) == 0) { - DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n", - crtc->base.base.id, pll->name, + DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n", + crtc->base.base.id, crtc->base.name, pll->name, shared_dpll[i].crtc_mask, pll->active_mask); return pll; @@ -220,8 +220,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc, for (i = range_min; i <= range_max; i++) { pll = &dev_priv->shared_dplls[i]; if (shared_dpll[i].crtc_mask == 0) { - DRM_DEBUG_KMS("CRTC:%d allocated %s\n", - crtc->base.base.id, pll->name); + DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n", + crtc->base.base.id, crtc->base.name, pll->name); return pll; } } @@ -358,14 +358,17 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, i = (enum intel_dpll_id) crtc->pipe; pll = &dev_priv->shared_dplls[i]; - DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", - crtc->base.base.id, pll->name); + DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", + crtc->base.base.id, crtc->base.name, pll->name); } else { pll = intel_find_shared_dpll(crtc, crtc_state, DPLL_ID_PCH_PLL_A, DPLL_ID_PCH_PLL_B); } + if (!pll) + return NULL; + /* reference the pll */ intel_reference_shared_dpll(pll, crtc_state); @@ -1236,9 +1239,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, case 162000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0); break; - /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which - results in CDCLK change. 
Need to handle the change of CDCLK by - disabling pipes and re-enabling them */ case 108000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0); break; @@ -1508,7 +1508,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, int clock = crtc_state->port_clock; if (encoder->type == INTEL_OUTPUT_HDMI) { - intel_clock_t best_clock; + struct dpll best_clock; /* Calculate HDMI div */ /* @@ -1613,8 +1613,8 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, i = (enum intel_dpll_id) intel_dig_port->port; pll = intel_get_shared_dpll_by_id(dev_priv, i); - DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", - crtc->base.base.id, pll->name); + DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", + crtc->base.base.id, crtc->base.name, pll->name); intel_reference_shared_dpll(pll, crtc_state); @@ -1633,18 +1633,10 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = { static void intel_ddi_pll_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t val = I915_READ(LCPLL_CTL); - - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { - int cdclk_freq; - - cdclk_freq = dev_priv->display.get_display_clock_speed(dev); - dev_priv->skl_boot_cdclk = cdclk_freq; - if (skl_sanitize_cdclk(dev_priv)) - DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n"); - if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) - DRM_ERROR("LCPLL1 is disabled\n"); - } else if (!IS_BROXTON(dev_priv)) { + + if (INTEL_GEN(dev_priv) < 9) { + uint32_t val = I915_READ(LCPLL_CTL); + /* * The LCPLL register should be turned on by the BIOS. For now * let's just check its state and print errors in case diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a28b4aac1e02..270da8de0acf 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -266,7 +266,7 @@ struct intel_connector { struct intel_dp *mst_port; }; -typedef struct dpll { +struct dpll { /* given values */ int n; int m1, m2; @@ -276,7 +276,7 @@ typedef struct dpll { int vco; int m; int p; -} intel_clock_t; +}; struct intel_atomic_state { struct drm_atomic_state base; @@ -291,17 +291,32 @@ struct intel_atomic_state { bool dpll_set, modeset; + /* + * Does this transaction change the pipes that are active? This mask + * tracks which CRTC's have changed their active state at the end of + * the transaction (not counting the temporary disable during modesets). + * This mask should only be non-zero when intel_state->modeset is true, + * but the converse is not necessarily true; simply changing a mode may + * not flip the final active status of any CRTC's + */ + unsigned int active_pipe_changes; + unsigned int active_crtcs; unsigned int min_pixclk[I915_MAX_PIPES]; + /* SKL/KBL Only */ + unsigned int cdclk_pll_vco; + struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; - struct intel_wm_config wm_config; /* * Current watermarks can't be trusted during hardware readout, so * don't bother calculating intermediate watermarks. */ bool skip_intermediate_wm; + + /* Gen9+ only */ + struct skl_wm_values wm_results; }; struct intel_plane_state { @@ -405,6 +420,48 @@ struct skl_pipe_wm { uint32_t linetime; }; +struct intel_crtc_wm_state { + union { + struct { + /* + * Intermediate watermarks; these can be + * programmed immediately since they satisfy + * both the current configuration we're + * switching away from and the new + * configuration we're switching to. 
+ */ + struct intel_pipe_wm intermediate; + + /* + * Optimal watermarks, programmed post-vblank + * when this state is committed. + */ + struct intel_pipe_wm optimal; + } ilk; + + struct { + /* gen9+ only needs 1-step wm programming */ + struct skl_pipe_wm optimal; + + /* cached plane data rate */ + unsigned plane_data_rate[I915_MAX_PLANES]; + unsigned plane_y_data_rate[I915_MAX_PLANES]; + + /* minimum block allocation */ + uint16_t minimum_blocks[I915_MAX_PLANES]; + uint16_t minimum_y_blocks[I915_MAX_PLANES]; + } skl; + }; + + /* + * Platforms with two-step watermark programming will need to + * update watermark programming post-vblank to switch from the + * safe intermediate watermarks to the optimal final + * watermarks. + */ + bool need_postvbl_update; +}; + struct intel_crtc_state { struct drm_crtc_state base; @@ -558,32 +615,7 @@ struct intel_crtc_state { /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */ bool disable_lp_wm; - struct { - /* - * Optimal watermarks, programmed post-vblank when this state - * is committed. - */ - union { - struct intel_pipe_wm ilk; - struct skl_pipe_wm skl; - } optimal; - - /* - * Intermediate watermarks; these can be programmed immediately - * since they satisfy both the current configuration we're - * switching away from and the new configuration we're switching - * to. - */ - struct intel_pipe_wm intermediate; - - /* - * Platforms with two-step watermark programming will need to - * update watermark programming post-vblank to switch from the - * safe intermediate watermarks to the optimal final - * watermarks. - */ - bool need_postvbl_update; - } wm; + struct intel_crtc_wm_state wm; /* Gamma mode programmed on the pipe */ uint32_t gamma_mode; @@ -598,14 +630,6 @@ struct vlv_wm_state { bool cxsr; }; -struct intel_mmio_flip { - struct work_struct work; - struct drm_i915_private *i915; - struct drm_i915_gem_request *req; - struct intel_crtc *crtc; - unsigned int rotation; -}; - struct intel_crtc { struct drm_crtc base; enum pipe pipe; @@ -620,7 +644,7 @@ struct intel_crtc { unsigned long enabled_power_domains; bool lowfreq_avail; struct intel_overlay *overlay; - struct intel_unpin_work *unpin_work; + struct intel_flip_work *flip_work; atomic_t unpin_work_count; @@ -815,6 +839,7 @@ struct intel_dp { uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; /* sink rates as reported by DP_SUPPORTED_LINK_RATES */ uint8_t num_sink_rates; int sink_rates[DP_MAX_SUPPORTED_RATES]; @@ -947,22 +972,21 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane) return dev_priv->plane_to_crtc_mapping[plane]; } -struct intel_unpin_work { - struct work_struct work; +struct intel_flip_work { + struct work_struct unpin_work; + struct work_struct mmio_work; + struct drm_crtc *crtc; struct drm_framebuffer *old_fb; struct drm_i915_gem_object *pending_flip_obj; struct drm_pending_vblank_event *event; atomic_t pending; -#define INTEL_FLIP_INACTIVE 0 -#define INTEL_FLIP_PENDING 1 -#define INTEL_FLIP_COMPLETE 2 u32 flip_count; u32 gtt_offset; struct drm_i915_gem_request *flip_queued_req; u32 flip_queued_vblank; u32 flip_ready_vblank; - bool enable_stall_check; + unsigned int rotation; }; struct intel_load_detect_pipe { @@ -1031,9 +1055,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_enable_pm_irq(struct drm_i915_private 
*dev_priv, uint32_t mask); void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void gen6_reset_rps_interrupts(struct drm_device *dev); -void gen6_enable_rps_interrupts(struct drm_device *dev); -void gen6_disable_rps_interrupts(struct drm_device *dev); +void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv); +void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv); +void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv); u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask); void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); @@ -1112,14 +1136,16 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv); void i915_audio_component_cleanup(struct drm_i915_private *dev_priv); /* intel_display.c */ +void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco); +void intel_update_rawclk(struct drm_i915_private *dev_priv); int vlv_get_cck_clock(struct drm_i915_private *dev_priv, const char *name, u32 reg, int ref_freq); extern const struct drm_plane_funcs intel_plane_funcs; void intel_init_display_hooks(struct drm_i915_private *dev_priv); unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); bool intel_has_pending_fb_unpin(struct drm_device *dev); -void intel_mark_busy(struct drm_device *dev); -void intel_mark_idle(struct drm_device *dev); +void intel_mark_busy(struct drm_i915_private *dev_priv); +void intel_mark_idle(struct drm_i915_private *dev_priv); void intel_crtc_restore_mode(struct drm_crtc *crtc); int intel_display_suspend(struct drm_device *dev); void intel_encoder_destroy(struct drm_encoder *encoder); @@ -1128,7 +1154,6 @@ struct intel_connector *intel_connector_alloc(void); bool intel_connector_get_hw_state(struct intel_connector *connector); void intel_connector_attach_encoder(struct intel_connector *connector, struct intel_encoder *encoder); -struct drm_encoder *intel_best_encoder(struct drm_connector *connector); struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); @@ -1151,6 +1176,9 @@ intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe) if (crtc->active) intel_wait_for_vblank(dev, pipe); } + +u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc); + int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dport, @@ -1164,14 +1192,14 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx); int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); +void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); struct drm_framebuffer * __intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj); -void intel_prepare_page_flip(struct drm_device *dev, int plane); -void intel_finish_page_flip(struct drm_device *dev, int pipe); -void intel_finish_page_flip_plane(struct drm_device *dev, int plane); -void intel_check_page_flip(struct drm_device *dev, int pipe); +void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe); +void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe); +void intel_check_page_flip(struct drm_i915_private *dev_priv, 
int pipe); int intel_prepare_plane_fb(struct drm_plane *plane, const struct drm_plane_state *new_state); void intel_cleanup_plane_fb(struct drm_plane *plane, @@ -1228,13 +1256,12 @@ u32 intel_compute_tile_offset(int *x, int *y, const struct drm_framebuffer *fb, int plane, unsigned int pitch, unsigned int rotation); -void intel_prepare_reset(struct drm_device *dev); -void intel_finish_reset(struct drm_device *dev); +void intel_prepare_reset(struct drm_i915_private *dev_priv); +void intel_finish_reset(struct drm_i915_private *dev_priv); void hsw_enable_pc8(struct drm_i915_private *dev_priv); void hsw_disable_pc8(struct drm_i915_private *dev_priv); void broxton_init_cdclk(struct drm_i915_private *dev_priv); void broxton_uninit_cdclk(struct drm_i915_private *dev_priv); -bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv); void broxton_ddi_phy_init(struct drm_i915_private *dev_priv); void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv); void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv); @@ -1243,8 +1270,8 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv); void bxt_disable_dc9(struct drm_i915_private *dev_priv); void gen9_enable_dc5(struct drm_i915_private *dev_priv); void skl_init_cdclk(struct drm_i915_private *dev_priv); -int skl_sanitize_cdclk(struct drm_i915_private *dev_priv); void skl_uninit_cdclk(struct drm_i915_private *dev_priv); +unsigned int skl_cdclk_get_vco(unsigned int freq); void skl_enable_dc6(struct drm_i915_private *dev_priv); void skl_disable_dc6(struct drm_i915_private *dev_priv); void intel_dp_get_m_n(struct intel_crtc *crtc, @@ -1252,8 +1279,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, - intel_clock_t *best_clock); -int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock); + struct dpll *best_clock); +int chv_calc_dpll_params(int refclk, struct dpll *pll_clock); bool intel_crtc_active(struct drm_crtc *crtc); void hsw_enable_ips(struct intel_crtc *crtc); @@ -1284,7 +1311,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *); void intel_csr_ucode_resume(struct drm_i915_private *); /* intel_dp.c */ -void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); +bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector); void intel_dp_set_link_params(struct intel_dp *intel_dp, @@ -1339,12 +1366,22 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); bool intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]); +static inline unsigned int intel_dp_unused_lane_mask(int lane_count) +{ + return ~((1 << lane_count) - 1) & 0xf; +} + +/* intel_dp_aux_backlight.c */ +int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); + /* intel_dp_mst.c */ int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); /* intel_dsi.c */ void intel_dsi_init(struct drm_device *dev); +/* intel_dsi_dcs_backlight.c */ +int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); /* intel_dvo.c */ void intel_dvo_init(struct drm_device *dev); @@ -1424,13 
+1461,13 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector); /* intel_overlay.c */ -void intel_setup_overlay(struct drm_device *dev); -void intel_cleanup_overlay(struct drm_device *dev); +void intel_setup_overlay(struct drm_i915_private *dev_priv); +void intel_cleanup_overlay(struct drm_i915_private *dev_priv); int intel_overlay_switch_off(struct intel_overlay *overlay); -int intel_overlay_put_image(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int intel_overlay_attrs(struct drm_device *dev, void *data, - struct drm_file *file_priv); +int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); void intel_overlay_reset(struct drm_i915_private *dev_priv); @@ -1601,21 +1638,20 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); void intel_pm_setup(struct drm_device *dev); void intel_gpu_ips_init(struct drm_i915_private *dev_priv); void intel_gpu_ips_teardown(void); -void intel_init_gt_powersave(struct drm_device *dev); -void intel_cleanup_gt_powersave(struct drm_device *dev); -void intel_enable_gt_powersave(struct drm_device *dev); -void intel_disable_gt_powersave(struct drm_device *dev); -void intel_suspend_gt_powersave(struct drm_device *dev); -void intel_reset_gt_powersave(struct drm_device *dev); -void gen6_update_ring_freq(struct drm_device *dev); +void intel_init_gt_powersave(struct drm_i915_private *dev_priv); +void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv); +void intel_enable_gt_powersave(struct drm_i915_private *dev_priv); +void intel_disable_gt_powersave(struct drm_i915_private *dev_priv); +void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv); +void intel_reset_gt_powersave(struct drm_i915_private *dev_priv); +void gen6_update_ring_freq(struct drm_i915_private *dev_priv); void gen6_rps_busy(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv); void gen6_rps_boost(struct drm_i915_private *dev_priv, struct intel_rps_client *rps, unsigned long submitted); -void intel_queue_rps_boost_for_request(struct drm_device *dev, - struct drm_i915_gem_request *req); +void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req); void vlv_wm_get_hw_state(struct drm_device *dev); void ilk_wm_get_hw_state(struct drm_device *dev); void skl_wm_get_hw_state(struct drm_device *dev); @@ -1623,7 +1659,11 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, struct skl_ddb_allocation *ddb /* out */); uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); bool ilk_disable_lp_wm(struct drm_device *dev); -int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6); +int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6); +static inline int intel_enable_rc6(void) +{ + return i915.enable_rc6; +} /* intel_sdvo.c */ bool intel_sdvo_init(struct drm_device *dev, @@ -1635,7 +1675,7 @@ int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); void intel_pipe_update_start(struct intel_crtc *crtc); -void intel_pipe_update_end(struct intel_crtc *crtc); +void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work); /* intel_tv.c */ void intel_tv_init(struct drm_device *dev); diff --git 
a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 366ad6c67ce4..a2ead5eac336 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -532,7 +532,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); enum port port; - u32 tmp; DRM_DEBUG_KMS("\n"); @@ -551,11 +550,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) msleep(intel_dsi->panel_on_delay); - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + u32 val; + /* Disable DPOunit clock gating, can stall pipe */ - tmp = I915_READ(DSPCLK_GATE_D); - tmp |= DPOUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(DSPCLK_GATE_D, tmp); + val = I915_READ(DSPCLK_GATE_D); + val |= DPOUNIT_CLOCK_GATE_DISABLE; + I915_WRITE(DSPCLK_GATE_D, val); } /* put device in ready state */ @@ -693,7 +694,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder) intel_dsi_clear_device_ready(encoder); - if (!IS_BROXTON(dev_priv)) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { u32 val; val = I915_READ(DSPCLK_GATE_D); @@ -1378,7 +1379,6 @@ static const struct drm_encoder_funcs intel_dsi_funcs = { static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = { .get_modes = intel_dsi_get_modes, .mode_valid = intel_dsi_mode_valid, - .best_encoder = intel_best_encoder, }; static const struct drm_connector_funcs intel_dsi_connector_funcs = { @@ -1449,7 +1449,7 @@ void intel_dsi_init(struct drm_device *dev) connector = &intel_connector->base; drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI, - NULL); + "DSI %c", port_name(port)); intel_encoder->compute_config = intel_dsi_compute_config; intel_encoder->pre_enable = intel_dsi_pre_enable; @@ -1473,10 +1473,42 @@ void intel_dsi_init(struct drm_device *dev) else intel_encoder->crtc_mask = BIT(PIPE_B); - if (dev_priv->vbt.dsi.config->dual_link) + if (dev_priv->vbt.dsi.config->dual_link) { intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); - else + + switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) { + case DL_DCS_PORT_A: + intel_dsi->dcs_backlight_ports = BIT(PORT_A); + break; + case DL_DCS_PORT_C: + intel_dsi->dcs_backlight_ports = BIT(PORT_C); + break; + default: + case DL_DCS_PORT_A_AND_C: + intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C); + break; + } + + switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) { + case DL_DCS_PORT_A: + intel_dsi->dcs_cabc_ports = BIT(PORT_A); + break; + case DL_DCS_PORT_C: + intel_dsi->dcs_cabc_ports = BIT(PORT_C); + break; + default: + case DL_DCS_PORT_A_AND_C: + intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C); + break; + } + } else { intel_dsi->ports = BIT(port); + intel_dsi->dcs_backlight_ports = BIT(port); + intel_dsi->dcs_cabc_ports = BIT(port); + } + + if (!dev_priv->vbt.dsi.config->cabc_supported) + intel_dsi->dcs_cabc_ports = 0; /* Create a DSI host (and a device) for each port. 
*/ for_each_dsi_port(port, intel_dsi->ports) { @@ -1545,6 +1577,9 @@ void intel_dsi_init(struct drm_device *dev) goto err; } + connector->display_info.width_mm = fixed_mode->width_mm; + connector->display_info.height_mm = fixed_mode->height_mm; + intel_panel_init(&intel_connector->panel, fixed_mode, NULL); intel_dsi_add_properties(intel_connector); diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index 61a6957fc6c2..5967ea6d6045 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -78,6 +78,10 @@ struct intel_dsi { u8 escape_clk_div; u8 dual_link; + + u16 dcs_backlight_ports; + u16 dcs_cabc_ports; + u8 pixel_overlap; u32 port_bits; u32 bw_timer; diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c new file mode 100644 index 000000000000..f0dc427743f8 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c @@ -0,0 +1,179 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Author: Deepak M <m.deepak at intel.com> + */ + +#include "intel_drv.h" +#include "intel_dsi.h" +#include "i915_drv.h" +#include <video/mipi_display.h> +#include <drm/drm_mipi_dsi.h> + +#define CONTROL_DISPLAY_BCTRL (1 << 5) +#define CONTROL_DISPLAY_DD (1 << 3) +#define CONTROL_DISPLAY_BL (1 << 2) + +#define POWER_SAVE_OFF (0 << 0) +#define POWER_SAVE_LOW (1 << 0) +#define POWER_SAVE_MEDIUM (2 << 0) +#define POWER_SAVE_HIGH (3 << 0) +#define POWER_SAVE_OUTDOOR_MODE (4 << 0) + +#define PANEL_PWM_MAX_VALUE 0xFF + +static u32 dcs_get_backlight(struct intel_connector *connector) +{ + struct intel_encoder *encoder = connector->encoder; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct mipi_dsi_device *dsi_device; + u8 data; + enum port port; + + /* FIXME: Need to take care of 16 bit brightness level */ + for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + dsi_device = intel_dsi->dsi_hosts[port]->device; + mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS, + &data, sizeof(data)); + break; + } + + return data; +} + +static void dcs_set_backlight(struct intel_connector *connector, u32 level) +{ + struct intel_encoder *encoder = connector->encoder; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct mipi_dsi_device *dsi_device; + u8 data = level; + enum port port; + + /* FIXME: Need to take care of 16 bit brightness level */ + for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + dsi_device = intel_dsi->dsi_hosts[port]->device; + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, + &data, sizeof(data)); + } +} + +static void dcs_disable_backlight(struct intel_connector *connector) +{ + struct intel_encoder *encoder = connector->encoder; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct mipi_dsi_device *dsi_device; + enum port port; + + dcs_set_backlight(connector, 0); + + for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) { + u8 cabc = POWER_SAVE_OFF; + + dsi_device = intel_dsi->dsi_hosts[port]->device; + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE, + &cabc, sizeof(cabc)); + } + + for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + u8 ctrl = 0; + + dsi_device = intel_dsi->dsi_hosts[port]->device; + + mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY, + &ctrl, sizeof(ctrl)); + + ctrl &= ~CONTROL_DISPLAY_BL; + ctrl &= ~CONTROL_DISPLAY_DD; + ctrl &= ~CONTROL_DISPLAY_BCTRL; + + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY, + &ctrl, sizeof(ctrl)); + } +} + +static void dcs_enable_backlight(struct intel_connector *connector) +{ + struct intel_encoder *encoder = connector->encoder; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + struct intel_panel *panel = &connector->panel; + struct mipi_dsi_device *dsi_device; + enum port port; + + for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { + u8 ctrl = 0; + + dsi_device = intel_dsi->dsi_hosts[port]->device; + + mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY, + &ctrl, sizeof(ctrl)); + + ctrl |= CONTROL_DISPLAY_BL; + ctrl |= CONTROL_DISPLAY_DD; + ctrl |= CONTROL_DISPLAY_BCTRL; + + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY, + &ctrl, sizeof(ctrl)); + } + + for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) { + u8 cabc = POWER_SAVE_MEDIUM; + + dsi_device = intel_dsi->dsi_hosts[port]->device; + mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE, + &cabc, sizeof(cabc)); + } + + dcs_set_backlight(connector, panel->backlight.level); +} + 
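The enable path above is a read-modify-write of the DCS control-display register followed by a brightness write, looped over each port in dcs_backlight_ports. A condensed single-panel sketch of the same sequence (example_dcs_set_brightness and the EX_* masks are illustrative stand-ins; the DCS opcodes and the mipi_dsi_dcs_read()/mipi_dsi_dcs_write() helpers are the ones used above):

#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>

#define EX_CTRL_BCTRL	(1 << 5)	/* as CONTROL_DISPLAY_BCTRL above */
#define EX_CTRL_BL	(1 << 2)	/* as CONTROL_DISPLAY_BL above */

/* Hypothetical single-panel version of dcs_enable/set_backlight(). */
static int example_dcs_set_brightness(struct mipi_dsi_device *dsi, u8 level)
{
	u8 ctrl = 0;
	ssize_t err;

	/* Read-modify-write the control-display register first... */
	err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_CONTROL_DISPLAY,
				&ctrl, sizeof(ctrl));
	if (err < 0)
		return err;

	ctrl |= EX_CTRL_BCTRL | EX_CTRL_BL;
	err = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
				 &ctrl, sizeof(ctrl));
	if (err < 0)
		return err;

	/* ...then program the 8-bit brightness level itself. */
	err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
				 &level, sizeof(level));
	return err < 0 ? err : 0;
}

As the FIXMEs note, a panel reporting 16-bit brightness would need a two-byte payload here instead of the single u8.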
+static int dcs_setup_backlight(struct intel_connector *connector, + enum pipe unused) +{ + struct intel_panel *panel = &connector->panel; + + panel->backlight.max = PANEL_PWM_MAX_VALUE; + panel->backlight.level = PANEL_PWM_MAX_VALUE; + + return 0; +} + +int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector) +{ + struct drm_device *dev = intel_connector->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_encoder *encoder = intel_connector->encoder; + struct intel_panel *panel = &intel_connector->panel; + + if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS) + return -ENODEV; + + if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI)) + return -EINVAL; + + panel->backlight.setup = dcs_setup_backlight; + panel->backlight.enable = dcs_enable_backlight; + panel->backlight.disable = dcs_disable_backlight; + panel->backlight.set = dcs_set_backlight; + panel->backlight.get = dcs_get_backlight; + + return 0; +} diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index e498f1c3221e..f122484bedfc 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -95,6 +95,24 @@ static struct gpio_map vlv_gpio_table[] = { { VLV_GPIO_NC_11_PANEL1_BKLTCTL }, }; +#define CHV_GPIO_IDX_START_N 0 +#define CHV_GPIO_IDX_START_E 73 +#define CHV_GPIO_IDX_START_SW 100 +#define CHV_GPIO_IDX_START_SE 198 + +#define CHV_VBT_MAX_PINS_PER_FMLY 15 + +#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8) +#define CHV_GPIO_GPIOEN (1 << 15) +#define CHV_GPIO_GPIOCFG_GPIO (0 << 8) +#define CHV_GPIO_GPIOCFG_GPO (1 << 8) +#define CHV_GPIO_GPIOCFG_GPI (2 << 8) +#define CHV_GPIO_GPIOCFG_HIZ (3 << 8) +#define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1) + +#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4) +#define CHV_GPIO_CFGLOCK (1 << 31) + static inline enum port intel_dsi_seq_port_to_port(u8 port) { return port ? PORT_C : PORT_A; @@ -203,13 +221,14 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv, map = &vlv_gpio_table[gpio_index]; if (dev_priv->vbt.dsi.seq_version >= 3) { - DRM_DEBUG_KMS("GPIO element v3 not supported\n"); - return; + /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */ + port = IOSF_PORT_GPIO_NC; } else { if (gpio_source == 0) { port = IOSF_PORT_GPIO_NC; } else if (gpio_source == 1) { - port = IOSF_PORT_GPIO_SC; + DRM_DEBUG_KMS("SC gpio not supported\n"); + return; } else { DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source); return; @@ -231,6 +250,56 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv, mutex_unlock(&dev_priv->sb_lock); } +static void chv_exec_gpio(struct drm_i915_private *dev_priv, + u8 gpio_source, u8 gpio_index, bool value) +{ + u16 cfg0, cfg1; + u16 family_num; + u8 port; + + if (dev_priv->vbt.dsi.seq_version >= 3) { + if (gpio_index >= CHV_GPIO_IDX_START_SE) { + /* XXX: it's unclear whether 255->57 is part of SE. 
*/ + gpio_index -= CHV_GPIO_IDX_START_SE; + port = CHV_IOSF_PORT_GPIO_SE; + } else if (gpio_index >= CHV_GPIO_IDX_START_SW) { + gpio_index -= CHV_GPIO_IDX_START_SW; + port = CHV_IOSF_PORT_GPIO_SW; + } else if (gpio_index >= CHV_GPIO_IDX_START_E) { + gpio_index -= CHV_GPIO_IDX_START_E; + port = CHV_IOSF_PORT_GPIO_E; + } else { + port = CHV_IOSF_PORT_GPIO_N; + } + } else { + /* XXX: The spec is unclear about CHV GPIO on seq v2 */ + if (gpio_source != 0) { + DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source); + return; + } + + if (gpio_index >= CHV_GPIO_IDX_START_E) { + DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n", + gpio_index); + return; + } + + port = CHV_IOSF_PORT_GPIO_N; + } + + family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY; + gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY; + + cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index); + cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index); + + mutex_lock(&dev_priv->sb_lock); + vlv_iosf_sb_write(dev_priv, port, cfg1, 0); + vlv_iosf_sb_write(dev_priv, port, cfg0, + CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value)); + mutex_unlock(&dev_priv->sb_lock); +} + static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) { struct drm_device *dev = intel_dsi->base.base.dev; @@ -254,6 +323,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) if (IS_VALLEYVIEW(dev_priv)) vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value); + else if (IS_CHERRYVIEW(dev_priv)) + chv_exec_gpio(dev_priv, gpio_source, gpio_index, value); else DRM_DEBUG_KMS("GPIO element not supported on this platform\n"); diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 286baec979c8..c86f88ed92fd 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -351,7 +351,6 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = { static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { .mode_valid = intel_dvo_mode_valid, .get_modes = intel_dvo_get_modes, - .best_encoder = intel_best_encoder, }; static void intel_dvo_enc_destroy(struct drm_encoder *encoder) @@ -406,6 +405,18 @@ intel_dvo_get_current_mode(struct drm_connector *connector) return mode; } +static char intel_dvo_port_name(i915_reg_t dvo_reg) +{ + if (i915_mmio_reg_equal(dvo_reg, DVOA)) + return 'A'; + else if (i915_mmio_reg_equal(dvo_reg, DVOB)) + return 'B'; + else if (i915_mmio_reg_equal(dvo_reg, DVOC)) + return 'C'; + else + return '?'; +} + void intel_dvo_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -428,8 +439,6 @@ void intel_dvo_init(struct drm_device *dev) intel_dvo->attached_connector = intel_connector; intel_encoder = &intel_dvo->base; - drm_encoder_init(dev, &intel_encoder->base, - &intel_dvo_enc_funcs, encoder_type, NULL); intel_encoder->disable = intel_disable_dvo; intel_encoder->enable = intel_enable_dvo; @@ -496,6 +505,10 @@ void intel_dvo_init(struct drm_device *dev) if (!dvoinit) continue; + drm_encoder_init(dev, &intel_encoder->base, + &intel_dvo_enc_funcs, encoder_type, + "DVO %c", intel_dvo_port_name(dvo->dvo_reg)); + intel_encoder->type = INTEL_OUTPUT_DVO; intel_encoder->crtc_mask = (1 << 0) | (1 << 1); switch (dvo->type) { diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index d5a7cfec589b..45ee07b888a0 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -374,8 +374,9 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private 
*dev_priv) * @dev_priv: i915 device instance * * This function is used to verify the current state of FBC. + * * FIXME: This should be tracked in the plane config eventually - * instead of queried at runtime for most callers. + * instead of queried at runtime for most callers. */ bool intel_fbc_is_active(struct drm_i915_private *dev_priv) { @@ -740,7 +741,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc) /* FIXME: We lack the proper locking here, so only run this on the * platforms that need it. */ - if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7) + if (IS_GEN(dev_priv, 5, 6)) cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj); cache->fb.pixel_format = fb->pixel_format; cache->fb.stride = fb->pitches[0]; @@ -827,7 +828,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc) bool enable_by_default = IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv); - if (intel_vgpu_active(dev_priv->dev)) { + if (intel_vgpu_active(dev_priv)) { fbc->no_fbc_reason = "VGPU is active"; return false; } diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index ab8d09a81f14..4c725ad6fb54 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -150,10 +150,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper, if (size * 2 < ggtt->stolen_usable_size) obj = i915_gem_object_create_stolen(dev, size); if (obj == NULL) - obj = i915_gem_alloc_object(dev, size); - if (!obj) { + obj = i915_gem_object_create(dev, size); + if (IS_ERR(obj)) { DRM_ERROR("failed to allocate framebuffer\n"); - ret = -ENOMEM; + ret = PTR_ERR(obj); goto out; } @@ -186,9 +186,11 @@ static int intelfb_create(struct drm_fb_helper *helper, struct i915_ggtt *ggtt = &dev_priv->ggtt; struct fb_info *info; struct drm_framebuffer *fb; + struct i915_vma *vma; struct drm_i915_gem_object *obj; - int size, ret; bool prealloc = false; + void *vaddr; + int ret; if (intel_fb && (sizes->fb_width > intel_fb->base.width || @@ -214,7 +216,6 @@ static int intelfb_create(struct drm_fb_helper *helper, } obj = intel_fb->obj; - size = obj->base.size; mutex_lock(&dev->struct_mutex); @@ -244,22 +245,23 @@ static int intelfb_create(struct drm_fb_helper *helper, info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &intelfb_ops; + vma = i915_gem_obj_to_ggtt(obj); + /* setup aperture base/size for vesafb takeover */ info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].size = ggtt->mappable_end; - info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj); - info->fix.smem_len = size; + info->fix.smem_start = dev->mode_config.fb_base + vma->node.start; + info->fix.smem_len = vma->node.size; - info->screen_base = - ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj), - size); - if (!info->screen_base) { + vaddr = i915_vma_pin_iomap(vma); + if (IS_ERR(vaddr)) { DRM_ERROR("Failed to remap framebuffer into virtual memory\n"); - ret = -ENOSPC; + ret = PTR_ERR(vaddr); goto out_destroy_fbi; } - info->screen_size = size; + info->screen_base = vaddr; + info->screen_size = vma->node.size; /* This driver doesn't need a VT switch to restore the mode on resume */ info->skip_vt_switch = true; @@ -287,7 +289,7 @@ static int intelfb_create(struct drm_fb_helper *helper, out_destroy_fbi: drm_fb_helper_release_fbi(helper); out_unpin: - i915_gem_object_ggtt_unpin(obj); + intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0)); out_unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ 
-488,10 +490,10 @@ retry: } crtcs[i] = new_crtc; - DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n", + DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n", connector->name, - pipe_name(to_intel_crtc(connector->state->crtc)->pipe), connector->state->crtc->base.id, + connector->state->crtc->name, modes[i]->hdisplay, modes[i]->vdisplay, modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :""); @@ -551,6 +553,11 @@ static void intel_fbdev_destroy(struct drm_device *dev, if (ifbdev->fb) { drm_framebuffer_unregister_private(&ifbdev->fb->base); + + mutex_lock(&dev->struct_mutex); + intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0)); + mutex_unlock(&dev->struct_mutex); + drm_framebuffer_remove(&ifbdev->fb->base); } } @@ -717,8 +724,6 @@ int intel_fbdev_init(struct drm_device *dev) return ret; } - ifbdev->helper.atomic = true; - dev_priv->fbdev = ifbdev; INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker); diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 9d79c4c3e256..41601c71f529 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -48,14 +48,23 @@ struct drm_i915_gem_request; * queue (a circular array of work items), again described in the process * descriptor. Work queue pages are mapped momentarily as required. * - * Finally, we also keep a few statistics here, including the number of - * submissions to each engine, and a record of the last submission failure - * (if any). + * We also keep a few statistics on failures. Ideally, these should all + * be zero! + * no_wq_space: times that the submission pre-check found no space was + * available in the work queue (note, the queue is shared, + * not per-engine). It is OK for this to be nonzero, but + * it should not be huge! + * q_fail: failed to enqueue a work item. This should never happen, + * because we check for space beforehand. + * b_fail: failed to ring the doorbell. This should never happen, unless + * somehow the hardware misbehaves, or maybe if the GuC firmware + * crashes? We probably need to reset the GPU to recover. 
+ * retcode: errno from last guc_submit() */ struct i915_guc_client { struct drm_i915_gem_object *client_obj; void *client_base; /* first page (only) of above */ - struct intel_context *owner; + struct i915_gem_context *owner; struct intel_guc *guc; uint32_t priority; uint32_t ctx_index; @@ -71,12 +80,13 @@ struct i915_guc_client { uint32_t wq_tail; uint32_t unused; /* Was 'wq_head' */ - /* GuC submission statistics & status */ - uint64_t submissions[GUC_MAX_ENGINES_NUM]; - uint32_t q_fail; + uint32_t no_wq_space; + uint32_t q_fail; /* No longer used */ uint32_t b_fail; int retcode; - int spare; /* pad to 32 DWords */ + + /* Per-engine counts of GuC submissions */ + uint64_t submissions[GUC_MAX_ENGINES_NUM]; }; enum intel_guc_fw_status { @@ -138,9 +148,9 @@ struct intel_guc { }; /* intel_guc_loader.c */ -extern void intel_guc_ucode_init(struct drm_device *dev); -extern int intel_guc_ucode_load(struct drm_device *dev); -extern void intel_guc_ucode_fini(struct drm_device *dev); +extern void intel_guc_init(struct drm_device *dev); +extern int intel_guc_setup(struct drm_device *dev); +extern void intel_guc_fini(struct drm_device *dev); extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status); extern int intel_guc_suspend(struct drm_device *dev); extern int intel_guc_resume(struct drm_device *dev); @@ -148,10 +158,9 @@ extern int intel_guc_resume(struct drm_device *dev); /* i915_guc_submission.c */ int i915_guc_submission_init(struct drm_device *dev); int i915_guc_submission_enable(struct drm_device *dev); -int i915_guc_submit(struct i915_guc_client *client, - struct drm_i915_gem_request *rq); +int i915_guc_wq_check_space(struct drm_i915_gem_request *rq); +int i915_guc_submit(struct drm_i915_gem_request *rq); void i915_guc_submission_disable(struct drm_device *dev); void i915_guc_submission_fini(struct drm_device *dev); -int i915_guc_wq_check_space(struct i915_guc_client *client); #endif diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 2de57ffe5e18..944786d7075b 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -71,7 +71,8 @@ #define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT) #define WQ_RING_TAIL_SHIFT 20 -#define WQ_RING_TAIL_MASK (0x7FF << WQ_RING_TAIL_SHIFT) +#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ +#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT) #define GUC_DOORBELL_ENABLED 1 #define GUC_DOORBELL_DISABLED 0 diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 876e5da44c4e..f2b88c7209cb 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -59,9 +59,12 @@ * */ -#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6.bin" +#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin" MODULE_FIRMWARE(I915_SKL_GUC_UCODE); +#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin" +MODULE_FIRMWARE(I915_BXT_GUC_UCODE); + /* User-friendly representation of an enum */ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status) { @@ -100,6 +103,7 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; int irqs; + u32 tmp; /* tell all command streamers to forward interrupts and vblank to GuC */ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS); @@ -114,6 +118,16 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv) I915_WRITE(GUC_BCS_RCS_IER, ~irqs); I915_WRITE(GUC_VCS2_VCS1_IER, 
~irqs); I915_WRITE(GUC_WD_VECS_IER, ~irqs); + + /* + * If the GuC has routed PM interrupts to itself, don't keep the + * routing bit; do keep any other interrupts the GuC has unmasked. + */ + tmp = I915_READ(GEN6_PMINTRMSK); + if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) { + dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP); + dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP; + } } static u32 get_gttype(struct drm_i915_private *dev_priv) @@ -281,6 +295,17 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv) return ret; } +static u32 guc_wopcm_size(struct drm_i915_private *dev_priv) +{ + u32 wopcm_size = GUC_WOPCM_TOP; + + /* On BXT, the top of WOPCM is reserved for RC6 context */ + if (IS_BROXTON(dev_priv)) + wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED; + + return wopcm_size; +} + /* * Load the GuC firmware blob into the MinuteIA. */ @@ -308,7 +333,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); /* init WOPCM */ - I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE); + I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv)); I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE); /* Enable MIA caching. GuC clock gating is disabled. */ @@ -372,65 +397,58 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv) } /** - * intel_guc_ucode_load() - load GuC uCode into the device + * intel_guc_setup() - finish preparing the GuC for activity * @dev: drm device * * Called from gem_init_hw() during driver loading and also after a GPU reset. * + * The main action required here is to load the GuC uCode into the device. * The firmware image should have already been fetched into memory by the - * earlier call to intel_guc_ucode_init(), so here we need only check that - * is succeeded, and then transfer the image to the h/w. + * earlier call to intel_guc_init(), so here we need only check that worked, + * and then transfer the image to the h/w. * * Return: non-zero code on error */ -int intel_guc_ucode_load(struct drm_device *dev) +int intel_guc_setup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; - int retries, err = 0; + const char *fw_path = guc_fw->guc_fw_path; + int retries, ret, err; - if (!i915.enable_guc_submission) - return 0; - - DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", + DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n", + fw_path, intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); - direct_interrupts_to_host(dev_priv); - - if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE) - return 0; - - if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS && - guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) - return -ENOEXEC; - - guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING; - - DRM_DEBUG_DRIVER("GuC fw fetch status %s\n", - intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status)); + /* Loading forbidden, or no firmware to load? */ + if (!i915.enable_guc_loading) { + err = 0; + goto fail; + } else if (fw_path == NULL || *fw_path == '\0') { + if (fw_path && *fw_path == '\0') + DRM_INFO("No GuC firmware known for this platform\n"); + err = -ENODEV; + goto fail; + } - switch (guc_fw->guc_fw_fetch_status) { - case GUC_FIRMWARE_FAIL: - /* something went wrong :( */ + /* Fetch failed, or already fetched but failed to load? 
*/ + if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) { err = -EIO; goto fail; - - case GUC_FIRMWARE_NONE: - case GUC_FIRMWARE_PENDING: - default: - /* "can't happen" */ - WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n", - guc_fw->guc_fw_path, - intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), - guc_fw->guc_fw_fetch_status); - err = -ENXIO; + } else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) { + err = -ENOEXEC; goto fail; - - case GUC_FIRMWARE_SUCCESS: - break; } + direct_interrupts_to_host(dev_priv); + + guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING; + + DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n", + intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), + intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); + err = i915_guc_submission_init(dev); if (err) goto fail; @@ -448,7 +466,7 @@ int intel_guc_ucode_load(struct drm_device *dev) */ err = i915_reset_guc(dev_priv); if (err) { - DRM_ERROR("GuC reset failed, err %d\n", err); + DRM_ERROR("GuC reset failed: %d\n", err); goto fail; } @@ -459,8 +477,8 @@ int intel_guc_ucode_load(struct drm_device *dev) if (--retries == 0) goto fail; - DRM_INFO("GuC fw load failed, err %d; will reset and " - "retry %d more time(s)\n", err, retries); + DRM_INFO("GuC fw load failed: %d; will reset and " + "retry %d more time(s)\n", err, retries); } guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS; @@ -482,7 +500,6 @@ int intel_guc_ucode_load(struct drm_device *dev) return 0; fail: - DRM_ERROR("GuC firmware load failed, err %d\n", err); if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING) guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL; @@ -490,7 +507,41 @@ fail: i915_guc_submission_disable(dev); i915_guc_submission_fini(dev); - return err; + /* + * We've failed to load the firmware :( + * + * Decide whether to disable GuC submission and fall back to + * execlist mode, and whether to hide the error by returning + * zero or to return -EIO, which the caller will treat as a + * nonfatal error (i.e. it doesn't prevent driver load, but + * marks the GPU as wedged until reset). + */ + if (i915.enable_guc_loading > 1) { + ret = -EIO; + } else if (i915.enable_guc_submission > 1) { + ret = -EIO; + } else { + ret = 0; + } + + if (err == 0) + DRM_INFO("GuC firmware load skipped\n"); + else if (ret == -EIO) + DRM_ERROR("GuC firmware load failed: %d\n", err); + else + DRM_INFO("GuC firmware load failed: %d\n", err); + + if (i915.enable_guc_submission) { + if (fw_path == NULL) + DRM_INFO("GuC submission without firmware not supported\n"); + if (ret == 0) + DRM_INFO("Falling back to execlist mode\n"); + else + DRM_ERROR("GuC init failed: %d\n", ret); + } + i915.enable_guc_submission = 0; + + return ret; } static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) @@ -552,9 +603,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) /* Header and uCode will be loaded to WOPCM. Size of the two. */ size = guc_fw->header_size + guc_fw->ucode_size; - - /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */ - if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) { + if (size > guc_wopcm_size(dev->dev_private)) { DRM_ERROR("Firmware is too large to fit in WOPCM\n"); goto fail; } @@ -617,22 +666,25 @@ fail: } /** - * intel_guc_ucode_init() - define parameters and fetch firmware + * intel_guc_init() - define parameters and fetch firmware * @dev: drm device * * Called early during driver load, but after GEM is initialised. 
* * The firmware will be transferred to the GuC's memory later, - * when intel_guc_ucode_load() is called. + * when intel_guc_setup() is called. */ -void intel_guc_ucode_init(struct drm_device *dev) +void intel_guc_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; const char *fw_path; - if (!HAS_GUC_SCHED(dev)) - i915.enable_guc_submission = false; + /* A negative value means "use platform default" */ + if (i915.enable_guc_loading < 0) + i915.enable_guc_loading = HAS_GUC_UCODE(dev); + if (i915.enable_guc_submission < 0) + i915.enable_guc_submission = HAS_GUC_SCHED(dev); if (!HAS_GUC_UCODE(dev)) { fw_path = NULL; @@ -640,27 +692,26 @@ void intel_guc_ucode_init(struct drm_device *dev) fw_path = I915_SKL_GUC_UCODE; guc_fw->guc_fw_major_wanted = 6; guc_fw->guc_fw_minor_wanted = 1; + } else if (IS_BROXTON(dev)) { + fw_path = I915_BXT_GUC_UCODE; + guc_fw->guc_fw_major_wanted = 8; + guc_fw->guc_fw_minor_wanted = 7; } else { - i915.enable_guc_submission = false; fw_path = ""; /* unknown device */ } - if (!i915.enable_guc_submission) - return; - guc_fw->guc_dev = dev; guc_fw->guc_fw_path = fw_path; guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE; + /* Early (and silent) return if GuC loading is disabled */ + if (!i915.enable_guc_loading) + return; if (fw_path == NULL) return; - - if (*fw_path == '\0') { - DRM_ERROR("No GuC firmware known for this platform\n"); - guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL; + if (*fw_path == '\0') return; - } guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING; DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path); @@ -669,10 +720,10 @@ void intel_guc_ucode_init(struct drm_device *dev) } /** - * intel_guc_ucode_fini() - clean up all allocated resources + * intel_guc_fini() - clean up all allocated resources * @dev: drm device */ -void intel_guc_ucode_fini(struct drm_device *dev) +void intel_guc_fini(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 2c3bd9c2573e..6b29da9bba38 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1678,35 +1678,12 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - u32 val; - /* Enable clock channels for this port */ - mutex_lock(&dev_priv->sb_lock); - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); - val = 0; - if (pipe) - val |= (1<<21); - else - val &= ~(1<<21); - val |= 0x001000c4; - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); + vlv_phy_pre_encoder_enable(encoder); /* HDMI 1.0V-2dB */ - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040); - vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); - - /* Program 
lane clock */ - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); - mutex_unlock(&dev_priv->sb_lock); + vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a, + 0x2b247878); intel_hdmi->set_infoframes(&encoder->base, intel_crtc->config->has_hdmi_sink, @@ -1719,207 +1696,27 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - intel_hdmi_prepare(encoder); - /* Program Tx lane resets to default */ - mutex_lock(&dev_priv->sb_lock); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), - DPIO_PCS_TX_LANE2_RESET | - DPIO_PCS_TX_LANE1_RESET); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), - DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | - DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | - (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | - DPIO_PCS_CLK_SOFT_RESET); - - /* Fix up inter-pair skew failure */ - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); - - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000); - vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN); - mutex_unlock(&dev_priv->sb_lock); -} - -static void chv_data_lane_soft_reset(struct intel_encoder *encoder, - bool reset) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base)); - struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); - enum pipe pipe = crtc->pipe; - uint32_t val; - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); - if (reset) - val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); - else - val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); - - if (crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); - if (reset) - val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); - else - val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); - } - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); - val |= CHV_PCS_REQ_SOFTRESET_EN; - if (reset) - val &= ~DPIO_PCS_CLK_SOFT_RESET; - else - val |= DPIO_PCS_CLK_SOFT_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); - - if (crtc->config->lane_count > 2) { - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); - val |= CHV_PCS_REQ_SOFTRESET_EN; - if (reset) - val &= ~DPIO_PCS_CLK_SOFT_RESET; - else - val |= DPIO_PCS_CLK_SOFT_RESET; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); - } + vlv_phy_pre_pll_enable(encoder); } static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_device *dev = encoder->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel ch = vlv_dport_to_channel(dport); - enum pipe pipe = intel_crtc->pipe; - u32 val; - 
intel_hdmi_prepare(encoder); - /* - * Must trick the second common lane into life. - * Otherwise we can't even access the PLL. - */ - if (ch == DPIO_CH0 && pipe == PIPE_B) - dport->release_cl2_override = - !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true); - - chv_phy_powergate_lanes(encoder, true, 0x0); - - mutex_lock(&dev_priv->sb_lock); - - /* Assert data lane reset */ - chv_data_lane_soft_reset(encoder, true); - - /* program left/right clock distribution */ - if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); - val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); - if (ch == DPIO_CH0) - val |= CHV_BUFLEFTENA1_FORCE; - if (ch == DPIO_CH1) - val |= CHV_BUFRIGHTENA1_FORCE; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); - } else { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); - val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); - if (ch == DPIO_CH0) - val |= CHV_BUFLEFTENA2_FORCE; - if (ch == DPIO_CH1) - val |= CHV_BUFRIGHTENA2_FORCE; - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); - } - - /* program clock channel usage */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch)); - val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; - if (pipe != PIPE_B) - val &= ~CHV_PCS_USEDCLKCHANNEL; - else - val |= CHV_PCS_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch)); - val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE; - if (pipe != PIPE_B) - val &= ~CHV_PCS_USEDCLKCHANNEL; - else - val |= CHV_PCS_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val); - - /* - * This a a bit weird since generally CL - * matches the pipe, but here we need to - * pick the CL based on the port. - */ - val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch)); - if (pipe != PIPE_B) - val &= ~CHV_CMN_USEDCLKCHANNEL; - else - val |= CHV_CMN_USEDCLKCHANNEL; - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val); - - mutex_unlock(&dev_priv->sb_lock); + chv_phy_pre_pll_enable(encoder); } static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe; - u32 val; - - mutex_lock(&dev_priv->sb_lock); - - /* disable left/right clock distribution */ - if (pipe != PIPE_B) { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); - val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); - } else { - val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); - val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); - vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); - } - - mutex_unlock(&dev_priv->sb_lock); - - /* - * Leave the power down bit cleared for at least one - * lane so that chv_powergate_phy_ch() will power - * on something when the channel is otherwise unused. - * When the port is off and the override is removed - * the lanes power down anyway, so otherwise it doesn't - * really matter what the state of power down bits is - * after this. 
- */ - chv_phy_powergate_lanes(encoder, false, 0x0); + chv_phy_post_pll_disable(encoder); } static void vlv_hdmi_post_disable(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - /* Reset lanes to avoid HDMI flicker (VLV w/a) */ - mutex_lock(&dev_priv->sb_lock); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000); - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060); - mutex_unlock(&dev_priv->sb_lock); + vlv_phy_reset_lanes(encoder); } static void chv_hdmi_post_disable(struct intel_encoder *encoder) @@ -1944,138 +1741,12 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; - enum dpio_channel ch = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - int data, i, stagger; - u32 val; - mutex_lock(&dev_priv->sb_lock); - - /* allow hardware to manage TX FIFO reset source */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); - val &= ~DPIO_LANEDESKEW_STRAP_OVRD; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); - val &= ~DPIO_LANEDESKEW_STRAP_OVRD; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); - - /* Program Tx latency optimal setting */ - for (i = 0; i < 4; i++) { - /* Set the upar bit */ - data = (i == 1) ? 0x0 : 0x1; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), - data << DPIO_UPAR_SHIFT); - } - - /* Data lane stagger programming */ - if (intel_crtc->config->port_clock > 270000) - stagger = 0x18; - else if (intel_crtc->config->port_clock > 135000) - stagger = 0xd; - else if (intel_crtc->config->port_clock > 67500) - stagger = 0x7; - else if (intel_crtc->config->port_clock > 33750) - stagger = 0x4; - else - stagger = 0x2; - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch)); - val |= DPIO_TX2_STAGGER_MASK(0x1f); - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch)); - val |= DPIO_TX2_STAGGER_MASK(0x1f); - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val); - - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch), - DPIO_LANESTAGGER_STRAP(stagger) | - DPIO_LANESTAGGER_STRAP_OVRD | - DPIO_TX1_STAGGER_MASK(0x1f) | - DPIO_TX1_STAGGER_MULT(6) | - DPIO_TX2_STAGGER_MULT(0)); - - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch), - DPIO_LANESTAGGER_STRAP(stagger) | - DPIO_LANESTAGGER_STRAP_OVRD | - DPIO_TX1_STAGGER_MASK(0x1f) | - DPIO_TX1_STAGGER_MULT(7) | - DPIO_TX2_STAGGER_MULT(5)); - - /* Deassert data lane reset */ - chv_data_lane_soft_reset(encoder, false); - - /* Clear calc init */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); - val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); - val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); - val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); - val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); - val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK); - val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5; - vlv_dpio_write(dev_priv, 
pipe, VLV_PCS23_DW10(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch)); - val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); - val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch)); - val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK); - val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val); + chv_phy_pre_encoder_enable(encoder); /* FIXME: Program the support xxx V-dB */ /* Use 800mV-0dB */ - for (i = 0; i < 4; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); - val &= ~DPIO_SWING_DEEMPH9P5_MASK; - val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val); - } - - for (i = 0; i < 4; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); - - val &= ~DPIO_SWING_MARGIN000_MASK; - val |= 102 << DPIO_SWING_MARGIN000_SHIFT; - - /* - * Supposedly this value shouldn't matter when unique transition - * scale is disabled, but in fact it does matter. Let's just - * always program the same value and hope it's OK. - */ - val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); - val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT; - - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); - } - - /* - * The document said it needs to set bit 27 for ch0 and bit 26 - * for ch1. Might be a typo in the doc. - * For now, for this unique transition scale selection, set bit - * 27 for ch0 and ch1. - */ - for (i = 0; i < 4; i++) { - val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); - val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); - } - - /* Start swing calculation */ - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); - val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; - vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); - - val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); - val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; - vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); - - mutex_unlock(&dev_priv->sb_lock); + chv_set_phy_signal_level(encoder, 128, 102, false); intel_hdmi->set_infoframes(&encoder->base, intel_crtc->config->has_hdmi_sink, @@ -2086,10 +1757,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) vlv_wait_port_ready(dev_priv, dport, 0x0); /* Second common lane will stay alive on its own now */ - if (dport->release_cl2_override) { - chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false); - dport->release_cl2_override = false; - } + chv_phy_release_cl2_override(encoder); } static void intel_hdmi_destroy(struct drm_connector *connector) @@ -2114,7 +1782,6 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { .get_modes = intel_hdmi_get_modes, .mode_valid = intel_hdmi_mode_valid, - .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { @@ -2277,7 +1944,7 @@ void intel_hdmi_init(struct drm_device *dev, intel_encoder = &intel_dig_port->base; drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, - DRM_MODE_ENCODER_TMDS, NULL); + DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port)); intel_encoder->compute_config = intel_hdmi_compute_config; if (HAS_PCH_SPLIT(dev)) { diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c 
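The intel_hdmi.c hunks above replace a large block of open-coded DPIO sideband writes with the shared VLV/CHV PHY helpers; the 800mV-0dB swing values survive as plain parameters to chv_set_phy_signal_level(). A sketch of how a swing table could drive the new helper (the table and wrapper are hypothetical; the helper and the 128/102/false values come from the call above):

struct chv_phy_swing {
	u32 deemph;		/* DPIO_SWING_DEEMPH9P5 code */
	u32 margin;		/* DPIO_SWING_MARGIN000 code */
	bool uniq_trans_scale;	/* unique transition scale enable */
};

/* Hypothetical table; HDMI only ever uses the 800mV-0dB entry. */
static const struct chv_phy_swing chv_hdmi_swing[] = {
	{ 128, 102, false },	/* as in chv_hdmi_pre_enable() above */
};

static void chv_hdmi_program_swing(struct intel_encoder *encoder, int sel)
{
	const struct chv_phy_swing *s = &chv_hdmi_swing[sel];

	chv_set_phy_signal_level(encoder, s->deemph, s->margin,
				 s->uniq_trans_scale);
}

The intel_hotplug.c hunks that follow continue the drm_device to drm_i915_private conversion through the hotplug paths.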
index bee673005d48..38eeca7a6e72 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -220,7 +220,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) } } if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); intel_runtime_pm_put(dev_priv); @@ -346,7 +346,7 @@ static void i915_hotplug_work_func(struct work_struct *work) /** * intel_hpd_irq_handler - main hotplug irq handler - * @dev: drm device + * @dev_priv: drm_i915_private * @pin_mask: a mask of hpd pins that have triggered the irq * @long_mask: a mask of hpd pins that may be long hpd pulses * @@ -360,10 +360,9 @@ static void i915_hotplug_work_func(struct work_struct *work) * Here, we do hotplug irq storm detection and mitigation, and pass further * processing to appropriate bottom halves. */ -void intel_hpd_irq_handler(struct drm_device *dev, +void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 pin_mask, u32 long_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; int i; enum port port; bool storm_detected = false; @@ -407,7 +406,7 @@ void intel_hpd_irq_handler(struct drm_device *dev, * hotplug bits itself. So only WARN about unexpected * interrupts on saner platforms. */ - WARN_ONCE(!HAS_GMCH_DISPLAY(dev), + WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv), "Received HPD interrupt on pin %d although disabled\n", i); continue; } @@ -427,7 +426,7 @@ void intel_hpd_irq_handler(struct drm_device *dev, } if (storm_detected) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock(&dev_priv->irq_lock); /* @@ -485,7 +484,7 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) */ spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); + dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 42eac37de047..5c191a1afaaf 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -224,10 +224,16 @@ enum { FAULT_AND_CONTINUE /* Unsupported */ }; #define GEN8_CTX_ID_SHIFT 32 +#define GEN8_CTX_ID_WIDTH 21 #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 #define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 -static int intel_lr_context_pin(struct intel_context *ctx, +/* Typical size of the average request (2 pipecontrols and a MI_BB) */ +#define EXECLISTS_REQUEST_SIZE 64 /* bytes */ + +static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, + struct intel_engine_cs *engine); +static int intel_lr_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine); /** @@ -240,23 +246,22 @@ static int intel_lr_context_pin(struct intel_context *ctx, * * Return: 1 if Execlists is supported and has to be enabled. */ -int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists) +int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists) { - WARN_ON(i915.enable_ppgtt == -1); - /* On platforms with execlist available, vGPU will only * support execlist mode, no ring buffer mode. 
*/ - if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev)) + if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv)) return 1; - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) return 1; if (enable_execlists == 0) return 0; - if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) && + if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && + USES_PPGTT(dev_priv) && i915.use_mmio_flip >= 0) return 1; @@ -266,19 +271,19 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists static void logical_ring_init_platform_invariants(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; + struct drm_i915_private *dev_priv = engine->i915; - if (IS_GEN8(dev) || IS_GEN9(dev)) + if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) engine->idle_lite_restore_wa = ~0; - engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && + engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) && (engine->id == VCS || engine->id == VCS2); engine->ctx_desc_template = GEN8_CTX_VALID; - engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << + engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev_priv) << GEN8_CTX_ADDRESSING_MODE_SHIFT; - if (IS_GEN8(dev)) + if (IS_GEN8(dev_priv)) engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE; @@ -297,7 +302,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine) * descriptor for a pinned context * * @ctx: Context to work on - * @ring: Engine the descriptor will be used with + * @engine: Engine the descriptor will be used with * * The context descriptor encodes various attributes of a context, * including its GTT address and some flags. Because it's fairly @@ -305,62 +310,41 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine) * which remains valid until the context is unpinned. * * This is what a descriptor looks like, from LSB to MSB: - * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) + * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) * bits 12-31: LRCA, GTT address of (the HWSP of) this context - * bits 32-51: ctx ID, a globally unique tag (the LRCA again!) 
- * bits 52-63: reserved, may encode the engine ID (for GuC) + * bits 32-52: ctx ID, a globally unique tag + * bits 53-54: mbz, reserved for use by hardware + * bits 55-63: group ID, currently unused and set to 0 */ static void -intel_lr_context_descriptor_update(struct intel_context *ctx, +intel_lr_context_descriptor_update(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { - uint64_t lrca, desc; + struct intel_context *ce = &ctx->engine[engine->id]; + u64 desc; - lrca = ctx->engine[engine->id].lrc_vma->node.start + - LRC_PPHWSP_PN * PAGE_SIZE; + BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH)); - desc = engine->ctx_desc_template; /* bits 0-11 */ - desc |= lrca; /* bits 12-31 */ - desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */ + desc = engine->ctx_desc_template; /* bits 0-11 */ + desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE; + /* bits 12-31 */ + desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ - ctx->engine[engine->id].lrc_desc = desc; + ce->lrc_desc = desc; } -uint64_t intel_lr_context_descriptor(struct intel_context *ctx, +uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { return ctx->engine[engine->id].lrc_desc; } -/** - * intel_execlists_ctx_id() - get the Execlists Context ID - * @ctx: Context to get the ID for - * @ring: Engine to get the ID for - * - * Do not confuse with ctx->id! Unfortunately we have a name overload - * here: the old context ID we pass to userspace as a handler so that - * they can refer to a context, and the new context ID we pass to the - * ELSP so that the GPU can inform us of the context status via - * interrupts. - * - * The context ID is a portion of the context descriptor, so we can - * just extract the required part from the cached descriptor. - * - * Return: 20-bits globally unique context ID. - */ -u32 intel_execlists_ctx_id(struct intel_context *ctx, - struct intel_engine_cs *engine) -{ - return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT; -} - static void execlists_elsp_write(struct drm_i915_gem_request *rq0, struct drm_i915_gem_request *rq1) { struct intel_engine_cs *engine = rq0->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = rq0->i915; uint64_t desc[2]; if (rq1) { @@ -442,7 +426,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) * If irqs are not active generate a warning as batches that finish * without the irqs may get lost and a GPU Hang may occur. 
*/ - WARN_ON(!intel_irqs_enabled(engine->dev->dev_private)); + WARN_ON(!intel_irqs_enabled(engine->i915)); /* Try to read in pairs */ list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue, @@ -453,8 +437,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) /* Same ctx: ignore first request, as second request * will update tail past first request's workload */ cursor->elsp_submitted = req0->elsp_submitted; - list_move_tail(&req0->execlist_link, - &engine->execlist_retired_req_list); + list_del(&req0->execlist_link); + i915_gem_request_unreference(req0); req0 = cursor; } else { req1 = cursor; @@ -486,7 +470,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine) } static unsigned int -execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id) +execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id) { struct drm_i915_gem_request *head_req; @@ -496,19 +480,16 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id) struct drm_i915_gem_request, execlist_link); - if (!head_req) - return 0; - - if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id)) - return 0; + if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id))) + return 0; WARN(head_req->elsp_submitted == 0, "Never submitted head request\n"); if (--head_req->elsp_submitted > 0) return 0; - list_move_tail(&head_req->execlist_link, - &engine->execlist_retired_req_list); + list_del(&head_req->execlist_link); + i915_gem_request_unreference(head_req); return 1; } @@ -517,7 +498,7 @@ static u32 get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, u32 *context_id) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 status; read_pointer %= GEN8_CSB_ENTRIES; @@ -543,7 +524,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, static void intel_lrc_irq_handler(unsigned long data) { struct intel_engine_cs *engine = (struct intel_engine_cs *)data; - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 status_pointer; unsigned int read_pointer, write_pointer; u32 csb[GEN8_CSB_ENTRIES][2]; @@ -612,11 +593,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request) struct drm_i915_gem_request *cursor; int num_elements = 0; - if (request->ctx != request->i915->kernel_context) - intel_lr_context_pin(request->ctx, engine); - - i915_gem_request_reference(request); - spin_lock_bh(&engine->execlist_lock); list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) @@ -633,12 +609,14 @@ static void execlists_context_queue(struct drm_i915_gem_request *request) if (request->ctx == tail_req->ctx) { WARN(tail_req->elsp_submitted != 0, "More than 2 already-submitted reqs queued\n"); - list_move_tail(&tail_req->execlist_link, - &engine->execlist_retired_req_list); + list_del(&tail_req->execlist_link); + i915_gem_request_unreference(tail_req); } } + i915_gem_request_reference(request); list_add_tail(&request->execlist_link, &engine->execlist_queue); + request->ctx_hw_id = request->ctx->hw_id; if (num_elements == 0) execlists_context_unqueue(engine); @@ -698,9 +676,23 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req, int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) { - int ret = 0; + struct intel_engine_cs *engine = request->engine; + struct intel_context *ce = 
&request->ctx->engine[engine->id]; + int ret; + + /* Flush enough space to reduce the likelihood of waiting after + * we start building the request - in which case we will just + * have to repeat work. + */ + request->reserved_space += EXECLISTS_REQUEST_SIZE; - request->ringbuf = request->ctx->engine[request->engine->id].ringbuf; + if (!ce->state) { + ret = execlists_context_deferred_alloc(request->ctx, engine); + if (ret) + return ret; + } + + request->ringbuf = ce->ringbuf; if (i915.enable_guc_submission) { /* @@ -708,16 +700,39 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request * going any further, as the i915_add_request() call * later on mustn't fail ... */ - struct intel_guc *guc = &request->i915->guc; - - ret = i915_guc_wq_check_space(guc->execbuf_client); + ret = i915_guc_wq_check_space(request); if (ret) return ret; } - if (request->ctx != request->i915->kernel_context) - ret = intel_lr_context_pin(request->ctx, request->engine); + ret = intel_lr_context_pin(request->ctx, engine); + if (ret) + return ret; + + ret = intel_ring_begin(request, 0); + if (ret) + goto err_unpin; + + if (!ce->initialised) { + ret = engine->init_context(request); + if (ret) + goto err_unpin; + + ce->initialised = true; + } + + /* Note that after this point, we have committed to using + * this request as it is being used to both track the + * state of engine initialisation and liveness of the + * golden renderstate above. Think twice before you try + * to cancel/unwind this request now. + */ + + request->reserved_space -= EXECLISTS_REQUEST_SIZE; + return 0; +err_unpin: + intel_lr_context_unpin(request->ctx, engine); return ret; } @@ -734,7 +749,6 @@ static int intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) { struct intel_ringbuffer *ringbuf = request->ringbuf; - struct drm_i915_private *dev_priv = request->i915; struct intel_engine_cs *engine = request->engine; intel_logical_ring_advance(ringbuf); @@ -753,40 +767,23 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) if (intel_engine_stopped(engine)) return 0; - if (engine->last_context != request->ctx) { - if (engine->last_context) - intel_lr_context_unpin(engine->last_context, engine); - if (request->ctx != request->i915->kernel_context) { - intel_lr_context_pin(request->ctx, engine); - engine->last_context = request->ctx; - } else { - engine->last_context = NULL; - } - } + /* We keep the previous context alive until we retire the following + * request. This ensures that the context object is still pinned + * for any residual writes the HW makes into it on the context switch + * into the next object following the breadcrumb. Otherwise, we may + * retire the context too early. + */ + request->previous_context = engine->last_context; + engine->last_context = request->ctx; - if (dev_priv->guc.execbuf_client) - i915_guc_submit(dev_priv->guc.execbuf_client, request); + if (i915.enable_guc_submission) + i915_guc_submit(request); else execlists_context_queue(request); return 0; } -int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) -{ - /* - * The first call merely notes the reserve request and is common for - * all back ends. The subsequent localised _begin() call actually - * ensures that the reservation is available. Without the begin, if - * the request creator immediately submitted the request without - * adding any commands to it then there might not actually be - * sufficient room for the submission commands.
- */ - intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); - - return intel_ring_begin(request, 0); -} - /** * execlists_submission() - submit a batchbuffer for execution, Execlists style * @dev: DRM device. @@ -881,28 +878,18 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, return 0; } -void intel_execlists_retire_requests(struct intel_engine_cs *engine) +void intel_execlists_cancel_requests(struct intel_engine_cs *engine) { struct drm_i915_gem_request *req, *tmp; - struct list_head retired_list; + LIST_HEAD(cancel_list); - WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); - if (list_empty(&engine->execlist_retired_req_list)) - return; + WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex)); - INIT_LIST_HEAD(&retired_list); spin_lock_bh(&engine->execlist_lock); - list_replace_init(&engine->execlist_retired_req_list, &retired_list); + list_replace_init(&engine->execlist_queue, &cancel_list); spin_unlock_bh(&engine->execlist_lock); - list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) { - struct intel_context *ctx = req->ctx; - struct drm_i915_gem_object *ctx_obj = - ctx->engine[engine->id].state; - - if (ctx_obj && (ctx != req->i915->kernel_context)) - intel_lr_context_unpin(ctx, engine); - + list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) { list_del(&req->execlist_link); i915_gem_request_unreference(req); } @@ -910,7 +897,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine) void intel_logical_ring_stop(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret; if (!intel_engine_initialized(engine)) @@ -946,25 +933,26 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) return 0; } -static int intel_lr_context_do_pin(struct intel_context *ctx, - struct intel_engine_cs *engine) +static int intel_lr_context_pin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; - struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf; + struct drm_i915_private *dev_priv = ctx->i915; + struct intel_context *ce = &ctx->engine[engine->id]; void *vaddr; u32 *lrc_reg_state; int ret; - WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); + lockdep_assert_held(&ctx->i915->dev->struct_mutex); - ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, - PIN_OFFSET_BIAS | GUC_WOPCM_TOP); + if (ce->pin_count++) + return 0; + + ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN, + PIN_OFFSET_BIAS | GUC_WOPCM_TOP); if (ret) - return ret; + goto err; - vaddr = i915_gem_object_pin_map(ctx_obj); + vaddr = i915_gem_object_pin_map(ce->state); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); goto unpin_ctx_obj; @@ -972,65 +960,54 @@ static int intel_lr_context_do_pin(struct intel_context *ctx, lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; - ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf); + ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf); if (ret) goto unpin_map; - ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); + i915_gem_context_reference(ctx); + ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state); intel_lr_context_descriptor_update(ctx, engine); - lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; - ctx->engine[engine->id].lrc_reg_state = 
lrc_reg_state; - ctx_obj->dirty = true; + + lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start; + ce->lrc_reg_state = lrc_reg_state; + ce->state->dirty = true; /* Invalidate GuC TLB. */ if (i915.enable_guc_submission) I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); - return ret; + return 0; unpin_map: - i915_gem_object_unpin_map(ctx_obj); + i915_gem_object_unpin_map(ce->state); unpin_ctx_obj: - i915_gem_object_ggtt_unpin(ctx_obj); - + i915_gem_object_ggtt_unpin(ce->state); +err: + ce->pin_count = 0; return ret; } -static int intel_lr_context_pin(struct intel_context *ctx, - struct intel_engine_cs *engine) +void intel_lr_context_unpin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) { - int ret = 0; + struct intel_context *ce = &ctx->engine[engine->id]; - if (ctx->engine[engine->id].pin_count++ == 0) { - ret = intel_lr_context_do_pin(ctx, engine); - if (ret) - goto reset_pin_count; + lockdep_assert_held(&ctx->i915->dev->struct_mutex); + GEM_BUG_ON(ce->pin_count == 0); - i915_gem_context_reference(ctx); - } - return ret; + if (--ce->pin_count) + return; -reset_pin_count: - ctx->engine[engine->id].pin_count = 0; - return ret; -} + intel_unpin_ringbuffer_obj(ce->ringbuf); -void intel_lr_context_unpin(struct intel_context *ctx, - struct intel_engine_cs *engine) -{ - struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; + i915_gem_object_unpin_map(ce->state); + i915_gem_object_ggtt_unpin(ce->state); - WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex)); - if (--ctx->engine[engine->id].pin_count == 0) { - i915_gem_object_unpin_map(ctx_obj); - intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf); - i915_gem_object_ggtt_unpin(ctx_obj); - ctx->engine[engine->id].lrc_vma = NULL; - ctx->engine[engine->id].lrc_desc = 0; - ctx->engine[engine->id].lrc_reg_state = NULL; + ce->lrc_vma = NULL; + ce->lrc_desc = 0; + ce->lrc_reg_state = NULL; - i915_gem_context_unreference(ctx); - } + i915_gem_context_unreference(ctx); } static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) @@ -1038,9 +1015,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) int ret, i; struct intel_engine_cs *engine = req->engine; struct intel_ringbuffer *ringbuf = req->ringbuf; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_workarounds *w = &dev_priv->workarounds; + struct i915_workarounds *w = &req->i915->workarounds; if (w->count == 0) return 0; @@ -1111,7 +1086,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, * this batch updates GEN8_L3SQCREG4 with default value we need to * set this bit here to retain the WA during flush. 
*/ - if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0)) + if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0)) l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | @@ -1200,7 +1175,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ - if (IS_BROADWELL(engine->dev)) { + if (IS_BROADWELL(engine->i915)) { int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index); if (rc < 0) return rc; @@ -1272,12 +1247,11 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, uint32_t *offset) { int ret; - struct drm_device *dev = engine->dev; uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); /* WaDisableCtxRestoreArbitration:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) || + IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ @@ -1298,12 +1272,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine, uint32_t *const batch, uint32_t *offset) { - struct drm_device *dev = engine->dev; uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) || + IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) { wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); wa_ctx_emit(batch, index, @@ -1312,7 +1285,7 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine, } /* WaClearTdlStateAckDirtyBits:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { + if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) { wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4)); wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK); @@ -1331,8 +1304,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine, } /* WaDisableCtxRestoreArbitration:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) || + IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); @@ -1344,11 +1317,13 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size) { int ret; - engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev, + engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev, PAGE_ALIGN(size)); - if (!engine->wa_ctx.obj) { + if (IS_ERR(engine->wa_ctx.obj)) { DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); - return -ENOMEM; + ret = PTR_ERR(engine->wa_ctx.obj); + engine->wa_ctx.obj = NULL; + return ret; } ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0); @@ -1382,9 +1357,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) WARN_ON(engine->id != RCS); /* update this when WA for higher Gen are added */ - if (INTEL_INFO(engine->dev)->gen > 9) { + if (INTEL_GEN(engine->i915) > 9) { DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", - INTEL_INFO(engine->dev)->gen); + INTEL_GEN(engine->i915)); return 0; } @@ -1404,7 +1379,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) batch = kmap_atomic(page); offset = 0; - if 
(INTEL_INFO(engine->dev)->gen == 8) { + if (IS_GEN8(engine->i915)) { ret = gen8_init_indirectctx_bb(engine, &wa_ctx->indirect_ctx, batch, @@ -1418,7 +1393,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) &offset); if (ret) goto out; - } else if (INTEL_INFO(engine->dev)->gen == 9) { + } else if (IS_GEN9(engine->i915)) { ret = gen9_init_indirectctx_bb(engine, &wa_ctx->indirect_ctx, batch, @@ -1444,7 +1419,7 @@ out: static void lrc_init_hws(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; I915_WRITE(RING_HWS_PGA(engine->mmio_base), (u32)engine->status_page.gfx_addr); @@ -1453,8 +1428,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine) static int gen8_init_common_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned int next_context_status_buffer_hw; lrc_init_hws(engine); @@ -1501,8 +1475,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) static int gen8_init_render_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; int ret; ret = gen8_init_common_ring(engine); @@ -1579,7 +1552,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, if (req->ctx->ppgtt && (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) { if (!USES_FULL_48BIT_PPGTT(req->i915) && - !intel_vgpu_active(req->i915->dev)) { + !intel_vgpu_active(req->i915)) { ret = intel_logical_ring_emit_pdps(req); if (ret) return ret; @@ -1607,8 +1580,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1627,8 +1599,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1645,8 +1616,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, { struct intel_ringbuffer *ringbuf = request->ringbuf; struct intel_engine_cs *engine = ringbuf->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = request->i915; uint32_t cmd; int ret; @@ -1714,7 +1684,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL * pipe control. 
*/ - if (IS_GEN9(engine->dev)) + if (IS_GEN9(request->i915)) vf_flush_wa = true; } @@ -1782,11 +1752,6 @@ static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno) */ #define WA_TAIL_DWORDS 2 -static inline u32 hws_seqno_address(struct intel_engine_cs *engine) -{ - return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR; -} - static int gen8_emit_request(struct drm_i915_gem_request *request) { struct intel_ringbuffer *ringbuf = request->ringbuf; @@ -1802,7 +1767,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request) intel_logical_ring_emit(ringbuf, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); intel_logical_ring_emit(ringbuf, - hws_seqno_address(request->engine) | + intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT); intel_logical_ring_emit(ringbuf, 0); intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); @@ -1832,7 +1797,8 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request) (PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL | PIPE_CONTROL_QW_WRITE)); - intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine)); + intel_logical_ring_emit(ringbuf, + intel_hws_seqno_address(request->engine)); intel_logical_ring_emit(ringbuf, 0); intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); /* We're thrashing one dword of HWS. */ @@ -1911,7 +1877,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) tasklet_kill(&engine->irq_tasklet); - dev_priv = engine->dev->dev_private; + dev_priv = engine->i915; if (engine->buffer) { intel_logical_ring_stop(engine); @@ -1928,18 +1894,18 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) i915_gem_object_unpin_map(engine->status_page.obj); engine->status_page.obj = NULL; } + intel_lr_context_unpin(dev_priv->kernel_context, engine); engine->idle_lite_restore_wa = 0; engine->disable_lite_restore_wa = false; engine->ctx_desc_template = 0; lrc_destroy_wa_ctx_obj(engine); - engine->dev = NULL; + engine->i915 = NULL; } static void -logical_ring_default_vfuncs(struct drm_device *dev, - struct intel_engine_cs *engine) +logical_ring_default_vfuncs(struct intel_engine_cs *engine) { /* Default vfuncs which can be overridden by each engine.
*/ engine->init_hw = gen8_init_common_ring; @@ -1950,7 +1916,7 @@ logical_ring_default_vfuncs(struct drm_device *dev, engine->emit_bb_start = gen8_emit_bb_start; engine->get_seqno = gen8_get_seqno; engine->set_seqno = gen8_set_seqno; - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) { engine->irq_seqno_barrier = bxt_a_seqno_barrier; engine->set_seqno = bxt_a_set_seqno; } @@ -1961,6 +1927,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; + init_waitqueue_head(&engine->irq_queue); } static int @@ -1981,32 +1948,68 @@ lrc_setup_hws(struct intel_engine_cs *engine, return 0; } -static int -logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine) +static const struct logical_ring_info { + const char *name; + unsigned exec_id; + unsigned guc_id; + u32 mmio_base; + unsigned irq_shift; +} logical_rings[] = { + [RCS] = { + .name = "render ring", + .exec_id = I915_EXEC_RENDER, + .guc_id = GUC_RENDER_ENGINE, + .mmio_base = RENDER_RING_BASE, + .irq_shift = GEN8_RCS_IRQ_SHIFT, + }, + [BCS] = { + .name = "blitter ring", + .exec_id = I915_EXEC_BLT, + .guc_id = GUC_BLITTER_ENGINE, + .mmio_base = BLT_RING_BASE, + .irq_shift = GEN8_BCS_IRQ_SHIFT, + }, + [VCS] = { + .name = "bsd ring", + .exec_id = I915_EXEC_BSD, + .guc_id = GUC_VIDEO_ENGINE, + .mmio_base = GEN6_BSD_RING_BASE, + .irq_shift = GEN8_VCS1_IRQ_SHIFT, + }, + [VCS2] = { + .name = "bsd2 ring", + .exec_id = I915_EXEC_BSD, + .guc_id = GUC_VIDEO_ENGINE2, + .mmio_base = GEN8_BSD2_RING_BASE, + .irq_shift = GEN8_VCS2_IRQ_SHIFT, + }, + [VECS] = { + .name = "video enhancement ring", + .exec_id = I915_EXEC_VEBOX, + .guc_id = GUC_VIDEOENHANCE_ENGINE, + .mmio_base = VEBOX_RING_BASE, + .irq_shift = GEN8_VECS_IRQ_SHIFT, + }, +}; + +static struct intel_engine_cs * +logical_ring_setup(struct drm_device *dev, enum intel_engine_id id) { + const struct logical_ring_info *info = &logical_rings[id]; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_context *dctx = dev_priv->kernel_context; + struct intel_engine_cs *engine = &dev_priv->engine[id]; enum forcewake_domains fw_domains; - int ret; - - /* Intentionally left blank. */ - engine->buffer = NULL; - - engine->dev = dev; - INIT_LIST_HEAD(&engine->active_list); - INIT_LIST_HEAD(&engine->request_list); - i915_gem_batch_pool_init(dev, &engine->batch_pool); - init_waitqueue_head(&engine->irq_queue); - INIT_LIST_HEAD(&engine->buffers); - INIT_LIST_HEAD(&engine->execlist_queue); - INIT_LIST_HEAD(&engine->execlist_retired_req_list); - spin_lock_init(&engine->execlist_lock); + engine->id = id; + engine->name = info->name; + engine->exec_id = info->exec_id; + engine->guc_id = info->guc_id; + engine->mmio_base = info->mmio_base; - tasklet_init(&engine->irq_tasklet, - intel_lrc_irq_handler, (unsigned long)engine); + engine->i915 = dev_priv; - logical_ring_init_platform_invariants(engine); + /* Intentionally left blank. 
*/ + engine->buffer = NULL; fw_domains = intel_uncore_forcewake_for_reg(dev_priv, RING_ELSP(engine), @@ -2022,20 +2025,44 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine) engine->fw_domains = fw_domains; + INIT_LIST_HEAD(&engine->active_list); + INIT_LIST_HEAD(&engine->request_list); + INIT_LIST_HEAD(&engine->buffers); + INIT_LIST_HEAD(&engine->execlist_queue); + spin_lock_init(&engine->execlist_lock); + + tasklet_init(&engine->irq_tasklet, + intel_lrc_irq_handler, (unsigned long)engine); + + logical_ring_init_platform_invariants(engine); + logical_ring_default_vfuncs(engine); + logical_ring_default_irqs(engine, info->irq_shift); + + intel_engine_init_hangcheck(engine); + i915_gem_batch_pool_init(dev, &engine->batch_pool); + + return engine; +} + +static int +logical_ring_init(struct intel_engine_cs *engine) +{ + struct i915_gem_context *dctx = engine->i915->kernel_context; + int ret; + ret = i915_cmd_parser_init_ring(engine); if (ret) goto error; - ret = intel_lr_context_deferred_alloc(dctx, engine); + ret = execlists_context_deferred_alloc(dctx, engine); if (ret) goto error; /* As this is the default context, always pin it */ - ret = intel_lr_context_do_pin(dctx, engine); + ret = intel_lr_context_pin(dctx, engine); if (ret) { - DRM_ERROR( - "Failed to pin and map ringbuffer %s: %d\n", - engine->name, ret); + DRM_ERROR("Failed to pin context for %s: %d\n", + engine->name, ret); goto error; } @@ -2055,22 +2082,12 @@ error: static int logical_render_ring_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[RCS]; + struct intel_engine_cs *engine = logical_ring_setup(dev, RCS); int ret; - engine->name = "render ring"; - engine->id = RCS; - engine->exec_id = I915_EXEC_RENDER; - engine->guc_id = GUC_RENDER_ENGINE; - engine->mmio_base = RENDER_RING_BASE; - - logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT); if (HAS_L3_DPF(dev)) engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; - logical_ring_default_vfuncs(dev, engine); - /* Override some for render ring. 
*/ if (INTEL_INFO(dev)->gen >= 9) engine->init_hw = gen9_init_render_ring; @@ -2081,8 +2098,6 @@ static int logical_render_ring_init(struct drm_device *dev) engine->emit_flush = gen8_emit_flush_render; engine->emit_request = gen8_emit_request_render; - engine->dev = dev; - ret = intel_init_pipe_control(engine); if (ret) return ret; @@ -2098,7 +2113,7 @@ static int logical_render_ring_init(struct drm_device *dev) ret); } - ret = logical_ring_init(dev, engine); + ret = logical_ring_init(engine); if (ret) { lrc_destroy_wa_ctx_obj(engine); } @@ -2108,70 +2123,30 @@ static int logical_render_ring_init(struct drm_device *dev) static int logical_bsd_ring_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[VCS]; - - engine->name = "bsd ring"; - engine->id = VCS; - engine->exec_id = I915_EXEC_BSD; - engine->guc_id = GUC_VIDEO_ENGINE; - engine->mmio_base = GEN6_BSD_RING_BASE; - - logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, engine); + struct intel_engine_cs *engine = logical_ring_setup(dev, VCS); - return logical_ring_init(dev, engine); + return logical_ring_init(engine); } static int logical_bsd2_ring_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; - - engine->name = "bsd2 ring"; - engine->id = VCS2; - engine->exec_id = I915_EXEC_BSD; - engine->guc_id = GUC_VIDEO_ENGINE2; - engine->mmio_base = GEN8_BSD2_RING_BASE; - - logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, engine); + struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2); - return logical_ring_init(dev, engine); + return logical_ring_init(engine); } static int logical_blt_ring_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[BCS]; - - engine->name = "blitter ring"; - engine->id = BCS; - engine->exec_id = I915_EXEC_BLT; - engine->guc_id = GUC_BLITTER_ENGINE; - engine->mmio_base = BLT_RING_BASE; - - logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, engine); + struct intel_engine_cs *engine = logical_ring_setup(dev, BCS); - return logical_ring_init(dev, engine); + return logical_ring_init(engine); } static int logical_vebox_ring_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *engine = &dev_priv->engine[VECS]; - - engine->name = "video enhancement ring"; - engine->id = VECS; - engine->exec_id = I915_EXEC_VEBOX; - engine->guc_id = GUC_VIDEOENHANCE_ENGINE; - engine->mmio_base = VEBOX_RING_BASE; + struct intel_engine_cs *engine = logical_ring_setup(dev, VECS); - logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, engine); - - return logical_ring_init(dev, engine); + return logical_ring_init(engine); } /** @@ -2232,7 +2207,7 @@ cleanup_render_ring: } static u32 -make_rpcs(struct drm_device *dev) +make_rpcs(struct drm_i915_private *dev_priv) { u32 rpcs = 0; @@ -2240,7 +2215,7 @@ make_rpcs(struct drm_device *dev) * No explicit RPCS request is needed to ensure full * slice/subslice/EU enablement prior to Gen9. */ - if (INTEL_INFO(dev)->gen < 9) + if (INTEL_GEN(dev_priv) < 9) return 0; /* @@ -2249,24 +2224,24 @@ make_rpcs(struct drm_device *dev) * must make an explicit request through RPCS for full * enablement. 
*/ - if (INTEL_INFO(dev)->has_slice_pg) { + if (INTEL_INFO(dev_priv)->has_slice_pg) { rpcs |= GEN8_RPCS_S_CNT_ENABLE; - rpcs |= INTEL_INFO(dev)->slice_total << + rpcs |= INTEL_INFO(dev_priv)->slice_total << GEN8_RPCS_S_CNT_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } - if (INTEL_INFO(dev)->has_subslice_pg) { + if (INTEL_INFO(dev_priv)->has_subslice_pg) { rpcs |= GEN8_RPCS_SS_CNT_ENABLE; - rpcs |= INTEL_INFO(dev)->subslice_per_slice << + rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice << GEN8_RPCS_SS_CNT_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } - if (INTEL_INFO(dev)->has_eu_pg) { - rpcs |= INTEL_INFO(dev)->eu_per_subslice << + if (INTEL_INFO(dev_priv)->has_eu_pg) { + rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice << GEN8_RPCS_EU_MIN_SHIFT; - rpcs |= INTEL_INFO(dev)->eu_per_subslice << + rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice << GEN8_RPCS_EU_MAX_SHIFT; rpcs |= GEN8_RPCS_ENABLE; } @@ -2278,9 +2253,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) { u32 indirect_ctx_offset; - switch (INTEL_INFO(engine->dev)->gen) { + switch (INTEL_GEN(engine->i915)) { default: - MISSING_CASE(INTEL_INFO(engine->dev)->gen); + MISSING_CASE(INTEL_GEN(engine->i915)); /* fall through */ case 9: indirect_ctx_offset = @@ -2296,13 +2271,12 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) } static int -populate_lr_context(struct intel_context *ctx, +populate_lr_context(struct i915_gem_context *ctx, struct drm_i915_gem_object *ctx_obj, struct intel_engine_cs *engine, struct intel_ringbuffer *ringbuf) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = ctx->i915; struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; void *vaddr; u32 *reg_state; @@ -2340,7 +2314,7 @@ populate_lr_context(struct intel_context *ctx, RING_CONTEXT_CONTROL(engine), _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - (HAS_RESOURCE_STREAMER(dev) ? + (HAS_RESOURCE_STREAMER(dev_priv) ? CTX_CTRL_RS_CTX_ENABLE : 0))); ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base), 0); @@ -2429,7 +2403,7 @@ populate_lr_context(struct intel_context *ctx, if (engine->id == RCS) { reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, - make_rpcs(dev)); + make_rpcs(dev_priv)); } i915_gem_object_unpin_map(ctx_obj); @@ -2438,37 +2412,6 @@ populate_lr_context(struct intel_context *ctx, } /** - * intel_lr_context_free() - free the LRC specific bits of a context - * @ctx: the LR context to free. - * - * The real context freeing is done in i915_gem_context_free: this only - * takes care of the bits that are LRC related: the per-engine backing - * objects and the logical ringbuffer. 
- */ -void intel_lr_context_free(struct intel_context *ctx) -{ - int i; - - for (i = I915_NUM_ENGINES; --i >= 0; ) { - struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf; - struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state; - - if (!ctx_obj) - continue; - - if (ctx == ctx->i915->kernel_context) { - intel_unpin_ringbuffer_obj(ringbuf); - i915_gem_object_ggtt_unpin(ctx_obj); - i915_gem_object_unpin_map(ctx_obj); - } - - WARN_ON(ctx->engine[i].pin_count); - intel_ringbuffer_free(ringbuf); - drm_gem_object_unreference(&ctx_obj->base); - } -} - -/** * intel_lr_context_size() - return the size of the context for an engine * @ring: which engine to find the context size for * @@ -2486,11 +2429,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) { int ret = 0; - WARN_ON(INTEL_INFO(engine->dev)->gen < 8); + WARN_ON(INTEL_GEN(engine->i915) < 8); switch (engine->id) { case RCS: - if (INTEL_INFO(engine->dev)->gen >= 9) + if (INTEL_GEN(engine->i915) >= 9) ret = GEN9_LR_CONTEXT_RENDER_SIZE; else ret = GEN8_LR_CONTEXT_RENDER_SIZE; @@ -2507,9 +2450,9 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) } /** - * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context + * execlists_context_deferred_alloc() - create the LRC specific bits of a context * @ctx: LR context to create. - * @ring: engine to be used with the context. + * @engine: engine to be used with the context. * * This function can be called more than once, with different engines, if we plan * to use the context with them. The context backing objects and the ringbuffers @@ -2519,28 +2462,26 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine) * * Return: non-zero on error. */ - -int intel_lr_context_deferred_alloc(struct intel_context *ctx, - struct intel_engine_cs *engine) +static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; struct drm_i915_gem_object *ctx_obj; + struct intel_context *ce = &ctx->engine[engine->id]; uint32_t context_size; struct intel_ringbuffer *ringbuf; int ret; - WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); - WARN_ON(ctx->engine[engine->id].state); + WARN_ON(ce->state); context_size = round_up(intel_lr_context_size(engine), 4096); /* One extra page as the sharing data between driver and GuC */ context_size += PAGE_SIZE * LRC_PPHWSP_PN; - ctx_obj = i915_gem_alloc_object(dev, context_size); - if (!ctx_obj) { + ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size); + if (IS_ERR(ctx_obj)) { DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); - return -ENOMEM; + return PTR_ERR(ctx_obj); } ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE); @@ -2555,48 +2496,29 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx, goto error_ringbuf; } - ctx->engine[engine->id].ringbuf = ringbuf; - ctx->engine[engine->id].state = ctx_obj; + ce->ringbuf = ringbuf; + ce->state = ctx_obj; + ce->initialised = engine->init_context == NULL; - if (ctx != ctx->i915->kernel_context && engine->init_context) { - struct drm_i915_gem_request *req; - - req = i915_gem_request_alloc(engine, ctx); - if (IS_ERR(req)) { - ret = PTR_ERR(req); - DRM_ERROR("ring create req: %d\n", ret); - goto error_ringbuf; - } - - ret = engine->init_context(req); - i915_add_request_no_flush(req); - if (ret) { - DRM_ERROR("ring init context: %d\n", - ret); - goto error_ringbuf; - } - } return 0; error_ringbuf: intel_ringbuffer_free(ringbuf); error_deref_obj: 
drm_gem_object_unreference(&ctx_obj->base); - ctx->engine[engine->id].ringbuf = NULL; - ctx->engine[engine->id].state = NULL; + ce->ringbuf = NULL; + ce->state = NULL; return ret; } void intel_lr_context_reset(struct drm_i915_private *dev_priv, - struct intel_context *ctx) + struct i915_gem_context *ctx) { struct intel_engine_cs *engine; for_each_engine(engine, dev_priv) { - struct drm_i915_gem_object *ctx_obj = - ctx->engine[engine->id].state; - struct intel_ringbuffer *ringbuf = - ctx->engine[engine->id].ringbuf; + struct intel_context *ce = &ctx->engine[engine->id]; + struct drm_i915_gem_object *ctx_obj = ce->state; void *vaddr; uint32_t *reg_state; @@ -2615,7 +2537,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv, i915_gem_object_unpin_map(ctx_obj); - ringbuf->head = 0; - ringbuf->tail = 0; + ce->ringbuf->head = 0; + ce->ringbuf->tail = 0; } } diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 60a7385bc531..a8db42a9c50f 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -99,30 +99,27 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf, #define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1) #define LRC_STATE_PN (LRC_PPHWSP_PN + 1) -void intel_lr_context_free(struct intel_context *ctx); +struct i915_gem_context; + uint32_t intel_lr_context_size(struct intel_engine_cs *engine); -int intel_lr_context_deferred_alloc(struct intel_context *ctx, - struct intel_engine_cs *engine); -void intel_lr_context_unpin(struct intel_context *ctx, +void intel_lr_context_unpin(struct i915_gem_context *ctx, struct intel_engine_cs *engine); struct drm_i915_private; void intel_lr_context_reset(struct drm_i915_private *dev_priv, - struct intel_context *ctx); -uint64_t intel_lr_context_descriptor(struct intel_context *ctx, + struct i915_gem_context *ctx); +uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, struct intel_engine_cs *engine); -u32 intel_execlists_ctx_id(struct intel_context *ctx, - struct intel_engine_cs *engine); - /* Execlists */ -int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); +int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, + int enable_execlists); struct i915_execbuffer_params; int intel_execlists_submission(struct i915_execbuffer_params *params, struct drm_i915_gem_execbuffer2 *args, struct list_head *vmas); -void intel_execlists_retire_requests(struct intel_engine_cs *engine); +void intel_execlists_cancel_requests(struct intel_engine_cs *engine); #endif /* _INTEL_LRC_H_ */ diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index bc53c0dd34d0..e06b9036bebc 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -190,7 +190,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) /* Set the dithering flag on LVDS as needed, note that there is no * special lvds dither control bit on pch-split platforms, dithering is * only controlled through the PIPECONF reg. */ - if (INTEL_INFO(dev)->gen == 4) { + if (IS_GEN4(dev_priv)) { /* Bspec wording suggests that LVDS port dithering only exists * for 18bpp panels. 
*/ if (crtc->config->dither && crtc->config->pipe_bpp == 18) @@ -547,7 +547,6 @@ static int intel_lvds_set_property(struct drm_connector *connector, static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { .get_modes = intel_lvds_get_modes, .mode_valid = intel_lvds_mode_valid, - .best_encoder = intel_best_encoder, }; static const struct drm_connector_funcs intel_lvds_connector_funcs = { @@ -978,7 +977,7 @@ void intel_lvds_init(struct drm_device *dev) DRM_MODE_CONNECTOR_LVDS); drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, - DRM_MODE_ENCODER_LVDS, NULL); + DRM_MODE_ENCODER_LVDS, "LVDS"); intel_encoder->enable = intel_enable_lvds; intel_encoder->pre_enable = intel_pre_enable_lvds; @@ -1082,6 +1081,8 @@ void intel_lvds_init(struct drm_device *dev) fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); if (fixed_mode) { fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + connector->display_info.width_mm = fixed_mode->width_mm; + connector->display_info.height_mm = fixed_mode->height_mm; goto out; } } diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 6ba4bf7f2a89..b765c75f3fcd 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -189,7 +189,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index) */ int intel_mocs_init_engine(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; struct drm_i915_mocs_table table; unsigned int index; diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 99e26034ae8d..f6d8a21d2c49 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -240,10 +240,11 @@ struct opregion_asle_ext { #define MAX_DSLP 1500 -static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) +static int swsci(struct drm_i915_private *dev_priv, + u32 function, u32 parm, u32 *parm_out) { - struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_swsci *swsci = dev_priv->opregion.swsci; + struct pci_dev *pdev = dev_priv->dev->pdev; u32 main_function, sub_function, scic; u16 swsci_val; u32 dslp; @@ -293,16 +294,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) swsci->scic = scic; /* Ensure SCI event is selected and event trigger is cleared. */ - pci_read_config_word(dev->pdev, SWSCI, &swsci_val); + pci_read_config_word(pdev, SWSCI, &swsci_val); if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) { swsci_val |= SWSCI_SCISEL; swsci_val &= ~SWSCI_GSSCIE; - pci_write_config_word(dev->pdev, SWSCI, swsci_val); + pci_write_config_word(pdev, SWSCI, swsci_val); } /* Use event trigger to tell bios to check the mail. */ swsci_val |= SWSCI_GSSCIE; - pci_write_config_word(dev->pdev, SWSCI, swsci_val); + pci_write_config_word(pdev, SWSCI, swsci_val); /* Poll for the result. 
*/ #define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0) @@ -336,13 +337,13 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) { - struct drm_device *dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); u32 parm = 0; u32 type = 0; u32 port; /* don't care about old stuff for now */ - if (!HAS_DDI(dev)) + if (!HAS_DDI(dev_priv)) return 0; if (intel_encoder->type == INTEL_OUTPUT_DSI) @@ -382,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, parm |= type << (16 + port * 3); - return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL); + return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL); } static const struct { @@ -396,27 +397,28 @@ static const struct { { PCI_D3cold, 0x04 }, }; -int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) +int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, + pci_power_t state) { int i; - if (!HAS_DDI(dev)) + if (!HAS_DDI(dev_priv)) return 0; for (i = 0; i < ARRAY_SIZE(power_state_map); i++) { if (state == power_state_map[i].pci_power_state) - return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE, + return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE, power_state_map[i].parm, NULL); } return -EINVAL; } -static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) +static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_connector *connector; struct opregion_asle *asle = dev_priv->opregion.asle; + struct drm_device *dev = dev_priv->dev; DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); @@ -449,7 +451,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) return 0; } -static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) +static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi) { /* alsi is the current ALS reading in lux. 0 indicates below sensor range, 0xffff indicates above sensor range. 
1-0xfffe are valid */ @@ -457,13 +459,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) return ASLC_ALS_ILLUM_FAILED; } -static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) +static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb) { DRM_DEBUG_DRIVER("PWM freq is not supported\n"); return ASLC_PWM_FREQ_FAILED; } -static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) +static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit) { /* Panel fitting is currently controlled by the X code, so this is a noop until modesetting support works fully */ @@ -471,13 +473,13 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) return ASLC_PFIT_FAILED; } -static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot) +static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot) { DRM_DEBUG_DRIVER("SROT is not supported\n"); return ASLC_ROTATION_ANGLES_FAILED; } -static u32 asle_set_button_array(struct drm_device *dev, u32 iuer) +static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer) { if (!iuer) DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n"); @@ -495,7 +497,7 @@ static u32 asle_set_button_array(struct drm_device *dev, u32 iuer) return ASLC_BUTTON_ARRAY_FAILED; } -static u32 asle_set_convertible(struct drm_device *dev, u32 iuer) +static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer) { if (iuer & ASLE_IUER_CONVERTIBLE) DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n"); @@ -505,7 +507,7 @@ static u32 asle_set_convertible(struct drm_device *dev, u32 iuer) return ASLC_CONVERTIBLE_FAILED; } -static u32 asle_set_docking(struct drm_device *dev, u32 iuer) +static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer) { if (iuer & ASLE_IUER_DOCKING) DRM_DEBUG_DRIVER("Docking is not supported (docked)\n"); @@ -515,7 +517,7 @@ static u32 asle_set_docking(struct drm_device *dev, u32 iuer) return ASLC_DOCKING_FAILED; } -static u32 asle_isct_state(struct drm_device *dev) +static u32 asle_isct_state(struct drm_i915_private *dev_priv) { DRM_DEBUG_DRIVER("ISCT is not supported\n"); return ASLC_ISCT_STATE_FAILED; @@ -527,7 +529,6 @@ static void asle_work(struct work_struct *work) container_of(work, struct intel_opregion, asle_work); struct drm_i915_private *dev_priv = container_of(opregion, struct drm_i915_private, opregion); - struct drm_device *dev = dev_priv->dev; struct opregion_asle *asle = dev_priv->opregion.asle; u32 aslc_stat = 0; u32 aslc_req; @@ -544,40 +545,38 @@ static void asle_work(struct work_struct *work) } if (aslc_req & ASLC_SET_ALS_ILLUM) - aslc_stat |= asle_set_als_illum(dev, asle->alsi); + aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi); if (aslc_req & ASLC_SET_BACKLIGHT) - aslc_stat |= asle_set_backlight(dev, asle->bclp); + aslc_stat |= asle_set_backlight(dev_priv, asle->bclp); if (aslc_req & ASLC_SET_PFIT) - aslc_stat |= asle_set_pfit(dev, asle->pfit); + aslc_stat |= asle_set_pfit(dev_priv, asle->pfit); if (aslc_req & ASLC_SET_PWM_FREQ) - aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb); + aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb); if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES) - aslc_stat |= asle_set_supported_rotation_angles(dev, + aslc_stat |= asle_set_supported_rotation_angles(dev_priv, asle->srot); if (aslc_req & ASLC_BUTTON_ARRAY) - aslc_stat |= asle_set_button_array(dev, asle->iuer); + aslc_stat |= asle_set_button_array(dev_priv, asle->iuer); if (aslc_req & 
ASLC_CONVERTIBLE_INDICATOR) - aslc_stat |= asle_set_convertible(dev, asle->iuer); + aslc_stat |= asle_set_convertible(dev_priv, asle->iuer); if (aslc_req & ASLC_DOCKING_INDICATOR) - aslc_stat |= asle_set_docking(dev, asle->iuer); + aslc_stat |= asle_set_docking(dev_priv, asle->iuer); if (aslc_req & ASLC_ISCT_STATE_CHANGE) - aslc_stat |= asle_isct_state(dev); + aslc_stat |= asle_isct_state(dev_priv); asle->aslc = aslc_stat; } -void intel_opregion_asle_intr(struct drm_device *dev) +void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->opregion.asle) schedule_work(&dev_priv->opregion.asle_work); } @@ -658,10 +657,10 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val) } } -static void intel_didl_outputs(struct drm_device *dev) +static void intel_didl_outputs(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; + struct pci_dev *pdev = dev_priv->dev->pdev; struct drm_connector *connector; acpi_handle handle; struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; @@ -670,7 +669,7 @@ static void intel_didl_outputs(struct drm_device *dev) u32 temp, max_outputs; int i = 0; - handle = ACPI_HANDLE(&dev->pdev->dev); + handle = ACPI_HANDLE(&pdev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) return; @@ -725,7 +724,7 @@ end: blind_set: i = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + list_for_each_entry(connector, &dev_priv->dev->mode_config.connector_list, head) { int output_type = ACPI_OTHER_OUTPUT; if (i >= max_outputs) { DRM_DEBUG_KMS("More than %u outputs in connector list\n", @@ -761,9 +760,8 @@ blind_set: goto end; } -static void intel_setup_cadls(struct drm_device *dev) +static void intel_setup_cadls(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; int i = 0; u32 disp_id; @@ -780,17 +778,16 @@ static void intel_setup_cadls(struct drm_device *dev) } while (++i < 8 && disp_id != 0); } -void intel_opregion_init(struct drm_device *dev) +void intel_opregion_register(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; if (!opregion->header) return; if (opregion->acpi) { - intel_didl_outputs(dev); - intel_setup_cadls(dev); + intel_didl_outputs(dev_priv); + intel_setup_cadls(dev_priv); /* Notify BIOS we are ready to handle ACPI video ext notifs. * Right now, all the events are handled by the ACPI video module. @@ -808,9 +805,8 @@ void intel_opregion_init(struct drm_device *dev) } } -void intel_opregion_fini(struct drm_device *dev) +void intel_opregion_unregister(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; if (!opregion->header) @@ -842,9 +838,8 @@ void intel_opregion_fini(struct drm_device *dev) opregion->lid_state = NULL; } -static void swsci_setup(struct drm_device *dev) +static void swsci_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; bool requested_callbacks = false; u32 tmp; @@ -854,7 +849,7 @@ static void swsci_setup(struct drm_device *dev) opregion->swsci_sbcb_sub_functions = 1; /* We use GBDA to ask for supported GBDA calls. 
*/ - if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) { + if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) { /* make the bits match the sub-function codes */ tmp <<= 1; opregion->swsci_gbda_sub_functions |= tmp; @@ -865,7 +860,7 @@ static void swsci_setup(struct drm_device *dev) * must not call interfaces that are not specifically requested by the * bios. */ - if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) { + if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) { /* here, the bits already match sub-function codes */ opregion->swsci_sbcb_sub_functions |= tmp; requested_callbacks = true; @@ -876,7 +871,7 @@ static void swsci_setup(struct drm_device *dev) * the callback is _requested_. But we still can't call interfaces that * are not requested. */ - if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) { + if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) { /* make the bits match the sub-function codes */ u32 low = tmp & 0x7ff; u32 high = tmp & ~0xfff; /* bit 11 is reserved */ @@ -918,10 +913,10 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = { { } }; -int intel_opregion_setup(struct drm_device *dev) +int intel_opregion_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; + struct pci_dev *pdev = dev_priv->dev->pdev; u32 asls, mboxes; char buf[sizeof(OPREGION_SIGNATURE)]; int err = 0; @@ -933,7 +928,7 @@ int intel_opregion_setup(struct drm_device *dev) BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100); BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400); - pci_read_config_dword(dev->pdev, ASLS, &asls); + pci_read_config_dword(pdev, ASLS, &asls); DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); if (asls == 0) { DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); @@ -965,7 +960,7 @@ int intel_opregion_setup(struct drm_device *dev) if (mboxes & MBOX_SWSCI) { DRM_DEBUG_DRIVER("SWSCI supported\n"); opregion->swsci = base + OPREGION_SWSCI_OFFSET; - swsci_setup(dev); + swsci_setup(dev_priv); } if (mboxes & MBOX_ASLE) { @@ -1014,12 +1009,12 @@ err_out: } int -intel_opregion_get_panel_type(struct drm_device *dev) +intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) { u32 panel_details; int ret; - ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); + ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); if (ret) { DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n", ret); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index bd38e49f7334..eb93f90bb74d 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -168,7 +168,7 @@ struct overlay_registers { }; struct intel_overlay { - struct drm_device *dev; + struct drm_i915_private *i915; struct intel_crtc *crtc; struct drm_i915_gem_object *vid_bo; struct drm_i915_gem_object *old_vid_bo; @@ -190,15 +190,15 @@ struct intel_overlay { static struct overlay_registers __iomem * intel_overlay_map_regs(struct intel_overlay *overlay) { - struct drm_i915_private *dev_priv = to_i915(overlay->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_private *dev_priv = overlay->i915; struct overlay_registers __iomem *regs; - if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; else - regs = 
io_mapping_map_wc(ggtt->mappable, - i915_gem_obj_ggtt_offset(overlay->reg_bo)); + regs = io_mapping_map_wc(dev_priv->ggtt.mappable, + overlay->flip_addr, + PAGE_SIZE); return regs; } @@ -206,7 +206,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) static void intel_overlay_unmap_regs(struct intel_overlay *overlay, struct overlay_registers __iomem *regs) { - if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915)) io_mapping_unmap(regs); } @@ -232,14 +232,13 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, /* overlay needs to be disabled in OCMD reg */ static int intel_overlay_on(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; int ret; WARN_ON(overlay->active); - WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); + WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); req = i915_gem_request_alloc(engine, NULL); if (IS_ERR(req)) @@ -266,8 +265,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) static int intel_overlay_continue(struct intel_overlay *overlay, bool load_polyphase_filter) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; u32 flip_addr = overlay->flip_addr; @@ -335,8 +333,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay) /* overlay needs to be disabled in OCMD reg */ static int intel_overlay_off(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; u32 flip_addr = overlay->flip_addr; @@ -365,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) intel_ring_emit(engine, flip_addr); intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); /* turn overlay off */ - if (IS_I830(dev)) { + if (IS_I830(dev_priv)) { /* Workaround: Don't disable the overlay fully, since otherwise * it dies on the next OVERLAY_ON cmd. */ intel_ring_emit(engine, MI_NOOP); @@ -408,12 +405,11 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) */ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; struct intel_engine_cs *engine = &dev_priv->engine[RCS]; int ret; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + lockdep_assert_held(&dev_priv->dev->struct_mutex); /* Only wait if there is actually an old frame to release to * guarantee forward progress.
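
The overlay conversion in the hunks above and below follows the pattern applied throughout this series (intel_mocs.c, intel_opregion.c, intel_lrc.c): the object caches a struct drm_i915_private pointer as its canonical handle and derives everything else from it, instead of storing a struct drm_device and bouncing through dev->dev_private at every call site. A minimal standalone sketch of that handle-caching pattern, with all type and member names hypothetical rather than taken from the i915 headers:

struct drm_device;			/* opaque core object, rarely needed */

struct example_private {
	struct drm_device *dev;		/* back-pointer for the few core calls */
	unsigned int quirks;
};

struct example_overlay {
	struct example_private *i915;	/* cache the private, not the dev */
};

static unsigned int example_overlay_quirks(const struct example_overlay *overlay)
{
	/* one dereference instead of overlay->dev->dev_private->quirks */
	return overlay->i915->quirks;
}

Helpers rewritten this way no longer need struct drm_device at all, which is what allows calc_swidthsw() and check_overlay_src() in the following hunks to take dev_priv directly.
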
@@ -537,10 +533,10 @@ static int uv_vsubsampling(u32 format) } } -static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) +static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width) { u32 mask, shift, ret; - if (IS_GEN2(dev)) { + if (IS_GEN2(dev_priv)) { mask = 0x1f; shift = 5; } else { @@ -548,7 +544,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) shift = 6; } ret = ((offset + width + mask) >> shift) - (offset >> shift); - if (!IS_GEN2(dev)) + if (!IS_GEN2(dev_priv)) ret <<= 1; ret -= 1; return ret << 2; @@ -741,12 +737,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, int ret, tmp_width; struct overlay_registers __iomem *regs; bool scale_changed = false; - struct drm_device *dev = overlay->dev; + struct drm_i915_private *dev_priv = overlay->i915; u32 swidth, swidthsw, sheight, ostride; enum pipe pipe = overlay->crtc->pipe; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + lockdep_assert_held(&dev_priv->dev->struct_mutex); + WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex)); ret = intel_overlay_release_old_vid(overlay); if (ret != 0) @@ -769,7 +765,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, goto out_unpin; } oconfig = OCONF_CC_OUT_8BIT; - if (IS_GEN4(overlay->dev)) + if (IS_GEN4(dev_priv)) oconfig |= OCONF_CSC_MODE_BT709; oconfig |= pipe == 0 ? OCONF_PIPE_A : OCONF_PIPE_B; @@ -796,7 +792,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, tmp_width = params->src_w; swidth = params->src_w; - swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); + swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width); sheight = params->src_h; iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, ®s->OBUF_0Y); ostride = params->stride_Y; @@ -806,9 +802,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, int uv_vscale = uv_vsubsampling(params->format); u32 tmp_U, tmp_V; swidth |= (params->src_w/uv_hscale) << 16; - tmp_U = calc_swidthsw(overlay->dev, params->offset_U, + tmp_U = calc_swidthsw(dev_priv, params->offset_U, params->src_w/uv_hscale); - tmp_V = calc_swidthsw(overlay->dev, params->offset_V, + tmp_V = calc_swidthsw(dev_priv, params->offset_V, params->src_w/uv_hscale); swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; sheight |= (params->src_h/uv_vscale) << 16; @@ -840,8 +836,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, overlay->old_vid_bo = overlay->vid_bo; overlay->vid_bo = new_bo; - intel_frontbuffer_flip(dev, - INTEL_FRONTBUFFER_OVERLAY(pipe)); + intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe)); return 0; @@ -852,12 +847,12 @@ out_unpin: int intel_overlay_switch_off(struct intel_overlay *overlay) { + struct drm_i915_private *dev_priv = overlay->i915; struct overlay_registers __iomem *regs; - struct drm_device *dev = overlay->dev; int ret; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + lockdep_assert_held(&dev_priv->dev->struct_mutex); + WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex)); ret = intel_overlay_recover_from_interrupt(overlay); if (ret != 0) @@ -897,15 +892,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay, static void update_pfit_vscale_ratio(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - 
struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = overlay->i915; u32 pfit_control = I915_READ(PFIT_CONTROL); u32 ratio; /* XXX: This is not the same logic as in the xorg driver, but more in * line with the intel documentation for the i965 */ - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { /* on i965 use the PGM reg to read out the autoscaler values */ ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; } else { @@ -948,7 +942,7 @@ static int check_overlay_scaling(struct put_image_params *rec) return 0; } -static int check_overlay_src(struct drm_device *dev, +static int check_overlay_src(struct drm_i915_private *dev_priv, struct drm_intel_overlay_put_image *rec, struct drm_i915_gem_object *new_bo) { @@ -959,7 +953,7 @@ static int check_overlay_src(struct drm_device *dev, u32 tmp; /* check src dimensions */ - if (IS_845G(dev) || IS_I830(dev)) { + if (IS_845G(dev_priv) || IS_I830(dev_priv)) { if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || rec->src_width > IMAGE_MAX_WIDTH_LEGACY) return -EINVAL; @@ -1011,14 +1005,14 @@ static int check_overlay_src(struct drm_device *dev, return -EINVAL; /* stride checking */ - if (IS_I830(dev) || IS_845G(dev)) + if (IS_I830(dev_priv) || IS_845G(dev_priv)) stride_mask = 255; else stride_mask = 63; if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) return -EINVAL; - if (IS_GEN4(dev) && rec->stride_Y < 512) + if (IS_GEN4(dev_priv) && rec->stride_Y < 512) return -EINVAL; tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? @@ -1063,13 +1057,13 @@ static int check_overlay_src(struct drm_device *dev, * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use */ -static int intel_panel_fitter_pipe(struct drm_device *dev) +static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 pfit_control; /* i830 doesn't have a panel fitter */ - if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) + if (INTEL_GEN(dev_priv) <= 3 && + (IS_I830(dev_priv) || !IS_MOBILE(dev_priv))) return -1; pfit_control = I915_READ(PFIT_CONTROL); @@ -1079,15 +1073,15 @@ static int intel_panel_fitter_pipe(struct drm_device *dev) return -1; /* 965 can place panel fitter on either pipe */ - if (IS_GEN4(dev)) + if (IS_GEN4(dev_priv)) return (pfit_control >> 29) & 0x3; /* older chips can only use pipe 1 */ return 1; } -int intel_overlay_put_image(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_intel_overlay_put_image *put_image_rec = data; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1162,7 +1156,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, /* line too wide, i.e. 
one-line-mode */ if (mode->hdisplay > 1024 && - intel_panel_fitter_pipe(dev) == crtc->pipe) { + intel_panel_fitter_pipe(dev_priv) == crtc->pipe) { overlay->pfit_active = true; update_pfit_vscale_ratio(overlay); } else @@ -1196,7 +1190,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, goto out_unlock; } - ret = check_overlay_src(dev, put_image_rec, new_bo); + ret = check_overlay_src(dev_priv, put_image_rec, new_bo); if (ret != 0) goto out_unlock; params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK; @@ -1284,8 +1278,8 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs) return 0; } -int intel_overlay_attrs(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_intel_overlay_attrs *attrs = data; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1309,7 +1303,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, attrs->contrast = overlay->contrast; attrs->saturation = overlay->saturation; - if (!IS_GEN2(dev)) { + if (!IS_GEN2(dev_priv)) { attrs->gamma0 = I915_READ(OGAMC0); attrs->gamma1 = I915_READ(OGAMC1); attrs->gamma2 = I915_READ(OGAMC2); @@ -1341,7 +1335,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data, intel_overlay_unmap_regs(overlay, regs); if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) goto out_unlock; if (overlay->active) { @@ -1371,37 +1365,36 @@ out_unlock: return ret; } -void intel_setup_overlay(struct drm_device *dev) +void intel_setup_overlay(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_overlay *overlay; struct drm_i915_gem_object *reg_bo; struct overlay_registers __iomem *regs; int ret; - if (!HAS_OVERLAY(dev)) + if (!HAS_OVERLAY(dev_priv)) return; overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); if (!overlay) return; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->dev->struct_mutex); if (WARN_ON(dev_priv->overlay)) goto out_free; - overlay->dev = dev; + overlay->i915 = dev_priv; reg_bo = NULL; - if (!OVERLAY_NEEDS_PHYSICAL(dev)) - reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); - if (reg_bo == NULL) - reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); + if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) + reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE); if (reg_bo == NULL) + reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE); + if (IS_ERR(reg_bo)) goto out_free; overlay->reg_bo = reg_bo; - if (OVERLAY_NEEDS_PHYSICAL(dev)) { + if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) { ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); if (ret) { DRM_ERROR("failed to attach phys overlay regs\n"); @@ -1441,25 +1434,23 @@ void intel_setup_overlay(struct drm_device *dev) intel_overlay_unmap_regs(overlay, regs); dev_priv->overlay = overlay; - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->dev->struct_mutex); DRM_INFO("initialized overlay support\n"); return; out_unpin_bo: - if (!OVERLAY_NEEDS_PHYSICAL(dev)) + if (!OVERLAY_NEEDS_PHYSICAL(dev_priv)) i915_gem_object_ggtt_unpin(reg_bo); out_free_bo: drm_gem_object_unreference(®_bo->base); out_free: - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->dev->struct_mutex); kfree(overlay); return; } -void intel_cleanup_overlay(struct drm_device *dev) +void intel_cleanup_overlay(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (!dev_priv->overlay) return; @@ 
-1482,18 +1473,17 @@ struct intel_overlay_error_state { static struct overlay_registers __iomem * intel_overlay_map_regs_atomic(struct intel_overlay *overlay) { - struct drm_i915_private *dev_priv = to_i915(overlay->dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_private *dev_priv = overlay->i915; struct overlay_registers __iomem *regs; - if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) /* Cast to make sparse happy, but it's wc memory anyway, so * equivalent to the wc io mapping on X86. */ regs = (struct overlay_registers __iomem *) overlay->reg_bo->phys_handle->vaddr; else - regs = io_mapping_map_atomic_wc(ggtt->mappable, - i915_gem_obj_ggtt_offset(overlay->reg_bo)); + regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable, + overlay->flip_addr); return regs; } @@ -1501,15 +1491,13 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, struct overlay_registers __iomem *regs) { - if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) + if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915)) io_mapping_unmap_atomic(regs); } - struct intel_overlay_error_state * -intel_overlay_capture_error_state(struct drm_device *dev) +intel_overlay_capture_error_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_overlay *overlay = dev_priv->overlay; struct intel_overlay_error_state *error; struct overlay_registers __iomem *regs; @@ -1523,10 +1511,7 @@ intel_overlay_capture_error_state(struct drm_device *dev) error->dovsta = I915_READ(DOVSTA); error->isr = I915_READ(ISR); - if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) - error->base = (__force long)overlay->reg_bo->phys_handle->vaddr; - else - error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); + error->base = overlay->flip_addr; regs = intel_overlay_map_regs_atomic(overlay); if (!regs) diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 8357d571553a..f0b1602c3258 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1724,6 +1724,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) container_of(panel, struct intel_connector, panel); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP && + intel_dp_aux_init_backlight_funcs(connector) == 0) + return; + + if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI && + intel_dsi_dcs_init_backlight_funcs(connector) == 0) + return; + if (IS_BROXTON(dev_priv)) { panel->backlight.setup = bxt_setup_backlight; panel->backlight.enable = bxt_enable_backlight; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a7ef45da0a9e..08274591db7e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -26,6 +26,7 @@ */ #include <linux/cpufreq.h> +#include <drm/drm_plane_helper.h> #include "i915_drv.h" #include "intel_drv.h" #include "../../../platform/x86/intel_ips.h" @@ -58,6 +59,10 @@ static void bxt_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + /* See Bspec note for PSR2_CTL bit 31, Wa#828:bxt */ + I915_WRITE(CHICKEN_PAR1_1, + I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); + /* WaDisableSDEUnitClockGating:bxt */ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); @@ -2012,10 +2017,10 @@ static void ilk_compute_wm_level(const struct 
drm_i915_private *dev_priv, } static uint32_t -hsw_compute_linetime_wm(struct drm_device *dev, - struct intel_crtc_state *cstate) +hsw_compute_linetime_wm(const struct intel_crtc_state *cstate) { - struct drm_i915_private *dev_priv = dev->dev_private; + const struct intel_atomic_state *intel_state = + to_intel_atomic_state(cstate->base.state); const struct drm_display_mode *adjusted_mode = &cstate->base.adjusted_mode; u32 linetime, ips_linetime; @@ -2024,7 +2029,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, return 0; if (WARN_ON(adjusted_mode->crtc_clock == 0)) return 0; - if (WARN_ON(dev_priv->cdclk_freq == 0)) + if (WARN_ON(intel_state->cdclk == 0)) return 0; /* The WM are computed with base on how long it takes to fill a single @@ -2033,7 +2038,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, adjusted_mode->crtc_clock); ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, - dev_priv->cdclk_freq); + intel_state->cdclk); return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | PIPE_WM_LINETIME_TIME(linetime); @@ -2146,14 +2151,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8]) static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5]) { /* ILK sprite LP0 latency is 1300 ns */ - if (INTEL_INFO(dev)->gen == 5) + if (IS_GEN5(dev)) wm[0] = 13; } static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) { /* ILK cursor LP0 latency is 1300 ns */ - if (INTEL_INFO(dev)->gen == 5) + if (IS_GEN5(dev)) wm[0] = 13; /* WaDoubleCursorLP3Latency:ivb */ @@ -2309,7 +2314,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) int level, max_level = ilk_wm_max_level(dev), usable_level; struct ilk_wm_maximums max; - pipe_wm = &cstate->wm.optimal.ilk; + pipe_wm = &cstate->wm.ilk.optimal; for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { struct intel_plane_state *ps; @@ -2352,7 +2357,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) pipe_wm->wm[0] = pipe_wm->raw_wm[0]; if (IS_HASWELL(dev) || IS_BROADWELL(dev)) - pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate); + pipe_wm->linetime = hsw_compute_linetime_wm(cstate); if (!ilk_validate_pipe_wm(dev, pipe_wm)) return -EINVAL; @@ -2391,7 +2396,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, struct intel_crtc *intel_crtc, struct intel_crtc_state *newstate) { - struct intel_pipe_wm *a = &newstate->wm.intermediate; + struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate; struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk; int level, max_level = ilk_wm_max_level(dev); @@ -2400,7 +2405,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, * currently active watermarks to get values that are safe both before * and after the vblank. */ - *a = newstate->wm.optimal.ilk; + *a = newstate->wm.ilk.optimal; a->pipe_enabled |= b->pipe_enabled; a->sprites_enabled |= b->sprites_enabled; a->sprites_scaled |= b->sprites_scaled; @@ -2429,7 +2434,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, * If our intermediate WM are identical to the final WM, then we can * omit the post-vblank programming; only update if it's different. 
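 *
 * (Illustration, not from this patch: if the currently-active config is
 * valid up to WM level 3 but the new optimal config only up to level 2,
 * the intermediate WM can enable at most min(3, 2) = 2 and must carry
 * the worse of the two per-level requirements, so the hardware stays
 * within limits on both sides of the vblank.)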
*/ - if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0) + if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0) newstate->wm.need_postvbl_update = false; return 0; @@ -2849,20 +2854,29 @@ skl_wm_plane_id(const struct intel_plane *plane) static void skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, const struct intel_crtc_state *cstate, - const struct intel_wm_config *config, - struct skl_ddb_entry *alloc /* out */) + struct skl_ddb_entry *alloc, /* out */ + int *num_active /* out */) { + struct drm_atomic_state *state = cstate->base.state; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *for_crtc = cstate->base.crtc; - struct drm_crtc *crtc; unsigned int pipe_size, ddb_size; int nth_active_pipe; + int pipe = to_intel_crtc(for_crtc)->pipe; - if (!cstate->base.active) { + if (WARN_ON(!state) || !cstate->base.active) { alloc->start = 0; alloc->end = 0; + *num_active = hweight32(dev_priv->active_crtcs); return; } + if (intel_state->active_pipe_changes) + *num_active = hweight32(intel_state->active_crtcs); + else + *num_active = hweight32(dev_priv->active_crtcs); + if (IS_BROXTON(dev)) ddb_size = BXT_DDB_SIZE; else @@ -2870,25 +2884,29 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, ddb_size -= 4; /* 4 blocks for bypass path allocation */ - nth_active_pipe = 0; - for_each_crtc(dev, crtc) { - if (!to_intel_crtc(crtc)->active) - continue; - - if (crtc == for_crtc) - break; - - nth_active_pipe++; + /* + * If the state doesn't change the active CRTC's, then there's + * no need to recalculate; the existing pipe allocation limits + * should remain unchanged. Note that we're safe from racing + * commits since any racing commit that changes the active CRTC + * list would need to grab _all_ crtc locks, including the one + * we currently hold. + */ + if (!intel_state->active_pipe_changes) { + *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe]; + return; } - pipe_size = ddb_size / config->num_pipes_active; - alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active; + nth_active_pipe = hweight32(intel_state->active_crtcs & + (drm_crtc_mask(for_crtc) - 1)); + pipe_size = ddb_size / hweight32(intel_state->active_crtcs); + alloc->start = nth_active_pipe * ddb_size / *num_active; alloc->end = alloc->start + pipe_size; } -static unsigned int skl_cursor_allocation(const struct intel_wm_config *config) +static unsigned int skl_cursor_allocation(int num_active) { - if (config->num_pipes_active == 1) + if (num_active == 1) return 32; return 8; @@ -2932,6 +2950,46 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, } } +/* + * Determines the downscale amount of a plane for the purposes of watermark calculations. + * The bspec defines downscale amount as: + * + * """ + * Horizontal down scale amount = maximum[1, Horizontal source size / + * Horizontal destination size] + * Vertical down scale amount = maximum[1, Vertical source size / + * Vertical destination size] + * Total down scale amount = Horizontal down scale amount * + * Vertical down scale amount + * """ + * + * Return value is provided in 16.16 fixed point form to retain fractional part. + * Caller should take care of dividing & rounding off the value. 
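+ *
+ * A hypothetical worked example, not part of this patch: a plane with a
+ * 3840x2160 source (16.16 fixed point, i.e. 3840 << 16 by 2160 << 16)
+ * scaled to a 1920x1080 destination gives
+ *
+ *   downscale_w = (3840 << 16) / 1920 = 2 << 16
+ *   downscale_h = (2160 << 16) / 1080 = 2 << 16
+ *   total       = (downscale_w * downscale_h) >> 16 = 4 << 16
+ *
+ * i.e. a total downscale amount of 4.0 in 16.16 fixed point; unscaled or
+ * upscaled planes clamp to DRM_PLANE_HELPER_NO_SCALING (1.0).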
+ */ +static uint32_t +skl_plane_downscale_amount(const struct intel_plane_state *pstate) +{ + uint32_t downscale_h, downscale_w; + uint32_t src_w, src_h, dst_w, dst_h; + + if (WARN_ON(!pstate->visible)) + return DRM_PLANE_HELPER_NO_SCALING; + + /* n.b., src is 16.16 fixed point, dst is whole integer */ + src_w = drm_rect_width(&pstate->src); + src_h = drm_rect_height(&pstate->src); + dst_w = drm_rect_width(&pstate->dst); + dst_h = drm_rect_height(&pstate->dst); + if (intel_rotation_90_or_270(pstate->base.rotation)) + swap(dst_w, dst_h); + + downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); + downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); + + /* Provide result in 16.16 fixed point */ + return (uint64_t)downscale_w * downscale_h >> 16; +} + static unsigned int skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, const struct drm_plane_state *pstate, @@ -2939,7 +2997,16 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, { struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); struct drm_framebuffer *fb = pstate->fb; + uint32_t down_scale_amount, data_rate; uint32_t width = 0, height = 0; + unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888; + + if (!intel_pstate->visible) + return 0; + if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR) + return 0; + if (y && format != DRM_FORMAT_NV12) + return 0; width = drm_rect_width(&intel_pstate->src) >> 16; height = drm_rect_height(&intel_pstate->src) >> 16; @@ -2948,17 +3015,21 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, swap(width, height); /* for planar format */ - if (fb->pixel_format == DRM_FORMAT_NV12) { + if (format == DRM_FORMAT_NV12) { if (y) /* y-plane data rate */ - return width * height * - drm_format_plane_cpp(fb->pixel_format, 0); + data_rate = width * height * + drm_format_plane_cpp(format, 0); else /* uv-plane data rate */ - return (width / 2) * (height / 2) * - drm_format_plane_cpp(fb->pixel_format, 1); + data_rate = (width / 2) * (height / 2) * + drm_format_plane_cpp(format, 1); + } else { + /* for packed formats */ + data_rate = width * height * drm_format_plane_cpp(format, 0); } - /* for packed formats */ - return width * height * drm_format_plane_cpp(fb->pixel_format, 0); + down_scale_amount = skl_plane_downscale_amount(intel_pstate); + + return (uint64_t)data_rate * down_scale_amount >> 16; } /* @@ -2967,86 +3038,188 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, * 3 * 4096 * 8192 * 4 < 2^32 */ static unsigned int -skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate) +skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate) { - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); - struct drm_device *dev = intel_crtc->base.dev; + struct drm_crtc_state *cstate = &intel_cstate->base; + struct drm_atomic_state *state = cstate->state; + struct drm_crtc *crtc = cstate->crtc; + struct drm_device *dev = crtc->dev; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + const struct drm_plane *plane; const struct intel_plane *intel_plane; - unsigned int total_data_rate = 0; + struct drm_plane_state *pstate; + unsigned int rate, total_data_rate = 0; + int id; + int i; - for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { - const struct drm_plane_state *pstate = intel_plane->base.state; + if (WARN_ON(!state)) + return 0; - if (pstate->fb == NULL) - continue; + /* Calculate and cache data rate for each plane */ + 
for_each_plane_in_state(state, plane, pstate, i) { + id = skl_wm_plane_id(to_intel_plane(plane)); + intel_plane = to_intel_plane(plane); - if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) + if (intel_plane->pipe != intel_crtc->pipe) continue; /* packed/uv */ - total_data_rate += skl_plane_relative_data_rate(cstate, - pstate, - 0); + rate = skl_plane_relative_data_rate(intel_cstate, + pstate, 0); + intel_cstate->wm.skl.plane_data_rate[id] = rate; + + /* y-plane */ + rate = skl_plane_relative_data_rate(intel_cstate, + pstate, 1); + intel_cstate->wm.skl.plane_y_data_rate[id] = rate; + } + + /* Calculate CRTC's total data rate from cached values */ + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { + int id = skl_wm_plane_id(intel_plane); - if (pstate->fb->pixel_format == DRM_FORMAT_NV12) - /* y-plane */ - total_data_rate += skl_plane_relative_data_rate(cstate, - pstate, - 1); + /* packed/uv */ + total_data_rate += intel_cstate->wm.skl.plane_data_rate[id]; + total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id]; } + WARN_ON(cstate->plane_mask && total_data_rate == 0); + return total_data_rate; } -static void +static uint16_t +skl_ddb_min_alloc(const struct drm_plane_state *pstate, + const int y) +{ + struct drm_framebuffer *fb = pstate->fb; + struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); + uint32_t src_w, src_h; + uint32_t min_scanlines = 8; + uint8_t plane_bpp; + + if (WARN_ON(!fb)) + return 0; + + /* For packed formats, no y-plane, return 0 */ + if (y && fb->pixel_format != DRM_FORMAT_NV12) + return 0; + + /* For Non Y-tile return 8-blocks */ + if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED && + fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED) + return 8; + + src_w = drm_rect_width(&intel_pstate->src) >> 16; + src_h = drm_rect_height(&intel_pstate->src) >> 16; + + if (intel_rotation_90_or_270(pstate->rotation)) + swap(src_w, src_h); + + /* Halve UV plane width and height for NV12 */ + if (fb->pixel_format == DRM_FORMAT_NV12 && !y) { + src_w /= 2; + src_h /= 2; + } + + if (fb->pixel_format == DRM_FORMAT_NV12 && !y) + plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1); + else + plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0); + + if (intel_rotation_90_or_270(pstate->rotation)) { + switch (plane_bpp) { + case 1: + min_scanlines = 32; + break; + case 2: + min_scanlines = 16; + break; + case 4: + min_scanlines = 8; + break; + case 8: + min_scanlines = 4; + break; + default: + WARN(1, "Unsupported pixel depth %u for rotation", + plane_bpp); + min_scanlines = 32; + } + } + + return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3; +} + +static int skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, struct skl_ddb_allocation *ddb /* out */) { + struct drm_atomic_state *state = cstate->base.state; struct drm_crtc *crtc = cstate->base.crtc; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_wm_config *config = &dev_priv->wm.config; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_plane *intel_plane; + struct drm_plane *plane; + struct drm_plane_state *pstate; enum pipe pipe = intel_crtc->pipe; struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; uint16_t alloc_size, start, cursor_blocks; - uint16_t minimum[I915_MAX_PLANES]; - uint16_t y_minimum[I915_MAX_PLANES]; + uint16_t *minimum = cstate->wm.skl.minimum_blocks; + uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks; unsigned int total_data_rate; + int num_active; + int id, i; + + if (WARN_ON(!state)) + return 0; 
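+
+ /*
+ * Hypothetical example for skl_ddb_min_alloc() above, not part of the
+ * patch: a 1920-pixel-wide, 4 bpp, Y-tiled plane rotated by 90 degrees
+ * uses min_scanlines = 8, so its minimum allocation is
+ *
+ *   DIV_ROUND_UP(4 * 1920 * 4, 512) * 8 / 4 + 3 = 60 * 2 + 3 = 123
+ *
+ * blocks, while a non-Y-tiled plane would get the flat 8 blocks.
+ */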
- skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc); + if (!cstate->base.active) { + ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0; + memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); + memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe])); + return 0; + } + + skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active); alloc_size = skl_ddb_entry_size(alloc); if (alloc_size == 0) { memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); - memset(&ddb->plane[pipe][PLANE_CURSOR], 0, - sizeof(ddb->plane[pipe][PLANE_CURSOR])); - return; + return 0; } - cursor_blocks = skl_cursor_allocation(config); + cursor_blocks = skl_cursor_allocation(num_active); ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks; ddb->plane[pipe][PLANE_CURSOR].end = alloc->end; alloc_size -= cursor_blocks; - alloc->end -= cursor_blocks; /* 1. Allocate the minimum required blocks for each active plane */ - for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { - struct drm_plane *plane = &intel_plane->base; - struct drm_framebuffer *fb = plane->state->fb; - int id = skl_wm_plane_id(intel_plane); + for_each_plane_in_state(state, plane, pstate, i) { + intel_plane = to_intel_plane(plane); + id = skl_wm_plane_id(intel_plane); - if (!to_intel_plane_state(plane->state)->visible) + if (intel_plane->pipe != pipe) continue; - if (plane->type == DRM_PLANE_TYPE_CURSOR) + if (!to_intel_plane_state(pstate)->visible) { + minimum[id] = 0; + y_minimum[id] = 0; + continue; + } + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + minimum[id] = 0; + y_minimum[id] = 0; continue; + } - minimum[id] = 8; - alloc_size -= minimum[id]; - y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0; - alloc_size -= y_minimum[id]; + minimum[id] = skl_ddb_min_alloc(pstate, 0); + y_minimum[id] = skl_ddb_min_alloc(pstate, 1); + } + + for (i = 0; i < PLANE_CURSOR; i++) { + alloc_size -= minimum[i]; + alloc_size -= y_minimum[i]; } /* @@ -3056,21 +3229,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, * FIXME: we may not allocate every single block here.
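 *
 * (Worked illustration with assumed numbers, not from the patch: if
 * alloc_size is 800 blocks once the minima are subtracted and two
 * planes have cached data rates in a 3:1 ratio, they end up with
 * minimum[id] + 600 and minimum[id] + 200 blocks respectively.)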
*/ total_data_rate = skl_get_total_relative_data_rate(cstate); + if (total_data_rate == 0) + return 0; start = alloc->start; for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { - struct drm_plane *plane = &intel_plane->base; - struct drm_plane_state *pstate = intel_plane->base.state; unsigned int data_rate, y_data_rate; uint16_t plane_blocks, y_plane_blocks = 0; int id = skl_wm_plane_id(intel_plane); - if (!to_intel_plane_state(pstate)->visible) - continue; - if (plane->type == DRM_PLANE_TYPE_CURSOR) - continue; - - data_rate = skl_plane_relative_data_rate(cstate, pstate, 0); + data_rate = cstate->wm.skl.plane_data_rate[id]; /* * allocation for (packed formats) or (uv-plane part of planar format): @@ -3081,30 +3249,32 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, plane_blocks += div_u64((uint64_t)alloc_size * data_rate, total_data_rate); - ddb->plane[pipe][id].start = start; - ddb->plane[pipe][id].end = start + plane_blocks; + /* Leave disabled planes at (0,0) */ + if (data_rate) { + ddb->plane[pipe][id].start = start; + ddb->plane[pipe][id].end = start + plane_blocks; + } start += plane_blocks; /* * allocation for y_plane part of planar format: */ - if (pstate->fb->pixel_format == DRM_FORMAT_NV12) { - y_data_rate = skl_plane_relative_data_rate(cstate, - pstate, - 1); - y_plane_blocks = y_minimum[id]; - y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, - total_data_rate); + y_data_rate = cstate->wm.skl.plane_y_data_rate[id]; + + y_plane_blocks = y_minimum[id]; + y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, + total_data_rate); + if (y_data_rate) { ddb->y_plane[pipe][id].start = start; ddb->y_plane[pipe][id].end = start + y_plane_blocks; - - start += y_plane_blocks; } + start += y_plane_blocks; } + return 0; } static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config) @@ -3161,35 +3331,41 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, return ret; } -static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb, - const struct intel_crtc *intel_crtc) +static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, + struct intel_plane_state *pstate) { - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; + uint64_t adjusted_pixel_rate; + uint64_t downscale_amount; + uint64_t pixel_rate; + + /* Shouldn't reach here on disabled planes... */ + if (WARN_ON(!pstate->visible)) + return 0; /* - * If ddb allocation of pipes changed, it may require recalculation of - * watermarks + * Adjusted plane pixel rate is just the pipe's adjusted pixel rate + * with additional adjustments for plane-specific scaling. 
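+ *
+ * (Hypothetical example, not from the patch: a pipe pixel rate of
+ * 148500 kHz and a plane downscale amount of 1.5, i.e. 0x18000 in
+ * 16.16 fixed point, give 148500 * 0x18000 >> 16 = 222750 as the
+ * plane's adjusted pixel rate.)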
*/ - if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe))) - return true; + adjusted_pixel_rate = skl_pipe_pixel_rate(cstate); + downscale_amount = skl_plane_downscale_amount(pstate); + + pixel_rate = adjusted_pixel_rate * downscale_amount >> 16; + WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0)); - return false; + return pixel_rate; } -static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, - struct intel_crtc_state *cstate, - struct intel_plane *intel_plane, - uint16_t ddb_allocation, - int level, - uint16_t *out_blocks, /* out */ - uint8_t *out_lines /* out */) +static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, + struct intel_crtc_state *cstate, + struct intel_plane_state *intel_pstate, + uint16_t ddb_allocation, + int level, + uint16_t *out_blocks, /* out */ + uint8_t *out_lines, /* out */ + bool *enabled /* out */) { - struct drm_plane *plane = &intel_plane->base; - struct drm_framebuffer *fb = plane->state->fb; - struct intel_plane_state *intel_pstate = - to_intel_plane_state(plane->state); + struct drm_plane_state *pstate = &intel_pstate->base; + struct drm_framebuffer *fb = pstate->fb; uint32_t latency = dev_priv->wm.skl_latency[level]; uint32_t method1, method2; uint32_t plane_bytes_per_line, plane_blocks_per_line; @@ -3197,20 +3373,24 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, uint32_t selected_result; uint8_t cpp; uint32_t width = 0, height = 0; + uint32_t plane_pixel_rate; - if (latency == 0 || !cstate->base.active || !intel_pstate->visible) - return false; + if (latency == 0 || !cstate->base.active || !intel_pstate->visible) { + *enabled = false; + return 0; + } width = drm_rect_width(&intel_pstate->src) >> 16; height = drm_rect_height(&intel_pstate->src) >> 16; - if (intel_rotation_90_or_270(plane->state->rotation)) + if (intel_rotation_90_or_270(pstate->rotation)) swap(width, height); cpp = drm_format_plane_cpp(fb->pixel_format, 0); - method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), - cpp, latency); - method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), + plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); + + method1 = skl_wm_method1(plane_pixel_rate, cpp, latency); + method2 = skl_wm_method2(plane_pixel_rate, cstate->base.adjusted_mode.crtc_htotal, width, cpp, @@ -3224,7 +3404,7 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { uint32_t min_scanlines = 4; uint32_t y_tile_minimum; - if (intel_rotation_90_or_270(plane->state->rotation)) { + if (intel_rotation_90_or_270(pstate->rotation)) { int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ? drm_format_plane_cpp(fb->pixel_format, 1) : drm_format_plane_cpp(fb->pixel_format, 0); @@ -3260,40 +3440,99 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, res_blocks++; } - if (res_blocks >= ddb_allocation || res_lines > 31) - return false; + if (res_blocks >= ddb_allocation || res_lines > 31) { + *enabled = false; + + /* + * If there are no valid level 0 watermarks, then we can't + * support this display configuration. 
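+ * Levels above 0 only gate deeper power-saving states, so losing one
+ * of them merely disables that level; level 0 must always fit, since
+ * without it the plane cannot be scanned out at all.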
+ */ + if (level) { + return 0; + } else { + DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n"); + DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n", + to_intel_crtc(cstate->base.crtc)->pipe, + skl_wm_plane_id(to_intel_plane(pstate->plane)), + res_blocks, ddb_allocation, res_lines); + + return -EINVAL; + } + } *out_blocks = res_blocks; *out_lines = res_lines; + *enabled = true; - return true; + return 0; } -static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, - struct skl_ddb_allocation *ddb, - struct intel_crtc_state *cstate, - int level, - struct skl_wm_level *result) +static int +skl_compute_wm_level(const struct drm_i915_private *dev_priv, + struct skl_ddb_allocation *ddb, + struct intel_crtc_state *cstate, + int level, + struct skl_wm_level *result) { struct drm_device *dev = dev_priv->dev; + struct drm_atomic_state *state = cstate->base.state; struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct drm_plane *plane; struct intel_plane *intel_plane; + struct intel_plane_state *intel_pstate; uint16_t ddb_blocks; enum pipe pipe = intel_crtc->pipe; + int ret; - for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { + /* + * We'll only calculate watermarks for planes that are actually + * enabled, so make sure all other planes are set as disabled. + */ + memset(result, 0, sizeof(*result)); + + for_each_intel_plane_mask(dev, intel_plane, cstate->base.plane_mask) { int i = skl_wm_plane_id(intel_plane); + plane = &intel_plane->base; + intel_pstate = NULL; + if (state) + intel_pstate = + intel_atomic_get_existing_plane_state(state, + intel_plane); + + /* + * Note: If we start supporting multiple pending atomic commits + * against the same planes/CRTC's in the future, plane->state + * will no longer be the correct pre-state to use for the + * calculations here and we'll need to change where we get the + * 'unchanged' plane data from. + * + * For now this is fine because we only allow one queued commit + * against a CRTC. Even if the plane isn't modified by this + * transaction and we don't have a plane lock, we still have + * the CRTC's lock, so we know that no other transactions are + * racing with us to update it. 
+ */ + if (!intel_pstate) + intel_pstate = to_intel_plane_state(plane->state); + + WARN_ON(!intel_pstate->base.fb); + ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); - result->plane_en[i] = skl_compute_plane_wm(dev_priv, - cstate, - intel_plane, - ddb_blocks, - level, - &result->plane_res_b[i], - &result->plane_res_l[i]); + ret = skl_compute_plane_wm(dev_priv, + cstate, + intel_pstate, + ddb_blocks, + level, + &result->plane_res_b[i], + &result->plane_res_l[i], + &result->plane_en[i]); + if (ret) + return ret; } + + return 0; } static uint32_t @@ -3327,21 +3566,26 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate, } } -static void skl_compute_pipe_wm(struct intel_crtc_state *cstate, - struct skl_ddb_allocation *ddb, - struct skl_pipe_wm *pipe_wm) +static int skl_build_pipe_wm(struct intel_crtc_state *cstate, + struct skl_ddb_allocation *ddb, + struct skl_pipe_wm *pipe_wm) { struct drm_device *dev = cstate->base.crtc->dev; const struct drm_i915_private *dev_priv = dev->dev_private; int level, max_level = ilk_wm_max_level(dev); + int ret; for (level = 0; level <= max_level; level++) { - skl_compute_wm_level(dev_priv, ddb, cstate, - level, &pipe_wm->wm[level]); + ret = skl_compute_wm_level(dev_priv, ddb, cstate, + level, &pipe_wm->wm[level]); + if (ret) + return ret; } pipe_wm->linetime = skl_compute_linetime_wm(cstate); skl_compute_transition_wm(cstate, &pipe_wm->trans_wm); + + return 0; } static void skl_compute_wm_results(struct drm_device *dev, @@ -3421,7 +3665,9 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv, int i, level, max_level = ilk_wm_max_level(dev); enum pipe pipe = crtc->pipe; - if (!new->dirty[pipe]) + if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0) + continue; + if (!crtc->active) continue; I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]); @@ -3588,87 +3834,144 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv, } } -static bool skl_update_pipe_wm(struct drm_crtc *crtc, - struct skl_ddb_allocation *ddb, /* out */ - struct skl_pipe_wm *pipe_wm /* out */) +static int skl_update_pipe_wm(struct drm_crtc_state *cstate, + struct skl_ddb_allocation *ddb, /* out */ + struct skl_pipe_wm *pipe_wm, /* out */ + bool *changed /* out */) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); + struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc); + struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate); + int ret; - skl_allocate_pipe_ddb(cstate, ddb); - skl_compute_pipe_wm(cstate, ddb, pipe_wm); + ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm); + if (ret) + return ret; if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm))) - return false; - - intel_crtc->wm.active.skl = *pipe_wm; + *changed = false; + else + *changed = true; - return true; + return 0; } -static void skl_update_other_pipe_wm(struct drm_device *dev, - struct drm_crtc *crtc, - struct skl_wm_values *r) +static int +skl_compute_ddb(struct drm_atomic_state *state) { + struct drm_device *dev = state->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct intel_crtc *intel_crtc; - struct intel_crtc *this_crtc = to_intel_crtc(crtc); + struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb; + unsigned realloc_pipes = dev_priv->active_crtcs; + int ret; /* - * If the WM update hasn't changed the allocation for this_crtc (the - * crtc we are currently 
computing the new WM values for), other - * enabled crtcs will keep the same allocation and we don't need to - * recompute anything for them. + * If this is our first atomic update following hardware readout, + * we can't trust the DDB that the BIOS programmed for us. Let's + * pretend that all pipes switched active status so that we'll + * ensure a full DDB recompute. */ - if (!skl_ddb_allocation_changed(&r->ddb, this_crtc)) - return; + if (dev_priv->wm.distrust_bios_wm) + intel_state->active_pipe_changes = ~0; /* - * Otherwise, because of this_crtc being freshly enabled/disabled, the - * other active pipes need new DDB allocation and WM values. + * If the modeset changes which CRTC's are active, we need to + * recompute the DDB allocation for *all* active pipes, even + * those that weren't otherwise being modified in any way by this + * atomic commit. Due to the shrinking of the per-pipe allocations + * when new active CRTC's are added, it's possible for a pipe that + * we were already using and aren't changing at all here to suddenly + * become invalid if its DDB needs exceeds its new allocation. + * + * Note that if we wind up doing a full DDB recompute, we can't let + * any other display updates race with this transaction, so we need + * to grab the lock on *all* CRTC's. */ - for_each_intel_crtc(dev, intel_crtc) { - struct skl_pipe_wm pipe_wm = {}; - bool wm_changed; - - if (this_crtc->pipe == intel_crtc->pipe) - continue; - - if (!intel_crtc->active) - continue; + if (intel_state->active_pipe_changes) { + realloc_pipes = ~0; + intel_state->wm_results.dirty_pipes = ~0; + } - wm_changed = skl_update_pipe_wm(&intel_crtc->base, - &r->ddb, &pipe_wm); + for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) { + struct intel_crtc_state *cstate; - /* - * If we end up re-computing the other pipe WM values, it's - * because it was really needed, so we expect the WM values to - * be different. - */ - WARN_ON(!wm_changed); + cstate = intel_atomic_get_crtc_state(state, intel_crtc); + if (IS_ERR(cstate)) + return PTR_ERR(cstate); - skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc); - r->dirty[intel_crtc->pipe] = true; + ret = skl_allocate_pipe_ddb(cstate, ddb); + if (ret) + return ret; } + + return 0; } -static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe) +static int +skl_compute_wm(struct drm_atomic_state *state) { - watermarks->wm_linetime[pipe] = 0; - memset(watermarks->plane[pipe], 0, - sizeof(uint32_t) * 8 * I915_MAX_PLANES); - memset(watermarks->plane_trans[pipe], - 0, sizeof(uint32_t) * I915_MAX_PLANES); - watermarks->plane_trans[pipe][PLANE_CURSOR] = 0; + struct drm_crtc *crtc; + struct drm_crtc_state *cstate; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct skl_wm_values *results = &intel_state->wm_results; + struct skl_pipe_wm *pipe_wm; + bool changed = false; + int ret, i; - /* Clear ddb entries for pipe */ - memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry)); - memset(&watermarks->ddb.plane[pipe], 0, - sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); - memset(&watermarks->ddb.y_plane[pipe], 0, - sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); - memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0, - sizeof(struct skl_ddb_entry)); + /* + * If this transaction isn't actually touching any CRTC's, don't + * bother with watermark calculation. 
Note that if we pass this + * test, we're guaranteed to hold at least one CRTC state mutex, + * which means we can safely use values like dev_priv->active_crtcs + * since any racing commits that want to update them would need to + * hold _all_ CRTC state mutexes. + */ + for_each_crtc_in_state(state, crtc, cstate, i) + changed = true; + if (!changed) + return 0; + + /* Clear all dirty flags */ + results->dirty_pipes = 0; + + ret = skl_compute_ddb(state); + if (ret) + return ret; + + /* + * Calculate WM's for all pipes that are part of this transaction. + * Note that the DDB allocation above may have added more CRTC's that + * weren't otherwise being modified (and set bits in dirty_pipes) if + * pipe allocations had to change. + * + * FIXME: Now that we're doing this in the atomic check phase, we + * should allow skl_update_pipe_wm() to return failure in cases where + * no suitable watermark values can be found. + */ + for_each_crtc_in_state(state, crtc, cstate, i) { + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc_state *intel_cstate = + to_intel_crtc_state(cstate); + + pipe_wm = &intel_cstate->wm.skl.optimal; + ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm, + &changed); + if (ret) + return ret; + + if (changed) + results->dirty_pipes |= drm_crtc_mask(crtc); + + if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) + /* This pipe's WM's did not change */ + continue; + + intel_cstate->update_wm_pre = true; + skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc); + } + return 0; } static void skl_update_wm(struct drm_crtc *crtc) @@ -3678,26 +3981,22 @@ static void skl_update_wm(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct skl_wm_values *results = &dev_priv->wm.skl_results; struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); - struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl; - + struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; - /* Clear all dirty flags */ - memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES); - - skl_clear_wm(results, intel_crtc->pipe); - - if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm)) + if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) return; - skl_compute_wm_results(dev, pipe_wm, results, intel_crtc); - results->dirty[intel_crtc->pipe] = true; + intel_crtc->wm.active.skl = *pipe_wm; + + mutex_lock(&dev_priv->wm.wm_mutex); - skl_update_other_pipe_wm(dev, crtc, results); skl_write_wm_values(dev_priv, results); skl_flush_wm_values(dev_priv, results); /* store the new configuration */ dev_priv->wm.skl_hw = *results; + + mutex_unlock(&dev_priv->wm.wm_mutex); } static void ilk_compute_wm_config(struct drm_device *dev, @@ -3757,7 +4056,7 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate) struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); mutex_lock(&dev_priv->wm.wm_mutex); - intel_crtc->wm.active.ilk = cstate->wm.intermediate; + intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate; ilk_program_watermarks(dev_priv); mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -3769,7 +4068,7 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate) mutex_lock(&dev_priv->wm.wm_mutex); if (cstate->wm.need_postvbl_update) { - intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk; + intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal; ilk_program_watermarks(dev_priv); } mutex_unlock(&dev_priv->wm.wm_mutex); @@ -3826,7 +4125,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) struct skl_wm_values *hw = 
&dev_priv->wm.skl_hw; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); - struct skl_pipe_wm *active = &cstate->wm.optimal.skl; + struct skl_pipe_wm *active = &cstate->wm.skl.optimal; enum pipe pipe = intel_crtc->pipe; int level, i, max_level; uint32_t temp; @@ -3849,7 +4148,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) if (!intel_crtc->active) return; - hw->dirty[pipe] = true; + hw->dirty_pipes |= drm_crtc_mask(crtc); active->linetime = hw->wm_linetime[pipe]; @@ -3883,6 +4182,14 @@ void skl_wm_get_hw_state(struct drm_device *dev) skl_ddb_get_hw_state(dev_priv, ddb); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) skl_pipe_wm_get_hw_state(crtc); + + if (dev_priv->active_crtcs) { + /* Fully recompute DDB on first atomic commit */ + dev_priv->wm.distrust_bios_wm = true; + } else { + /* Easy/common case; just sanitize DDB now if everything off */ + memset(ddb, 0, sizeof(*ddb)); + } } static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) @@ -3892,7 +4199,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) struct ilk_wm_values *hw = &dev_priv->wm.hw; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); - struct intel_pipe_wm *active = &cstate->wm.optimal.ilk; + struct intel_pipe_wm *active = &cstate->wm.ilk.optimal; enum pipe pipe = intel_crtc->pipe; static const i915_reg_t wm0_pipe_reg[] = { [PIPE_A] = WM0_PIPEA_ILK, @@ -4169,9 +4476,8 @@ DEFINE_SPINLOCK(mchdev_lock); * mchdev_lock. */ static struct drm_i915_private *i915_mch_dev; -bool ironlake_set_drps(struct drm_device *dev, u8 val) +bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val) { - struct drm_i915_private *dev_priv = dev->dev_private; u16 rgvswctl; assert_spin_locked(&mchdev_lock); @@ -4193,9 +4499,8 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val) return true; } -static void ironlake_enable_drps(struct drm_device *dev) +static void ironlake_enable_drps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 rgvmodectl; u8 fmax, fmin, fstart, vstart; @@ -4252,7 +4557,7 @@ static void ironlake_enable_drps(struct drm_device *dev) DRM_ERROR("stuck trying to change perf mode\n"); mdelay(1); - ironlake_set_drps(dev, fstart); + ironlake_set_drps(dev_priv, fstart); dev_priv->ips.last_count1 = I915_READ(DMIEC) + I915_READ(DDREC) + I915_READ(CSIEC); @@ -4263,9 +4568,8 @@ static void ironlake_enable_drps(struct drm_device *dev) spin_unlock_irq(&mchdev_lock); } -static void ironlake_disable_drps(struct drm_device *dev) +static void ironlake_disable_drps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u16 rgvswctl; spin_lock_irq(&mchdev_lock); @@ -4280,7 +4584,7 @@ static void ironlake_disable_drps(struct drm_device *dev) I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); /* Go back to the starting frequency */ - ironlake_set_drps(dev, dev_priv->ips.fstart); + ironlake_set_drps(dev_priv, dev_priv->ips.fstart); mdelay(1); rgvswctl |= MEMCTL_CMD_STS; I915_WRITE(MEMSWCTL, rgvswctl); @@ -4424,12 +4728,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) /* gen6_set_rps is called to update the frequency request, but should also be * called when the range (min_delay and max_delay) is modified so that we can * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. 
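 *
 * (Units illustration, assuming the 50 MHz encoding described in
 * gen6_init_rps_frequencies() below: val = 18 requests 18 * 50 =
 * 900 MHz on HSW/BDW, while SKL stores frequencies in 16.66 MHz units
 * via GEN9_FREQ_SCALER, so the same 900 MHz target would be val = 54.)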
*/ -static void gen6_set_rps(struct drm_device *dev, u8 val) +static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) return; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -4442,10 +4744,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) if (val != dev_priv->rps.cur_freq) { gen6_set_rps_thresholds(dev_priv, val); - if (IS_GEN9(dev)) + if (IS_GEN9(dev_priv)) I915_WRITE(GEN6_RPNSWREQ, GEN9_FREQUENCY(val)); - else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(val)); else @@ -4467,15 +4769,13 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); } -static void valleyview_set_rps(struct drm_device *dev, u8 val) +static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val) { - struct drm_i915_private *dev_priv = dev->dev_private; - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); WARN_ON(val > dev_priv->rps.max_freq); WARN_ON(val < dev_priv->rps.min_freq); - if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1), + if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1), "Odd GPU freq value\n")) val &= ~1; @@ -4508,7 +4808,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) /* Wake up the media well, as that takes a lot less * power than the Render well. */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); - valleyview_set_rps(dev_priv->dev, val); + valleyview_set_rps(dev_priv, val); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); } @@ -4526,14 +4826,12 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv) void gen6_rps_idle(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; - mutex_lock(&dev_priv->rps.hw_lock); if (dev_priv->rps.enabled) { - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_set_rps_idle(dev_priv); else - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); dev_priv->rps.last_adj = 0; I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); } @@ -4581,49 +4879,39 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv, spin_unlock(&dev_priv->rps.client_lock); } -void intel_set_rps(struct drm_device *dev, u8 val) +void intel_set_rps(struct drm_i915_private *dev_priv, u8 val) { - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) - valleyview_set_rps(dev, val); + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + valleyview_set_rps(dev_priv, val); else - gen6_set_rps(dev, val); + gen6_set_rps(dev_priv, val); } -static void gen9_disable_rc6(struct drm_device *dev) +static void gen9_disable_rc6(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RC_CONTROL, 0); I915_WRITE(GEN9_PG_ENABLE, 0); } -static void gen9_disable_rps(struct drm_device *dev) +static void gen9_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RP_CONTROL, 0); } -static void gen6_disable_rps(struct drm_device *dev) +static void gen6_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RC_CONTROL, 0); I915_WRITE(GEN6_RPNSWREQ, 1 << 31); I915_WRITE(GEN6_RP_CONTROL, 
0); } -static void cherryview_disable_rps(struct drm_device *dev) +static void cherryview_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RC_CONTROL, 0); } -static void valleyview_disable_rps(struct drm_device *dev) +static void valleyview_disable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* we're doing forcewake before Disabling RC6, * This what the BIOS expects when going into suspend */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); @@ -4633,15 +4921,15 @@ static void valleyview_disable_rps(struct drm_device *dev) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void intel_print_rc6_info(struct drm_device *dev, u32 mode) +static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode) { - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) mode = GEN6_RC_CTL_RC6_ENABLE; else mode = 0; } - if (HAS_RC6p(dev)) + if (HAS_RC6p(dev_priv)) DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", onoff(mode & GEN6_RC_CTL_RC6_ENABLE), onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), @@ -4652,9 +4940,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode) onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); } -static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) +static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; bool enable_rc6 = true; unsigned long rc6_ctx_base; @@ -4695,16 +4982,16 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) return enable_rc6; } -int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) +int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6) { /* No RC6 before Ironlake and code is gone for ilk. 
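 * (Hypothetical example, assuming INTEL_RC6_ENABLE, INTEL_RC6p_ENABLE
 * and INTEL_RC6pp_ENABLE are individual bit flags: i915.enable_rc6 = 7
 * on hardware with HAS_RC6p() passes through unchanged, while hardware
 * without RC6p support masks off the deeper states and only plain RC6
 * survives the "& mask" below.)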
*/ - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_INFO(dev_priv)->gen < 6) return 0; if (!enable_rc6) return 0; - if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) { + if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) { DRM_INFO("RC6 disabled by BIOS\n"); return 0; } @@ -4713,7 +5000,7 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) if (enable_rc6 >= 0) { int mask; - if (HAS_RC6p(dev)) + if (HAS_RC6p(dev_priv)) mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | INTEL_RC6pp_ENABLE; else @@ -4726,20 +5013,14 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) return enable_rc6 & mask; } - if (IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev_priv)) return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); return INTEL_RC6_ENABLE; } -int intel_enable_rc6(const struct drm_device *dev) -{ - return i915.enable_rc6; -} - -static void gen6_init_rps_frequencies(struct drm_device *dev) +static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; uint32_t rp_state_cap; u32 ddcc_status = 0; int ret; @@ -4747,7 +5028,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) /* All of these values are in units of 50MHz */ dev_priv->rps.cur_freq = 0; /* static values from HW: RP0 > RP1 > RPn (min_freq) */ - if (IS_BROXTON(dev)) { + if (IS_BROXTON(dev_priv)) { rp_state_cap = I915_READ(BXT_RP_STATE_CAP); dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; @@ -4763,8 +5044,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; - if (IS_HASWELL(dev) || IS_BROADWELL(dev) || - IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || + IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { ret = sandybridge_pcode_read(dev_priv, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, &ddcc_status); @@ -4776,7 +5057,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) dev_priv->rps.max_freq); } - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { /* Store the frequency values in 16.66 MHZ units, which is the natural hardware unit for SKL */ dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; @@ -4793,7 +5074,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; if (dev_priv->rps.min_freq_softlimit == 0) { - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) dev_priv->rps.min_freq_softlimit = max_t(int, dev_priv->rps.efficient_freq, intel_freq_opcode(dev_priv, 450)); @@ -4804,16 +5085,14 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) } /* See the Gen9_GT_PM_Programming_Guide doc for the below */ -static void gen9_enable_rps(struct drm_device *dev) +static void gen9_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - gen6_init_rps_frequencies(dev); + gen6_init_rps_frequencies(dev_priv); /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { /* * BIOS could leave the Hw Turbo enabled, so need to explicitly * clear out the Control register just to avoid inconsitency @@ -4823,7 +5102,7 @@ static void gen9_enable_rps(struct 
drm_device *dev) * if the Turbo is left enabled in the Control register, as the * Up/Down interrupts would remain masked. */ - gen9_disable_rps(dev); + gen9_disable_rps(dev_priv); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return; } @@ -4842,14 +5121,13 @@ static void gen9_enable_rps(struct drm_device *dev) * Up/Down EI & threshold registers, as well as the RP_CONTROL, * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void gen9_enable_rc6(struct drm_device *dev) +static void gen9_enable_rc6(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; uint32_t rc6_mask = 0; @@ -4866,7 +5144,7 @@ static void gen9_enable_rc6(struct drm_device *dev) /* 2b: Program RC6 thresholds.*/ /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev_priv)) I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); else I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); @@ -4875,7 +5153,7 @@ static void gen9_enable_rc6(struct drm_device *dev) for_each_engine(engine, dev_priv) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - if (HAS_GUC_UCODE(dev)) + if (HAS_GUC(dev_priv)) I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); I915_WRITE(GEN6_RC_SLEEP, 0); @@ -4885,12 +5163,12 @@ static void gen9_enable_rc6(struct drm_device *dev) I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); /* 3a: Enable RC6 */ - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) + if (intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); /* WaRsUseTimeoutMode */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | GEN7_RC_CTL_TO_MODE | @@ -4906,19 +5184,17 @@ static void gen9_enable_rc6(struct drm_device *dev) * 3b: Enable Coarse Power Gating only when RC6 is enabled. * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. */ - if (NEEDS_WaRsDisableCoarsePowerGating(dev)) + if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) I915_WRITE(GEN9_PG_ENABLE, 0); else I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 
(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - } -static void gen8_enable_rps(struct drm_device *dev) +static void gen8_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; uint32_t rc6_mask = 0; @@ -4933,7 +5209,7 @@ static void gen8_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_CONTROL, 0); /* Initialize rps frequencies */ - gen6_init_rps_frequencies(dev); + gen6_init_rps_frequencies(dev_priv); /* 2b: Program RC6 thresholds.*/ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); @@ -4942,16 +5218,16 @@ static void gen8_enable_rps(struct drm_device *dev) for_each_engine(engine, dev_priv) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); - if (IS_BROADWELL(dev)) + if (IS_BROADWELL(dev_priv)) I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ else I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ /* 3: Enable RC6 */ - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) + if (intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mask = GEN6_RC_CTL_RC6_ENABLE; - intel_print_rc6_info(dev, rc6_mask); - if (IS_BROADWELL(dev)) + intel_print_rc6_info(dev_priv, rc6_mask); + if (IS_BROADWELL(dev_priv)) I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | GEN7_RC_CTL_TO_MODE | rc6_mask); @@ -4992,14 +5268,13 @@ static void gen8_enable_rps(struct drm_device *dev) /* 6: Ring frequency + overclocking (our driver does this later */ dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void gen6_enable_rps(struct drm_device *dev) +static void gen6_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; u32 gtfifodbg; @@ -5026,7 +5301,7 @@ static void gen6_enable_rps(struct drm_device *dev) intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); /* Initialize rps frequencies */ - gen6_init_rps_frequencies(dev); + gen6_init_rps_frequencies(dev_priv); /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -5042,7 +5317,7 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); - if (IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev_priv)) I915_WRITE(GEN6_RC6_THRESHOLD, 125000); else I915_WRITE(GEN6_RC6_THRESHOLD, 50000); @@ -5050,12 +5325,12 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ /* Check if we are enabling RC6 */ - rc6_mode = intel_enable_rc6(dev_priv->dev); + rc6_mode = intel_enable_rc6(); if (rc6_mode & INTEL_RC6_ENABLE) rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; /* We don't use those on Haswell */ - if (!IS_HASWELL(dev)) { + if (!IS_HASWELL(dev_priv)) { if (rc6_mode & INTEL_RC6p_ENABLE) rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; @@ -5063,7 +5338,7 @@ static void gen6_enable_rps(struct drm_device *dev) rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; } - intel_print_rc6_info(dev, rc6_mask); + intel_print_rc6_info(dev_priv, rc6_mask); I915_WRITE(GEN6_RC_CONTROL, rc6_mask | @@ -5087,13 +5362,13 @@ static void gen6_enable_rps(struct drm_device *dev) } dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + gen6_set_rps(dev_priv, 
dev_priv->rps.idle_freq); rc6vids = 0; ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); - if (IS_GEN6(dev) && ret) { + if (IS_GEN6(dev_priv) && ret) { DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); - } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { + } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); rc6vids &= 0xffff00; @@ -5106,9 +5381,8 @@ static void gen6_enable_rps(struct drm_device *dev) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void __gen6_update_ring_freq(struct drm_device *dev) +static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; int min_freq = 15; unsigned int gpu_freq; unsigned int max_ia_freq, min_ring_freq; @@ -5137,7 +5411,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev) /* convert DDR frequency from units of 266.6MHz to bandwidth */ min_ring_freq = mult_frac(min_ring_freq, 8, 3); - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { /* Convert GT frequency to 50 HZ units */ min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; @@ -5155,16 +5429,16 @@ static void __gen6_update_ring_freq(struct drm_device *dev) int diff = max_gpu_freq - gpu_freq; unsigned int ia_freq = 0, ring_freq = 0; - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { /* * ring_freq = 2 * GT. ring_freq is in 100MHz units * No floor required for ring frequency on SKL. */ ring_freq = gpu_freq; - } else if (INTEL_INFO(dev)->gen >= 8) { + } else if (INTEL_INFO(dev_priv)->gen >= 8) { /* max(2 * GT, DDR). 
NB: GT is 50MHz units */ ring_freq = max(min_ring_freq, gpu_freq); - } else if (IS_HASWELL(dev)) { + } else if (IS_HASWELL(dev_priv)) { ring_freq = mult_frac(gpu_freq, 5, 4); ring_freq = max(min_ring_freq, ring_freq); /* leave ia_freq as the default, chosen by cpufreq */ @@ -5191,26 +5465,23 @@ static void __gen6_update_ring_freq(struct drm_device *dev) } } -void gen6_update_ring_freq(struct drm_device *dev) +void gen6_update_ring_freq(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!HAS_CORE_RING_FREQ(dev)) + if (!HAS_CORE_RING_FREQ(dev_priv)) return; mutex_lock(&dev_priv->rps.hw_lock); - __gen6_update_ring_freq(dev); + __gen6_update_ring_freq(dev_priv); mutex_unlock(&dev_priv->rps.hw_lock); } static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; u32 val, rp0; val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); - switch (INTEL_INFO(dev)->eu_total) { + switch (INTEL_INFO(dev_priv)->eu_total) { case 8: /* (2 * 4) config */ rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); @@ -5321,9 +5592,8 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv) WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); } -static void cherryview_setup_pctx(struct drm_device *dev) +static void cherryview_setup_pctx(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned long pctx_paddr, paddr; u32 pcbr; @@ -5342,15 +5612,14 @@ static void cherryview_setup_pctx(struct drm_device *dev) DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); } -static void valleyview_setup_pctx(struct drm_device *dev) +static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *pctx; unsigned long pctx_paddr; u32 pcbr; int pctx_size = 24*1024; - mutex_lock(&dev->struct_mutex); + mutex_lock(&dev_priv->dev->struct_mutex); pcbr = I915_READ(VLV_PCBR); if (pcbr) { @@ -5375,7 +5644,7 @@ static void valleyview_setup_pctx(struct drm_device *dev) * overlap with other ranges, such as the frame buffer, protected * memory, or any other relevant ranges. 
*/ - pctx = i915_gem_object_create_stolen(dev, pctx_size); + pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size); if (!pctx) { DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); goto out; @@ -5387,13 +5656,11 @@ static void valleyview_setup_pctx(struct drm_device *dev) out: DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); dev_priv->vlv_pctx = pctx; - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev_priv->dev->struct_mutex); } -static void valleyview_cleanup_pctx(struct drm_device *dev) +static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (WARN_ON(!dev_priv->vlv_pctx)) return; @@ -5412,12 +5679,11 @@ static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv) dev_priv->rps.gpll_ref_freq); } -static void valleyview_init_gt_powersave(struct drm_device *dev) +static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 val; - valleyview_setup_pctx(dev); + valleyview_setup_pctx(dev_priv); vlv_init_gpll_ref_freq(dev_priv); @@ -5471,12 +5737,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev) mutex_unlock(&dev_priv->rps.hw_lock); } -static void cherryview_init_gt_powersave(struct drm_device *dev) +static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 val; - cherryview_setup_pctx(dev); + cherryview_setup_pctx(dev_priv); vlv_init_gpll_ref_freq(dev_priv); @@ -5536,14 +5801,13 @@ static void cherryview_init_gt_powersave(struct drm_device *dev) mutex_unlock(&dev_priv->rps.hw_lock); } -static void valleyview_cleanup_gt_powersave(struct drm_device *dev) +static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv) { - valleyview_cleanup_pctx(dev); + valleyview_cleanup_pctx(dev_priv); } -static void cherryview_enable_rps(struct drm_device *dev) +static void cherryview_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; u32 gtfifodbg, val, rc6_mode = 0, pcbr; @@ -5588,8 +5852,8 @@ static void cherryview_enable_rps(struct drm_device *dev) pcbr = I915_READ(VLV_PCBR); /* 3: Enable RC6 */ - if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && - (pcbr >> VLV_PCBR_ADDR_SHIFT)) + if ((intel_enable_rc6() & INTEL_RC6_ENABLE) && + (pcbr >> VLV_PCBR_ADDR_SHIFT)) rc6_mode = GEN7_RC_CTL_TO_MODE; I915_WRITE(GEN6_RC_CONTROL, rc6_mode); @@ -5634,14 +5898,13 @@ static void cherryview_enable_rps(struct drm_device *dev) intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), dev_priv->rps.idle_freq); - valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -static void valleyview_enable_rps(struct drm_device *dev) +static void valleyview_enable_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; u32 gtfifodbg, val, rc6_mode = 0; @@ -5694,10 +5957,10 @@ static void valleyview_enable_rps(struct drm_device *dev) VLV_MEDIA_RC6_COUNT_EN | VLV_RENDER_RC6_COUNT_EN)); - if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) + if (intel_enable_rc6() & INTEL_RC6_ENABLE) rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; - intel_print_rc6_info(dev, rc6_mode); + intel_print_rc6_info(dev_priv, rc6_mode); I915_WRITE(GEN6_RC_CONTROL, rc6_mode); @@ -5724,7 
+5987,7 @@ static void valleyview_enable_rps(struct drm_device *dev) intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), dev_priv->rps.idle_freq); - valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); + valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } @@ -5814,10 +6077,9 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; unsigned long val; - if (INTEL_INFO(dev)->gen != 5) + if (INTEL_INFO(dev_priv)->gen != 5) return 0; spin_lock_irq(&mchdev_lock); @@ -5857,11 +6119,10 @@ static int _pxvid_to_vd(u8 pxvid) static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) { - struct drm_device *dev = dev_priv->dev; const int vd = _pxvid_to_vd(pxvid); const int vm = vd - 1125; - if (INTEL_INFO(dev)->is_mobile) + if (INTEL_INFO(dev_priv)->is_mobile) return vm > 0 ? vm : 0; return vd; @@ -5902,9 +6163,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) void i915_update_gfx_val(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; - - if (INTEL_INFO(dev)->gen != 5) + if (INTEL_INFO(dev_priv)->gen != 5) return; spin_lock_irq(&mchdev_lock); @@ -5953,10 +6212,9 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; unsigned long val; - if (INTEL_INFO(dev)->gen != 5) + if (INTEL_INFO(dev_priv)->gen != 5) return 0; spin_lock_irq(&mchdev_lock); @@ -6097,7 +6355,7 @@ bool i915_gpu_turbo_disable(void) dev_priv->ips.max_delay = dev_priv->ips.fstart; - if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) + if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart)) ret = false; out_unlock: @@ -6145,9 +6403,8 @@ void intel_gpu_ips_teardown(void) spin_unlock_irq(&mchdev_lock); } -static void intel_init_emon(struct drm_device *dev) +static void intel_init_emon(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; u32 lcfuse; u8 pxw[16]; int i; @@ -6216,10 +6473,8 @@ static void intel_init_emon(struct drm_device *dev) dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); } -void intel_init_gt_powersave(struct drm_device *dev) +void intel_init_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* * RPM depends on RC6 to save restore the GT HW context, so make RC6 a * requirement. 
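The conversion these hunks apply is mechanical: helpers that took a struct drm_device * and immediately fetched dev->dev_private now take struct drm_i915_private * directly, and the IS_*()/INTEL_GEN() platform checks are fed dev_priv as well. A minimal before/after sketch of the pattern (example_disable_rps is a made-up name for illustration, not a function in the driver):

/* Before: one extra hop through the DRM device on every call. */
static void example_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
}

/* After: callers hand over dev_priv and the local lookup disappears. */
static void example_disable_rps(struct drm_i915_private *dev_priv)
{
	I915_WRITE(GEN6_RC_CONTROL, 0);
}

Call sites change in lockstep, e.g. example_disable_rps(dev_priv->dev) becomes example_disable_rps(dev_priv), which is why nearly every hunk in this file reduces to a s/dev/dev_priv/ substitution.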
@@ -6229,74 +6484,66 @@ void intel_init_gt_powersave(struct drm_device *dev) intel_runtime_pm_get(dev_priv); } - if (IS_CHERRYVIEW(dev)) - cherryview_init_gt_powersave(dev); - else if (IS_VALLEYVIEW(dev)) - valleyview_init_gt_powersave(dev); + if (IS_CHERRYVIEW(dev_priv)) + cherryview_init_gt_powersave(dev_priv); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_init_gt_powersave(dev_priv); } -void intel_cleanup_gt_powersave(struct drm_device *dev) +void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev_priv)) return; - else if (IS_VALLEYVIEW(dev)) - valleyview_cleanup_gt_powersave(dev); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_cleanup_gt_powersave(dev_priv); if (!i915.enable_rc6) intel_runtime_pm_put(dev_priv); } -static void gen6_suspend_rps(struct drm_device *dev) +static void gen6_suspend_rps(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - flush_delayed_work(&dev_priv->rps.delayed_resume_work); - gen6_disable_rps_interrupts(dev); + gen6_disable_rps_interrupts(dev_priv); } /** * intel_suspend_gt_powersave - suspend PM work and helper threads - * @dev: drm device + * @dev_priv: i915 device * * We don't want to disable RC6 or other features here, we just want * to make sure any work we've queued has finished and won't bother * us while we're suspended. */ -void intel_suspend_gt_powersave(struct drm_device *dev) +void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_GEN(dev_priv) < 6) return; - gen6_suspend_rps(dev); + gen6_suspend_rps(dev_priv); /* Force GPU to min freq during suspend */ gen6_rps_idle(dev_priv); } -void intel_disable_gt_powersave(struct drm_device *dev) +void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_IRONLAKE_M(dev)) { - ironlake_disable_drps(dev); - } else if (INTEL_INFO(dev)->gen >= 6) { - intel_suspend_gt_powersave(dev); + if (IS_IRONLAKE_M(dev_priv)) { + ironlake_disable_drps(dev_priv); + } else if (INTEL_INFO(dev_priv)->gen >= 6) { + intel_suspend_gt_powersave(dev_priv); mutex_lock(&dev_priv->rps.hw_lock); - if (INTEL_INFO(dev)->gen >= 9) { - gen9_disable_rc6(dev); - gen9_disable_rps(dev); - } else if (IS_CHERRYVIEW(dev)) - cherryview_disable_rps(dev); - else if (IS_VALLEYVIEW(dev)) - valleyview_disable_rps(dev); + if (INTEL_INFO(dev_priv)->gen >= 9) { + gen9_disable_rc6(dev_priv); + gen9_disable_rps(dev_priv); + } else if (IS_CHERRYVIEW(dev_priv)) + cherryview_disable_rps(dev_priv); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_disable_rps(dev_priv); else - gen6_disable_rps(dev); + gen6_disable_rps(dev_priv); dev_priv->rps.enabled = false; mutex_unlock(&dev_priv->rps.hw_lock); @@ -6308,27 +6555,26 @@ static void intel_gen6_powersave_work(struct work_struct *work) struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, rps.delayed_resume_work.work); - struct drm_device *dev = dev_priv->dev; mutex_lock(&dev_priv->rps.hw_lock); - gen6_reset_rps_interrupts(dev); + gen6_reset_rps_interrupts(dev_priv); - if (IS_CHERRYVIEW(dev)) { - cherryview_enable_rps(dev); - } else if (IS_VALLEYVIEW(dev)) { - valleyview_enable_rps(dev); - } else if (INTEL_INFO(dev)->gen >= 9) { - gen9_enable_rc6(dev); - gen9_enable_rps(dev); - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) - 
__gen6_update_ring_freq(dev); - } else if (IS_BROADWELL(dev)) { - gen8_enable_rps(dev); - __gen6_update_ring_freq(dev); + if (IS_CHERRYVIEW(dev_priv)) { + cherryview_enable_rps(dev_priv); + } else if (IS_VALLEYVIEW(dev_priv)) { + valleyview_enable_rps(dev_priv); + } else if (INTEL_INFO(dev_priv)->gen >= 9) { + gen9_enable_rc6(dev_priv); + gen9_enable_rps(dev_priv); + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + __gen6_update_ring_freq(dev_priv); + } else if (IS_BROADWELL(dev_priv)) { + gen8_enable_rps(dev_priv); + __gen6_update_ring_freq(dev_priv); } else { - gen6_enable_rps(dev); - __gen6_update_ring_freq(dev); + gen6_enable_rps(dev_priv); + __gen6_update_ring_freq(dev_priv); } WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); @@ -6339,27 +6585,25 @@ static void intel_gen6_powersave_work(struct work_struct *work) dev_priv->rps.enabled = true; - gen6_enable_rps_interrupts(dev); + gen6_enable_rps_interrupts(dev_priv); mutex_unlock(&dev_priv->rps.hw_lock); intel_runtime_pm_put(dev_priv); } -void intel_enable_gt_powersave(struct drm_device *dev) +void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* Powersaving is controlled by the host when inside a VM */ - if (intel_vgpu_active(dev)) + if (intel_vgpu_active(dev_priv)) return; - if (IS_IRONLAKE_M(dev)) { - ironlake_enable_drps(dev); - mutex_lock(&dev->struct_mutex); - intel_init_emon(dev); - mutex_unlock(&dev->struct_mutex); - } else if (INTEL_INFO(dev)->gen >= 6) { + if (IS_IRONLAKE_M(dev_priv)) { + ironlake_enable_drps(dev_priv); + mutex_lock(&dev_priv->dev->struct_mutex); + intel_init_emon(dev_priv); + mutex_unlock(&dev_priv->dev->struct_mutex); + } else if (INTEL_INFO(dev_priv)->gen >= 6) { /* * PCU communication is slow and this doesn't need to be * done at any specific time, so do this out of our fast path @@ -6378,14 +6622,12 @@ void intel_enable_gt_powersave(struct drm_device *dev) } } -void intel_reset_gt_powersave(struct drm_device *dev) +void intel_reset_gt_powersave(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen < 6) + if (INTEL_INFO(dev_priv)->gen < 6) return; - gen6_suspend_rps(dev); + gen6_suspend_rps(dev_priv); dev_priv->rps.enabled = false; } @@ -6698,11 +6940,42 @@ static void lpt_suspend_hw(struct drm_device *dev) } } +static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, + int general_prio_credits, + int high_prio_credits) +{ + u32 misccpctl; + + /* WaTempDisableDOPClkGating:bdw */ + misccpctl = I915_READ(GEN7_MISCCPCTL); + I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); + + I915_WRITE(GEN8_L3SQCREG1, + L3_GENERAL_PRIO_CREDITS(general_prio_credits) | + L3_HIGH_PRIO_CREDITS(high_prio_credits)); + + /* + * Wait at least 100 clocks before re-enabling clock gating. + * See the definition of L3SQCREG1 in BSpec. 
+ */ + POSTING_READ(GEN8_L3SQCREG1); + udelay(1); + I915_WRITE(GEN7_MISCCPCTL, misccpctl); +} + +static void skylake_init_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,kbl */ + I915_WRITE(CHICKEN_PAR1_1, + I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); +} + static void broadwell_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; enum pipe pipe; - uint32_t misccpctl; ilk_init_lp_watermarks(dev); @@ -6733,20 +7006,8 @@ static void broadwell_init_clock_gating(struct drm_device *dev) I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); - /* - * WaProgramL3SqcReg1Default:bdw - * WaTempDisableDOPClkGating:bdw - */ - misccpctl = I915_READ(GEN7_MISCCPCTL); - I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); - I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT); - /* - * Wait at least 100 clocks before re-enabling clock gating. See - * the definition of L3SQCREG1 in BSpec. - */ - POSTING_READ(GEN8_L3SQCREG1); - udelay(1); - I915_WRITE(GEN7_MISCCPCTL, misccpctl); + /* WaProgramL3SqcReg1Default:bdw */ + gen8_set_l3sqc_credits(dev_priv, 30, 2); /* * WaGttCachingOffByDefault:bdw @@ -7017,6 +7278,13 @@ static void cherryview_init_clock_gating(struct drm_device *dev) GEN8_SDEUNIT_CLOCK_GATE_DISABLE); /* + * WaProgramL3SqcReg1Default:chv + * See gfxspecs/Related Documents/Performance Guide/ + * LSQC Setting Recommendations. + */ + gen8_set_l3sqc_credits(dev_priv, 38, 2); + + /* * GTT cache may not work with big pages, so if those * are ever enabled GTT cache may need to be disabled. */ @@ -7163,9 +7431,9 @@ static void nop_init_clock_gating(struct drm_device *dev) void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) { if (IS_SKYLAKE(dev_priv)) - dev_priv->display.init_clock_gating = nop_init_clock_gating; + dev_priv->display.init_clock_gating = skylake_init_clock_gating; else if (IS_KABYLAKE(dev_priv)) - dev_priv->display.init_clock_gating = nop_init_clock_gating; + dev_priv->display.init_clock_gating = skylake_init_clock_gating; else if (IS_BROXTON(dev_priv)) dev_priv->display.init_clock_gating = bxt_init_clock_gating; else if (IS_BROADWELL(dev_priv)) @@ -7217,6 +7485,7 @@ void intel_init_pm(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 9) { skl_setup_wm_latency(dev); dev_priv->display.update_wm = skl_update_wm; + dev_priv->display.compute_global_watermarks = skl_compute_wm; } else if (HAS_PCH_SPLIT(dev)) { ilk_setup_wm_latency(dev); @@ -7390,19 +7659,17 @@ static void __intel_rps_boost_work(struct work_struct *work) struct drm_i915_gem_request *req = boost->req; if (!i915_gem_request_completed(req, true)) - gen6_rps_boost(to_i915(req->engine->dev), NULL, - req->emitted_jiffies); + gen6_rps_boost(req->i915, NULL, req->emitted_jiffies); - i915_gem_request_unreference__unlocked(req); + i915_gem_request_unreference(req); kfree(boost); } -void intel_queue_rps_boost_for_request(struct drm_device *dev, - struct drm_i915_gem_request *req) +void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req) { struct request_boost *boost; - if (req == NULL || INTEL_INFO(dev)->gen < 6) + if (req == NULL || INTEL_GEN(req->i915) < 6) return; if (i915_gem_request_completed(req, true)) @@ -7416,7 +7683,7 @@ void intel_queue_rps_boost_for_request(struct drm_device *dev, boost->req = req; INIT_WORK(&boost->work, __intel_rps_boost_work); - queue_work(to_i915(dev)->wq, &boost->work); + 
queue_work(req->i915->wq, &boost->work); } void intel_pm_setup(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index a788d1e9589b..29a09bf6bd18 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -176,7 +176,6 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t aux_clock_divider; i915_reg_t aux_ctl_reg; - int precharge = 0x3; static const uint8_t aux_msg[] = { [0] = DP_AUX_NATIVE_WRITE << 4, [1] = DP_SET_POWER >> 8, @@ -185,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) [4] = DP_SET_POWER_D0, }; enum port port = dig_port->port; + u32 aux_ctl; int i; BUILD_BUG_ON(sizeof(aux_msg) > 20); @@ -197,6 +197,13 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF, DP_AUX_FRAME_SYNC_ENABLE); + if (dev_priv->psr.link_standby) + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, + DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); + else + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, + DP_PSR_ENABLE); + aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port); /* Setup AUX registers */ @@ -204,33 +211,9 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2), intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); - if (INTEL_INFO(dev)->gen >= 9) { - uint32_t val; - - val = I915_READ(aux_ctl_reg); - val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK; - val |= DP_AUX_CH_CTL_TIME_OUT_1600us; - val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK; - val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); - /* Use hardcoded data values for PSR, frame sync and GTC */ - val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL; - val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL; - val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL; - I915_WRITE(aux_ctl_reg, val); - } else { - I915_WRITE(aux_ctl_reg, - DP_AUX_CH_CTL_TIME_OUT_400us | - (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | - (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); - } - - if (dev_priv->psr.link_standby) - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, - DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); - else - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, - DP_PSR_ENABLE); + aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg), + aux_clock_divider); + I915_WRITE(aux_ctl_reg, aux_ctl); } static void vlv_psr_enable_source(struct intel_dp *intel_dp) @@ -272,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t max_sleep_time = 0x1f; - /* - * Let's respect VBT in case VBT asks a higher idle_frame value. - * Let's use 6 as the minimum to cover all known cases including - * the off-by-one issue that HW has in some cases. Also there are - * cases where sink should be able to train - * with the 5 or 6 idle patterns. + /* Lately it was identified that depending on panel idle frame count + * calculated at HW can be off by 1. So let's use what came + * from VBT + 1. + * There are also other cases where panel demands at least 4 + * but VBT is not being set. To cover these 2 cases lets use + * at least 5 when VBT isn't set to be on the safest side. 
*/ - uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); + uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1; uint32_t val = EDP_PSR_ENABLE; val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 04402bb9d26b..8d35a3978f9b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -34,6 +34,11 @@ #include "i915_trace.h" #include "intel_drv.h" +/* Rough estimate of the typical request size, performing a flush, + * set-context and then emitting the batch. + */ +#define LEGACY_REQUEST_SIZE 200 + int __intel_ring_space(int head, int tail, int size) { int space = head - tail; @@ -55,7 +60,7 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf) bool intel_engine_stopped(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine); } @@ -101,7 +106,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 flush_domains) { struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; u32 cmd; int ret; @@ -140,7 +144,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, cmd |= MI_EXE_FLUSH; if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && - (IS_G4X(dev) || IS_GEN5(dev))) + (IS_G4X(req->i915) || IS_GEN5(req->i915))) cmd |= MI_INVALIDATE_ISP; ret = intel_ring_begin(req, 2); @@ -426,19 +430,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, static void ring_write_tail(struct intel_engine_cs *engine, u32 value) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; I915_WRITE_TAIL(engine, value); } u64 intel_ring_get_active_head(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u64 acthd; - if (INTEL_INFO(engine->dev)->gen >= 8) + if (INTEL_GEN(dev_priv) >= 8) acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base), RING_ACTHD_UDW(engine->mmio_base)); - else if (INTEL_INFO(engine->dev)->gen >= 4) + else if (INTEL_GEN(dev_priv) >= 4) acthd = I915_READ(RING_ACTHD(engine->mmio_base)); else acthd = I915_READ(ACTHD); @@ -448,25 +452,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine) static void ring_setup_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u32 addr; addr = dev_priv->status_page_dmah->busaddr; - if (INTEL_INFO(engine->dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; I915_WRITE(HWS_PGA, addr); } static void intel_ring_setup_status_page(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; i915_reg_t mmio; /* The ring status page addresses are no longer next to the rest of * the ring registers as of gen7. 
*/ - if (IS_GEN7(dev)) { + if (IS_GEN7(dev_priv)) { switch (engine->id) { case RCS: mmio = RENDER_HWS_PGA_GEN7; @@ -486,7 +489,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) mmio = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN6(engine->dev)) { + } else if (IS_GEN6(dev_priv)) { mmio = RING_HWS_PGA_GEN6(engine->mmio_base); } else { /* XXX: gen8 returns to sanity */ @@ -503,7 +506,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) * arises: do we still need this and if so how should we go about * invalidating the TLB? */ - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { + if (IS_GEN(dev_priv, 6, 7)) { i915_reg_t reg = RING_INSTPM(engine->mmio_base); /* ring should be idle before issuing a sync flush*/ @@ -521,9 +524,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) static bool stop_ring(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; - if (!IS_GEN2(engine->dev)) { + if (!IS_GEN2(dev_priv)) { I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { DRM_ERROR("%s : timed out trying to stop ring\n", @@ -541,7 +544,7 @@ static bool stop_ring(struct intel_engine_cs *engine) I915_WRITE_HEAD(engine, 0); engine->write_tail(engine, 0); - if (!IS_GEN2(engine->dev)) { + if (!IS_GEN2(dev_priv)) { (void)I915_READ_CTL(engine); I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); } @@ -556,8 +559,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine) static int init_ring_common(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct intel_ringbuffer *ringbuf = engine->buffer; struct drm_i915_gem_object *obj = ringbuf->obj; int ret = 0; @@ -587,7 +589,7 @@ static int init_ring_common(struct intel_engine_cs *engine) } } - if (I915_NEED_GFX_HWS(dev)) + if (I915_NEED_GFX_HWS(dev_priv)) intel_ring_setup_status_page(engine); else ring_setup_phys_status_page(engine); @@ -644,12 +646,10 @@ out: void intel_fini_pipe_control(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - if (engine->scratch.obj == NULL) return; - if (INTEL_INFO(dev)->gen >= 5) { + if (INTEL_GEN(engine->i915) >= 5) { kunmap(sg_page(engine->scratch.obj->pages->sgl)); i915_gem_object_ggtt_unpin(engine->scratch.obj); } @@ -665,10 +665,11 @@ intel_init_pipe_control(struct intel_engine_cs *engine) WARN_ON(engine->scratch.obj); - engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096); - if (engine->scratch.obj == NULL) { + engine->scratch.obj = i915_gem_object_create(engine->i915->dev, 4096); + if (IS_ERR(engine->scratch.obj)) { DRM_ERROR("Failed to allocate seqno page\n"); - ret = -ENOMEM; + ret = PTR_ERR(engine->scratch.obj); + engine->scratch.obj = NULL; goto err; } @@ -702,11 +703,9 @@ err: static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) { - int ret, i; struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_workarounds *w = &dev_priv->workarounds; + struct i915_workarounds *w = &req->i915->workarounds; + int ret, i; if (w->count == 0) return 0; @@ -795,7 +794,7 @@ static int wa_add(struct drm_i915_private *dev_priv, static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, 
i915_reg_t reg) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; struct i915_workarounds *wa = &dev_priv->workarounds; const uint32_t index = wa->hw_whitelist_count[engine->id]; @@ -811,8 +810,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, static int gen8_init_workarounds(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); @@ -863,9 +861,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine) static int bdw_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen8_init_workarounds(engine); if (ret) @@ -885,16 +882,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine) /* WaForceContextSaveRestoreNonCoherent:bdw */ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ - (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); + (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); return 0; } static int chv_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen8_init_workarounds(engine); if (ret) @@ -911,8 +907,7 @@ static int chv_init_workarounds(struct intel_engine_cs *engine) static int gen9_init_workarounds(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; uint32_t tmp; int ret; @@ -935,14 +930,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, GEN9_DG_MIRROR_FIX_ENABLE); /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, GEN9_RHWO_OPTIMIZATION_DISABLE); /* @@ -968,20 +963,20 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) GEN9_CCS_TLB_PREFETCH_ENABLE); /* WaDisableMaskBasedCammingInRCC:skl,bxt */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) || - IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) || + IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, PIXEL_MASK_CAMMING_DISABLE); /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; - if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) || - IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_F0, REVID_FOREVER) || + IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */ - if 
(IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0)) + if (IS_SKYLAKE(dev_priv) || IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, GEN8_SAMPLER_POWER_BYPASS_DIS); @@ -1007,8 +1002,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) static int skl_tune_iz_hashing(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; u8 vals[3] = { 0, 0, 0 }; unsigned int i; @@ -1049,9 +1043,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine) static int skl_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen9_init_workarounds(engine); if (ret) @@ -1062,12 +1055,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) * until D0 which is the default case so this is equivalent to * !WaDisablePerCtxtPreemptionGranularityControl:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) { + if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) { I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); } - if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0)) { /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ I915_WRITE(FF_SLICE_CS_CHICKEN2, _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); @@ -1076,24 +1069,24 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes * involving this register should also be added to WA batch as required. */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_E0)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) /* WaDisableLSQCROPERFforOCL:skl */ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_RO_PERF_DIS); /* WaEnableGapsTsvCreditFix:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) { + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) { I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE)); } /* WaDisablePowerCompilerClockGating:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0)) WA_SET_BIT_MASKED(HIZ_CHICKEN, BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); /* This is tied to WaForceContextSaveRestoreNonCoherent */ - if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) { + if (IS_SKL_REVID(dev_priv, 0, REVID_FOREVER)) { /* *Use Force Non-Coherent whenever executing a 3D context. 
This * is a workaround for a possible hang in the unlikely event @@ -1109,13 +1102,13 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) } /* WaBarrierPerformanceFixDisable:skl */ - if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0)) + if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0)) WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FENCE_DEST_SLM_DISABLE | HDC_BARRIER_PERFORMANCE_DISABLE); /* WaDisableSbeCacheDispatchPortSharing:skl */ - if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) + if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0)) WA_SET_BIT_MASKED( GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); @@ -1130,9 +1123,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) static int bxt_init_workarounds(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; ret = gen9_init_workarounds(engine); if (ret) @@ -1140,11 +1132,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaStoreMultiplePTEenable:bxt */ /* This is a requirement according to Hardware specification */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); /* WaSetClckGatingDisableMedia:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); } @@ -1154,7 +1146,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) STALL_DOP_GATING_DISABLE); /* WaDisableSbeCacheDispatchPortSharing:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) { WA_SET_BIT_MASKED( GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); @@ -1164,7 +1156,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */ /* WaDisableObjectLevelPreemtionForInstanceId:bxt */ /* WaDisableLSQCROPERFforOCL:bxt */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) { ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1); if (ret) return ret; @@ -1174,29 +1166,33 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) return ret; } + /* WaProgramL3SqcReg1DefaultForPerf:bxt */ + if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) + I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | + L3_HIGH_PRIO_CREDITS(2)); + return 0; } int init_workarounds_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; WARN_ON(engine->id != RCS); dev_priv->workarounds.count = 0; dev_priv->workarounds.hw_whitelist_count[RCS] = 0; - if (IS_BROADWELL(dev)) + if (IS_BROADWELL(dev_priv)) return bdw_init_workarounds(engine); - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev_priv)) return chv_init_workarounds(engine); - if (IS_SKYLAKE(dev)) + if (IS_SKYLAKE(dev_priv)) return skl_init_workarounds(engine); - if (IS_BROXTON(dev)) + if (IS_BROXTON(dev_priv)) return bxt_init_workarounds(engine); return 0; @@ -1204,14 +1200,13 @@ int init_workarounds_ring(struct intel_engine_cs *engine) static int init_render_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct 
drm_i915_private *dev_priv = engine->i915; int ret = init_ring_common(engine); if (ret) return ret; /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ - if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) + if (IS_GEN(dev_priv, 4, 6)) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); /* We need to disable the AsyncFlip performance optimisations in order @@ -1220,22 +1215,22 @@ static int init_render_ring(struct intel_engine_cs *engine) * * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv */ - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) + if (IS_GEN(dev_priv, 6, 7)) I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); /* Required for the hardware to program scanline values for waiting */ /* WaEnableFlushTlbInvalidationMode:snb */ - if (INTEL_INFO(dev)->gen == 6) + if (IS_GEN6(dev_priv)) I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ - if (IS_GEN7(dev)) + if (IS_GEN7(dev_priv)) I915_WRITE(GFX_MODE_GEN7, _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); - if (IS_GEN6(dev)) { + if (IS_GEN6(dev_priv)) { /* From the Sandybridge PRM, volume 1 part 3, page 24: * "If this bit is set, STCunit will have LRA as replacement * policy. [...] This bit must be reset. LRA replacement @@ -1245,19 +1240,18 @@ static int init_render_ring(struct intel_engine_cs *engine) _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); } - if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) + if (IS_GEN(dev_priv, 6, 7)) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); - if (HAS_L3_DPF(dev)) - I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); + if (HAS_L3_DPF(dev_priv)) + I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv)); return init_workarounds_ring(engine); } static void render_ring_cleanup(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; if (dev_priv->semaphore_obj) { i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj); @@ -1273,13 +1267,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, { #define MBOX_UPDATE_DWORDS 8 struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *waiter; enum intel_engine_id id; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; #undef MBOX_UPDATE_DWORDS @@ -1297,7 +1290,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_FLUSH_ENABLE); + PIPE_CONTROL_CS_STALL); intel_ring_emit(signaller, lower_32_bits(gtt_offset)); intel_ring_emit(signaller, upper_32_bits(gtt_offset)); intel_ring_emit(signaller, seqno); @@ -1315,13 +1308,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, { #define MBOX_UPDATE_DWORDS 6 struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *waiter; enum intel_engine_id 
id; int ret, num_rings; - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; #undef MBOX_UPDATE_DWORDS @@ -1354,14 +1346,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req, unsigned int num_dwords) { struct intel_engine_cs *signaller = signaller_req->engine; - struct drm_device *dev = signaller->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = signaller_req->i915; struct intel_engine_cs *useless; enum intel_engine_id id; int ret, num_rings; #define MBOX_UPDATE_DWORDS 3 - num_rings = hweight32(INTEL_INFO(dev)->ring_mask); + num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask); num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2); #undef MBOX_UPDATE_DWORDS @@ -1420,10 +1411,38 @@ gen6_add_request(struct drm_i915_gem_request *req) return 0; } -static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, +static int +gen8_render_add_request(struct drm_i915_gem_request *req) +{ + struct intel_engine_cs *engine = req->engine; + int ret; + + if (engine->semaphore.signal) + ret = engine->semaphore.signal(req, 8); + else + ret = intel_ring_begin(req, 8); + if (ret) + return ret; + + intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_QW_WRITE)); + intel_ring_emit(engine, intel_hws_seqno_address(req->engine)); + intel_ring_emit(engine, 0); + intel_ring_emit(engine, i915_gem_request_get_seqno(req)); + /* We're thrashing one dword of HWS. */ + intel_ring_emit(engine, 0); + intel_ring_emit(engine, MI_USER_INTERRUPT); + intel_ring_emit(engine, MI_NOOP); + __intel_ring_advance(engine); + + return 0; +} + +static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv, u32 seqno) { - struct drm_i915_private *dev_priv = dev->dev_private; return dev_priv->last_seqno < seqno; } @@ -1441,7 +1460,8 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, u32 seqno) { struct intel_engine_cs *waiter = waiter_req->engine; - struct drm_i915_private *dev_priv = waiter->dev->dev_private; + struct drm_i915_private *dev_priv = waiter_req->i915; + struct i915_hw_ppgtt *ppgtt; int ret; ret = intel_ring_begin(waiter_req, 4); @@ -1450,7 +1470,6 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, intel_ring_emit(waiter, MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT | - MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_GTE_SDD); intel_ring_emit(waiter, seqno); intel_ring_emit(waiter, @@ -1458,6 +1477,15 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, intel_ring_emit(waiter, upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); intel_ring_advance(waiter); + + /* When the !RCS engines idle waiting upon a semaphore, they lose their + * pagetables and we must reload them before executing the batch. + * We do this on the i915_switch_context() following the wait and + * before the dispatch. 
+ */ + ppgtt = waiter_req->ctx->ppgtt; + if (ppgtt && waiter_req->engine->id != RCS) + ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine); return 0; } @@ -1486,7 +1514,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req, return ret; /* If seqno wrap happened, omit the wait with no-ops */ - if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { + if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) { intel_ring_emit(waiter, dw1 | wait_mbox); intel_ring_emit(waiter, seqno); intel_ring_emit(waiter, 0); @@ -1567,7 +1595,7 @@ pc_render_add_request(struct drm_i915_gem_request *req) static void gen6_seqno_barrier(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; /* Workaround to force correct ordering between irq and seqno writes on * ivb (and maybe also on snb) by reading from a CS register (like @@ -1616,8 +1644,7 @@ pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno) static bool gen5_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1634,8 +1661,7 @@ gen5_ring_get_irq(struct intel_engine_cs *engine) static void gen5_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1647,8 +1673,7 @@ gen5_ring_put_irq(struct intel_engine_cs *engine) static bool i9xx_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (!intel_irqs_enabled(dev_priv)) @@ -1668,8 +1693,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *engine) static void i9xx_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1684,8 +1708,7 @@ i9xx_ring_put_irq(struct intel_engine_cs *engine) static bool i8xx_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (!intel_irqs_enabled(dev_priv)) @@ -1705,8 +1728,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *engine) static void i8xx_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1759,8 +1781,7 @@ i9xx_add_request(struct drm_i915_gem_request *req) static bool gen6_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1768,10 +1789,10 @@ gen6_ring_get_irq(struct intel_engine_cs *engine) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (engine->irq_refcount++ == 0) { - if 
(HAS_L3_DPF(dev) && engine->id == RCS) + if (HAS_L3_DPF(dev_priv) && engine->id == RCS) I915_WRITE_IMR(engine, ~(engine->irq_enable_mask | - GT_PARITY_ERROR(dev))); + GT_PARITY_ERROR(dev_priv))); else I915_WRITE_IMR(engine, ~engine->irq_enable_mask); gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); @@ -1784,14 +1805,13 @@ gen6_ring_get_irq(struct intel_engine_cs *engine) static void gen6_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--engine->irq_refcount == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) - I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); + if (HAS_L3_DPF(dev_priv) && engine->id == RCS) + I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv)); else I915_WRITE_IMR(engine, ~0); gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); @@ -1802,8 +1822,7 @@ gen6_ring_put_irq(struct intel_engine_cs *engine) static bool hsw_vebox_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1822,8 +1841,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *engine) static void hsw_vebox_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -1837,8 +1855,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *engine) static bool gen8_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; if (WARN_ON(!intel_irqs_enabled(dev_priv))) @@ -1846,7 +1863,7 @@ gen8_ring_get_irq(struct intel_engine_cs *engine) spin_lock_irqsave(&dev_priv->irq_lock, flags); if (engine->irq_refcount++ == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) { + if (HAS_L3_DPF(dev_priv) && engine->id == RCS) { I915_WRITE_IMR(engine, ~(engine->irq_enable_mask | GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); @@ -1863,13 +1880,12 @@ gen8_ring_get_irq(struct intel_engine_cs *engine) static void gen8_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = engine->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); if (--engine->irq_refcount == 0) { - if (HAS_L3_DPF(dev) && engine->id == RCS) { + if (HAS_L3_DPF(dev_priv) && engine->id == RCS) { I915_WRITE_IMR(engine, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); } else { @@ -1991,12 +2007,12 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req, static void cleanup_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; if (!dev_priv->status_page_dmah) return; - drm_pci_free(engine->dev, dev_priv->status_page_dmah); + drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah); engine->status_page.page_addr = NULL; } @@ -2022,10 +2038,10 @@ static int init_status_page(struct intel_engine_cs *engine) unsigned flags; int ret; - obj = i915_gem_alloc_object(engine->dev, 4096); - if 
(obj == NULL) { + obj = i915_gem_object_create(engine->i915->dev, 4096); + if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate status page\n"); - return -ENOMEM; + return PTR_ERR(obj); } ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); @@ -2033,7 +2049,7 @@ static int init_status_page(struct intel_engine_cs *engine) goto err_unref; flags = 0; - if (!HAS_LLC(engine->dev)) + if (!HAS_LLC(engine->i915)) /* On g33, we cannot place HWS above 256MiB, so * restrict its pinning to the low mappable arena. * Though this restriction is not documented for @@ -2067,11 +2083,11 @@ err_unref: static int init_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; if (!dev_priv->status_page_dmah) { dev_priv->status_page_dmah = - drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE); + drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE); if (!dev_priv->status_page_dmah) return -ENOMEM; } @@ -2084,20 +2100,22 @@ static int init_phys_status_page(struct intel_engine_cs *engine) void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) { + GEM_BUG_ON(ringbuf->vma == NULL); + GEM_BUG_ON(ringbuf->virtual_start == NULL); + if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen) i915_gem_object_unpin_map(ringbuf->obj); else - iounmap(ringbuf->virtual_start); + i915_vma_unpin_iomap(ringbuf->vma); ringbuf->virtual_start = NULL; - ringbuf->vma = NULL; + i915_gem_object_ggtt_unpin(ringbuf->obj); + ringbuf->vma = NULL; } -int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, +int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, struct intel_ringbuffer *ringbuf) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_object *obj = ringbuf->obj; /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ unsigned flags = PIN_OFFSET_BIAS | 4096; @@ -2131,10 +2149,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, /* Access through the GTT requires the device to be awake. */ assert_rpm_wakelock_held(dev_priv); - addr = ioremap_wc(ggtt->mappable_base + - i915_gem_obj_ggtt_offset(obj), ringbuf->size); - if (addr == NULL) { - ret = -ENOMEM; + addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj)); + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); goto err_unpin; } } @@ -2163,9 +2180,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev, if (!HAS_LLC(dev)) obj = i915_gem_object_create_stolen(dev, ringbuf->size); if (obj == NULL) - obj = i915_gem_alloc_object(dev, ringbuf->size); - if (obj == NULL) - return -ENOMEM; + obj = i915_gem_object_create(dev, ringbuf->size); + if (IS_ERR(obj)) + return PTR_ERR(obj); /* mark ring buffers as read-only from GPU side by default */ obj->gt_ro = 1; @@ -2197,13 +2214,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) * of the buffer. 
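As an aside, the allocation changes above move i915_gem_object_create() from returning NULL on failure to returning an encoded error pointer, so callers can propagate the real errno via PTR_ERR() instead of assuming -ENOMEM. A minimal userspace sketch of the kernel's ERR_PTR/IS_ERR encoding (the helpers are reimplemented here purely for illustration; include/linux/err.h holds the real ones):

    #include <stdio.h>
    #include <errno.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr) { return IS_ERR_VALUE((unsigned long)ptr); }

    /* Stand-in for an allocator that reports *why* it failed instead of
     * returning NULL and forcing the caller to guess -ENOMEM. */
    static void *create_object(size_t size)
    {
        if (size == 0)
            return ERR_PTR(-EINVAL);
        void *obj = malloc(size);
        return obj ? obj : ERR_PTR(-ENOMEM);
    }

    int main(void)
    {
        void *obj = create_object(0);
        if (IS_ERR(obj))
            printf("create failed: %ld\n", PTR_ERR(obj)); /* prints -22 */
        else
            free(obj);
        return 0;
    }

The trick is that the top MAX_ERRNO values of the address space are never valid pointers, so a single return value can carry either an object or an errno.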
*/ ring->effective_size = size; - if (IS_I830(engine->dev) || IS_845G(engine->dev)) + if (IS_I830(engine->i915) || IS_845G(engine->i915)) ring->effective_size -= 2 * CACHELINE_BYTES; ring->last_retired_head = -1; intel_ring_update_space(ring); - ret = intel_alloc_ringbuffer_obj(engine->dev, ring); + ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring); if (ret) { DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", engine->name, ret); @@ -2226,12 +2243,13 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring) static int intel_init_ring_buffer(struct drm_device *dev, struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_ringbuffer *ringbuf; int ret; WARN_ON(engine->buffer); - engine->dev = dev; + engine->i915 = dev_priv; INIT_LIST_HEAD(&engine->active_list); INIT_LIST_HEAD(&engine->request_list); INIT_LIST_HEAD(&engine->execlist_queue); @@ -2249,7 +2267,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, } engine->buffer = ringbuf; - if (I915_NEED_GFX_HWS(dev)) { + if (I915_NEED_GFX_HWS(dev_priv)) { ret = init_status_page(engine); if (ret) goto error; @@ -2260,7 +2278,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, goto error; } - ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); + ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf); if (ret) { DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", engine->name, ret); @@ -2286,11 +2304,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) if (!intel_engine_initialized(engine)) return; - dev_priv = to_i915(engine->dev); + dev_priv = engine->i915; if (engine->buffer) { intel_stop_engine(engine); - WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); + WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); intel_unpin_ringbuffer_obj(engine->buffer); intel_ringbuffer_free(engine->buffer); @@ -2300,7 +2318,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) if (engine->cleanup) engine->cleanup(engine); - if (I915_NEED_GFX_HWS(engine->dev)) { + if (I915_NEED_GFX_HWS(dev_priv)) { cleanup_status_page(engine); } else { WARN_ON(engine->id != RCS); @@ -2309,7 +2327,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine) i915_cmd_parser_fini_ring(engine); i915_gem_batch_pool_fini(&engine->batch_pool); - engine->dev = NULL; + engine->i915 = NULL; } int intel_engine_idle(struct intel_engine_cs *engine) @@ -2332,46 +2350,22 @@ int intel_engine_idle(struct intel_engine_cs *engine) int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) { - request->ringbuf = request->engine->buffer; - return 0; -} + int ret; -int intel_ring_reserve_space(struct drm_i915_gem_request *request) -{ - /* - * The first call merely notes the reserve request and is common for - * all back ends. The subsequent localised _begin() call actually - * ensures that the reservation is available. Without the begin, if - * the request creator immediately submitted the request without - * adding any commands to it then there might not actually be - * sufficient room for the submission commands. + /* Flush enough space to reduce the likelihood of waiting after + * we start building the request - in which case we will just + * have to repeat work. 
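The comment above describes the new per-request reservation: request->reserved_space is charged while commands are emitted, so the closing add-request emission can never find the ring full. A toy model of that invariant, assuming a classic power-of-two ring (nothing here is driver code):

    #include <stdio.h>
    #include <errno.h>

    #define RING_SIZE 4096
    #define RESERVED  336   /* MIN_SPACE_FOR_ADD_REQUEST in the patch */

    struct ring { int head, tail, reserved; };

    static int ring_space(const struct ring *r)
    {
        int space = r->head - r->tail;
        if (space <= 0)
            space += RING_SIZE;
        return space - 1; /* keep one byte free so a full ring != empty */
    }

    static int ring_begin(struct ring *r, int bytes)
    {
        /* every emission must leave the reserved tail room untouched */
        if (bytes + r->reserved > ring_space(r))
            return -EAGAIN; /* the driver would wait for retirement here */
        return 0;
    }

    int main(void)
    {
        struct ring r = { .head = 0, .tail = 0, .reserved = RESERVED };
        printf("usable: %d of %d\n", ring_space(&r) - r.reserved, RING_SIZE);
        if (ring_begin(&r, 4000) == -EAGAIN)
            printf("4000-byte emission must wait: reservation held back\n");
        return 0;
    }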
*/ - intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); + request->reserved_space += LEGACY_REQUEST_SIZE; - return intel_ring_begin(request, 0); -} - -void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) -{ - GEM_BUG_ON(ringbuf->reserved_size); - ringbuf->reserved_size = size; -} - -void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf) -{ - GEM_BUG_ON(!ringbuf->reserved_size); - ringbuf->reserved_size = 0; -} + request->ringbuf = request->engine->buffer; -void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) -{ - GEM_BUG_ON(!ringbuf->reserved_size); - ringbuf->reserved_size = 0; -} + ret = intel_ring_begin(request, 0); + if (ret) + return ret; -void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) -{ - GEM_BUG_ON(ringbuf->reserved_size); + request->reserved_space -= LEGACY_REQUEST_SIZE; + return 0; } static int wait_for_space(struct drm_i915_gem_request *req, int bytes) @@ -2393,7 +2387,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes) * * See also i915_gem_request_alloc() and i915_add_request(). */ - GEM_BUG_ON(!ringbuf->reserved_size); + GEM_BUG_ON(!req->reserved_space); list_for_each_entry(target, &engine->request_list, list) { unsigned space; @@ -2428,7 +2422,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) int total_bytes, wait_bytes; bool need_wrap = false; - total_bytes = bytes + ringbuf->reserved_size; + total_bytes = bytes + req->reserved_space; if (unlikely(bytes > remain_usable)) { /* @@ -2444,7 +2438,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) * and only need to effectively wait for the reserved * size space from the start of ringbuffer. */ - wait_bytes = remain_actual + ringbuf->reserved_size; + wait_bytes = remain_actual + req->reserved_space; } else { /* No wrapping required, just waiting. */ wait_bytes = total_bytes; @@ -2501,7 +2495,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req) void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) { - struct drm_i915_private *dev_priv = to_i915(engine->dev); + struct drm_i915_private *dev_priv = engine->i915; /* Our semaphore implementation is strictly monotonic (i.e. we proceed * so long as the semaphore value in the register/page is greater @@ -2511,7 +2505,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) * the semaphore value, then when the seqno moves backwards all * future waits will complete instantly (causing rendering corruption). 
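The monotonicity argument above is the same one behind the driver's i915_seqno_passed() comparison: a wraparound-safe signed difference decides whether a seqno has been reached, which is why letting seqno move backwards while the semaphore registers still hold stale large values would make every new wait complete immediately. A small standalone illustration (function name hypothetical):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool semaphore_passed(uint32_t signalled, uint32_t wait_for)
    {
        /* strictly monotonic: any past signal satisfies any older wait */
        return (int32_t)(signalled - wait_for) >= 0;
    }

    int main(void)
    {
        printf("%d\n", semaphore_passed(100, 90));  /* 1: already signalled */
        printf("%d\n", semaphore_passed(90, 100));  /* 0: must wait */
        /* if seqno is reset to a small value without clearing RING_SYNC_*,
         * stale large 'signalled' values make every new wait pass at once */
        printf("%d\n", semaphore_passed(100, 5));   /* 1: the corruption case */
        return 0;
    }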
*/ - if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) { + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); if (HAS_VEBOX(dev_priv)) @@ -2537,7 +2531,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, u32 value) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; /* Every tail move must follow the sequence below */ @@ -2579,7 +2573,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, return ret; cmd = MI_FLUSH_DW; - if (INTEL_INFO(engine->dev)->gen >= 8) + if (INTEL_GEN(req->i915) >= 8) cmd += 1; /* We always require a command barrier so that subsequent @@ -2601,7 +2595,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, intel_ring_emit(engine, cmd); intel_ring_emit(engine, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); - if (INTEL_INFO(engine->dev)->gen >= 8) { + if (INTEL_GEN(req->i915) >= 8) { intel_ring_emit(engine, 0); /* upper addr */ intel_ring_emit(engine, 0); /* value */ } else { @@ -2692,7 +2686,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 invalidate, u32 flush) { struct intel_engine_cs *engine = req->engine; - struct drm_device *dev = engine->dev; uint32_t cmd; int ret; @@ -2701,7 +2694,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, return ret; cmd = MI_FLUSH_DW; - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_GEN(req->i915) >= 8) cmd += 1; /* We always require a command barrier so that subsequent @@ -2722,7 +2715,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, intel_ring_emit(engine, cmd); intel_ring_emit(engine, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(req->i915) >= 8) { intel_ring_emit(engine, 0); /* upper addr */ intel_ring_emit(engine, 0); /* value */ } else { @@ -2747,10 +2740,10 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->hw_id = 0; engine->mmio_base = RENDER_RING_BASE; - if (INTEL_INFO(dev)->gen >= 8) { - if (i915_semaphore_is_enabled(dev)) { - obj = i915_gem_alloc_object(dev, 4096); - if (obj == NULL) { + if (INTEL_GEN(dev_priv) >= 8) { + if (i915_semaphore_is_enabled(dev_priv)) { + obj = i915_gem_object_create(dev, 4096); + if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate semaphore bo. 
Disabling semaphores\n"); i915.semaphores = 0; } else { @@ -2766,25 +2759,24 @@ int intel_init_render_ring_buffer(struct drm_device *dev) } engine->init_context = intel_rcs_ctx_init; - engine->add_request = gen6_add_request; + engine->add_request = gen8_render_add_request; engine->flush = gen8_render_ring_flush; engine->irq_get = gen8_ring_get_irq; engine->irq_put = gen8_ring_put_irq; engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; - engine->irq_seqno_barrier = gen6_seqno_barrier; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { WARN_ON(!dev_priv->semaphore_obj); engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_rcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); } - } else if (INTEL_INFO(dev)->gen >= 6) { + } else if (INTEL_GEN(dev_priv) >= 6) { engine->init_context = intel_rcs_ctx_init; engine->add_request = gen6_add_request; engine->flush = gen7_render_ring_flush; - if (INTEL_INFO(dev)->gen == 6) + if (IS_GEN6(dev_priv)) engine->flush = gen6_render_ring_flush; engine->irq_get = gen6_ring_get_irq; engine->irq_put = gen6_ring_put_irq; @@ -2792,7 +2784,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->irq_seqno_barrier = gen6_seqno_barrier; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen6_ring_sync; engine->semaphore.signal = gen6_signal; /* @@ -2813,7 +2805,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; } - } else if (IS_GEN5(dev)) { + } else if (IS_GEN5(dev_priv)) { engine->add_request = pc_render_add_request; engine->flush = gen4_render_ring_flush; engine->get_seqno = pc_render_get_seqno; @@ -2824,13 +2816,13 @@ int intel_init_render_ring_buffer(struct drm_device *dev) GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; } else { engine->add_request = i9xx_add_request; - if (INTEL_INFO(dev)->gen < 4) + if (INTEL_GEN(dev_priv) < 4) engine->flush = gen2_render_ring_flush; else engine->flush = gen4_render_ring_flush; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (IS_GEN2(dev)) { + if (IS_GEN2(dev_priv)) { engine->irq_get = i8xx_ring_get_irq; engine->irq_put = i8xx_ring_put_irq; } else { @@ -2841,15 +2833,15 @@ int intel_init_render_ring_buffer(struct drm_device *dev) } engine->write_tail = ring_write_tail; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; - else if (IS_GEN8(dev)) + else if (IS_GEN8(dev_priv)) engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - else if (INTEL_INFO(dev)->gen >= 6) + else if (INTEL_GEN(dev_priv) >= 6) engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - else if (INTEL_INFO(dev)->gen >= 4) + else if (INTEL_GEN(dev_priv) >= 4) engine->dispatch_execbuffer = i965_dispatch_execbuffer; - else if (IS_I830(dev) || IS_845G(dev)) + else if (IS_I830(dev_priv) || IS_845G(dev_priv)) engine->dispatch_execbuffer = i830_dispatch_execbuffer; else engine->dispatch_execbuffer = i915_dispatch_execbuffer; @@ -2857,11 +2849,11 @@ int intel_init_render_ring_buffer(struct drm_device *dev) engine->cleanup = render_ring_cleanup; /* Workaround batchbuffer to combat CS tlb bug. 
*/ - if (HAS_BROKEN_CS_TLB(dev)) { - obj = i915_gem_alloc_object(dev, I830_WA_SIZE); - if (obj == NULL) { + if (HAS_BROKEN_CS_TLB(dev_priv)) { + obj = i915_gem_object_create(dev, I830_WA_SIZE); + if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate batch bo\n"); - return -ENOMEM; + return PTR_ERR(obj); } ret = i915_gem_obj_ggtt_pin(obj, 0, 0); @@ -2879,7 +2871,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) if (ret) return ret; - if (INTEL_INFO(dev)->gen >= 5) { + if (INTEL_GEN(dev_priv) >= 5) { ret = intel_init_pipe_control(engine); if (ret) return ret; @@ -2899,24 +2891,24 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) engine->hw_id = 1; engine->write_tail = ring_write_tail; - if (INTEL_INFO(dev)->gen >= 6) { + if (INTEL_GEN(dev_priv) >= 6) { engine->mmio_base = GEN6_BSD_RING_BASE; /* gen6 bsd needs a special wa for tail updates */ - if (IS_GEN6(dev)) + if (IS_GEN6(dev_priv)) engine->write_tail = gen6_bsd_ring_write_tail; engine->flush = gen6_bsd_ring_flush; engine->add_request = gen6_add_request; engine->irq_seqno_barrier = gen6_seqno_barrier; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; engine->irq_get = gen8_ring_get_irq; engine->irq_put = gen8_ring_put_irq; engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_xcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); @@ -2927,7 +2919,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) engine->irq_put = gen6_ring_put_irq; engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen6_ring_sync; engine->semaphore.signal = gen6_signal; engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; @@ -2948,7 +2940,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) engine->add_request = i9xx_add_request; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (IS_GEN5(dev)) { + if (IS_GEN5(dev_priv)) { engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; engine->irq_get = gen5_ring_get_irq; engine->irq_put = gen5_ring_put_irq; @@ -2990,7 +2982,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) engine->irq_put = gen8_ring_put_irq; engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_xcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); @@ -3017,13 +3009,13 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) engine->irq_seqno_barrier = gen6_seqno_barrier; engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; engine->irq_get = gen8_ring_get_irq; engine->irq_put = gen8_ring_put_irq; engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_xcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); @@ -3033,7 +3025,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) 
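The intel_init_*_ring_buffer() functions above and below are essentially per-generation dispatch tables: every engine callback is picked once at init time from INTEL_GEN()/IS_GEN*() checks. A condensed sketch of the render-engine selection, returning the callback names as strings purely for illustration:

    #include <stdio.h>

    struct engine_ops {
        const char *add_request;
        const char *flush;
    };

    static struct engine_ops pick_render_ops(int gen)
    {
        if (gen >= 8)
            return (struct engine_ops){ "gen8_render_add_request", "gen8_render_ring_flush" };
        if (gen >= 6)
            return (struct engine_ops){ "gen6_add_request",
                gen == 6 ? "gen6_render_ring_flush" : "gen7_render_ring_flush" };
        if (gen == 5)
            return (struct engine_ops){ "pc_render_add_request", "gen4_render_ring_flush" };
        return (struct engine_ops){ "i9xx_add_request",
            gen < 4 ? "gen2_render_ring_flush" : "gen4_render_ring_flush" };
    }

    int main(void)
    {
        for (int gen = 2; gen <= 9; gen++) {
            struct engine_ops ops = pick_render_ops(gen);
            printf("gen%d: %s / %s\n", gen, ops.add_request, ops.flush);
        }
        return 0;
    }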
engine->irq_get = gen6_ring_get_irq; engine->irq_put = gen6_ring_put_irq; engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.signal = gen6_signal; engine->semaphore.sync_to = gen6_ring_sync; /* @@ -3078,13 +3070,13 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) engine->get_seqno = ring_get_seqno; engine->set_seqno = ring_set_seqno; - if (INTEL_INFO(dev)->gen >= 8) { + if (INTEL_GEN(dev_priv) >= 8) { engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; engine->irq_get = gen8_ring_get_irq; engine->irq_put = gen8_ring_put_irq; engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen8_ring_sync; engine->semaphore.signal = gen8_xcs_signal; GEN8_RING_SEMAPHORE_INIT(engine); @@ -3094,7 +3086,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) engine->irq_get = hsw_vebox_get_irq; engine->irq_put = hsw_vebox_put_irq; engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; - if (i915_semaphore_is_enabled(dev)) { + if (i915_semaphore_is_enabled(dev_priv)) { engine->semaphore.sync_to = gen6_ring_sync; engine->semaphore.signal = gen6_signal; engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index ff126485d398..b33c876fed20 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -107,7 +107,6 @@ struct intel_ringbuffer { int space; int size; int effective_size; - int reserved_size; /** We track the position of the requests in the ring buffer, and * when each is retired we increment last_retired_head as the GPU @@ -120,7 +119,7 @@ struct intel_ringbuffer { u32 last_retired_head; }; -struct intel_context; +struct i915_gem_context; struct drm_i915_reg_table; /* @@ -142,7 +141,8 @@ struct i915_ctx_workarounds { struct drm_i915_gem_object *obj; }; -struct intel_engine_cs { +struct intel_engine_cs { + struct drm_i915_private *i915; const char *name; enum intel_engine_id { RCS = 0, @@ -157,7 +157,6 @@ struct intel_engine_cs { unsigned int hw_id; unsigned int guc_id; /* XXX same as hw_id? 
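The intel_engine_cs change above (struct drm_i915_private *i915 replacing struct drm_device *dev) is the pattern behind most hunks in this series: store the driver-private pointer once and stop re-deriving it through dev->dev_private in every helper. A hypothetical standalone model of the two styles:

    #include <stdio.h>

    /* invented stand-ins for the dev / dev_private relationship;
     * the real structs are far bigger */
    struct drm_device_like { void *dev_private; };
    struct i915_private_like { struct drm_device_like *dev; int rawclk_freq; };

    /* old style: every helper takes the drm_device and re-derives the
     * private pointer, as the removed engine->dev code did */
    static int old_get_rawclk(struct drm_device_like *dev)
    {
        struct i915_private_like *dev_priv = dev->dev_private;
        return dev_priv->rawclk_freq;
    }

    /* new style: callers pass the private pointer they already hold */
    static int new_get_rawclk(struct i915_private_like *dev_priv)
    {
        return dev_priv->rawclk_freq;
    }

    int main(void)
    {
        struct drm_device_like dev;
        struct i915_private_like dev_priv = { .dev = &dev, .rawclk_freq = 24000 };

        dev.dev_private = &dev_priv;
        printf("%d %d\n", old_get_rawclk(&dev), new_get_rawclk(&dev_priv));
        return 0;
    }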
*/ u32 mmio_base; - struct drm_device *dev; struct intel_ringbuffer *buffer; struct list_head buffers; @@ -268,7 +267,6 @@ struct intel_engine_cs { struct tasklet_struct irq_tasklet; spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */ struct list_head execlist_queue; - struct list_head execlist_retired_req_list; unsigned int fw_domains; unsigned int next_context_status_buffer; unsigned int idle_lite_restore_wa; @@ -312,7 +310,7 @@ struct intel_engine_cs { wait_queue_head_t irq_queue; - struct intel_context *last_context; + struct i915_gem_context *last_context; struct intel_ring_hangcheck hangcheck; @@ -352,7 +350,7 @@ struct intel_engine_cs { static inline bool intel_engine_initialized(struct intel_engine_cs *engine) { - return engine->dev != NULL; + return engine->i915 != NULL; } static inline unsigned @@ -427,7 +425,7 @@ intel_write_status_page(struct intel_engine_cs *engine, struct intel_ringbuffer * intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size); -int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, +int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv, struct intel_ringbuffer *ringbuf); void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); void intel_ringbuffer_free(struct intel_ringbuffer *ring); @@ -486,26 +484,15 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) /* * Arbitrary size for largest possible 'add request' sequence. The code paths * are complex and variable. Empirical measurement shows that the worst case - * is ILK at 136 words. Reserving too much is better than reserving too little - * as that allows for corner cases that might have been missed. So the figure - * has been rounded up to 160 words. + * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However, + * we need to allocate double the largest single packet within that emission + * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW). */ -#define MIN_SPACE_FOR_ADD_REQUEST 160 +#define MIN_SPACE_FOR_ADD_REQUEST 336 -/* - * Reserve space in the ring to guarantee that the i915_add_request() call - * will always have sufficient room to do its stuff. The request creation - * code calls this automatically. - */ -void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size); -/* Cancel the reservation, e.g. because the request is being discarded. */ -void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf); -/* Use the reserved space - for use by i915_add_request() only. */ -void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf); -/* Finish with the reserved space - for use by i915_add_request() only. 
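The new MIN_SPACE_FOR_ADD_REQUEST comment above packs in some arithmetic worth spelling out: the BDW worst case is 6 + 6 + 36 dwords (192 bytes), and doubling the largest single packet to survive a tail wrap gives 6 + 6 + 72 = 84 dwords, i.e. the 336 bytes now reserved. A two-assert check:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        int worst_case = 6 + 6 + 36;        /* BDW add-request, in dwords */
        int reserved = 6 + 6 + 2 * 36;      /* double the big packet for wrap */

        assert(worst_case * 4 == 192);
        assert(reserved * 4 == 336);        /* == MIN_SPACE_FOR_ADD_REQUEST */
        printf("%d dwords = %d bytes\n", reserved, reserved * 4);
        return 0;
    }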
*/ -void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf); - -/* Legacy ringbuffer specific portion of reservation code: */ -int intel_ring_reserve_space(struct drm_i915_gem_request *request); +static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine) +{ + return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR; +} #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 7fb1da4e7fc3..fe8faf30bda7 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -806,15 +806,27 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0; } +static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) +{ + u32 tmp = I915_READ(DBUF_CTL); + + WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) != + (DBUF_POWER_STATE | DBUF_POWER_REQUEST), + "Unexpected DBuf power power state (0x%08x)\n", tmp); +} + static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - if (IS_BROXTON(dev_priv)) { - broxton_cdclk_verify_state(dev_priv); + WARN_ON(dev_priv->cdclk_freq != + dev_priv->display.get_display_clock_speed(dev_priv->dev)); + + gen9_assert_dbuf_enabled(dev_priv); + + if (IS_BROXTON(dev_priv)) broxton_ddi_phy_verify_state(dev_priv); - } } static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, @@ -948,6 +960,11 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) */ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); I915_WRITE(CBR1_VLV, 0); + + WARN_ON(dev_priv->rawclk_freq == 0); + + I915_WRITE(RAWCLK_FREQ_VLV, + DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000)); } static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) @@ -2171,6 +2188,28 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) mutex_unlock(&power_domains->lock); } +static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); + POSTING_READ(DBUF_CTL); + + udelay(10); + + if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) + DRM_ERROR("DBuf power enable timeout\n"); +} + +static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) +{ + I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); + POSTING_READ(DBUF_CTL); + + udelay(10); + + if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) + DRM_ERROR("DBuf power disable timeout!\n"); +} + static void skl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { @@ -2195,12 +2234,11 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); - if (!resume) - return; - skl_init_cdclk(dev_priv); - if (dev_priv->csr.dmc_payload) + gen9_dbuf_enable(dev_priv); + + if (resume && dev_priv->csr.dmc_payload) intel_csr_load_program(dev_priv); } @@ -2211,6 +2249,8 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv) gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + gen9_dbuf_disable(dev_priv); + skl_uninit_cdclk(dev_priv); /* The spec doesn't call for removing the reset handshake flag */ @@ -2255,9 +2295,11 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); broxton_init_cdclk(dev_priv); + + gen9_dbuf_enable(dev_priv); + broxton_ddi_phy_init(dev_priv); - 
broxton_cdclk_verify_state(dev_priv); broxton_ddi_phy_verify_state(dev_priv); if (resume && dev_priv->csr.dmc_payload) @@ -2272,6 +2314,9 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv) gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); broxton_ddi_phy_uninit(dev_priv); + + gen9_dbuf_disable(dev_priv); + broxton_uninit_cdclk(dev_priv); /* The spec doesn't call for removing the reset handshake flag */ diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 2128fae5687d..ab2d0658abe6 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2191,7 +2191,6 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { .get_modes = intel_sdvo_get_modes, .mode_valid = intel_sdvo_mode_valid, - .best_encoder = intel_best_encoder, }; static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) @@ -2981,7 +2980,7 @@ bool intel_sdvo_init(struct drm_device *dev, intel_encoder = &intel_sdvo->base; intel_encoder->type = INTEL_OUTPUT_SDVO; drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0, - NULL); + "SDVO %c", port_name(port)); /* Read the regs to test if we can talk to the device */ for (i = 0; i < 0x40; i++) { diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 0f3e2303e0e9..324ccb06397d 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -80,9 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, */ void intel_pipe_update_start(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; - enum pipe pipe = crtc->pipe; long timeout = msecs_to_jiffies_timeout(1); int scanline, min, max, vblank_start; wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); @@ -139,8 +137,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc) crtc->debug.scanline_start = scanline; crtc->debug.start_vbl_time = ktime_get(); - crtc->debug.start_vbl_count = - dev->driver->get_vblank_counter(dev, pipe); + crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); trace_i915_pipe_update_vblank_evaded(crtc); } @@ -154,14 +151,19 @@ void intel_pipe_update_start(struct intel_crtc *crtc) * re-enables interrupts and verifies the update was actually completed * before a vblank using the value of @start_vbl_count. 
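gen9_dbuf_enable()/gen9_dbuf_disable() above follow the usual request/acknowledge MMIO handshake: set a request bit, give the hardware a bounded settling time, then verify the status bit. A generic userspace model of that handshake (register and helper names invented; the hardware ack is simulated):

    #include <stdio.h>
    #include <stdbool.h>
    #include <errno.h>

    #define POWER_REQUEST (1u << 31)
    #define POWER_STATE   (1u << 30)

    static unsigned int fake_reg; /* stands in for an MMIO register */

    static unsigned int reg_read(void) { return fake_reg; }
    static void reg_write(unsigned int v)
    {
        fake_reg = v;
        /* model hardware acking the request immediately */
        if (v & POWER_REQUEST)
            fake_reg |= POWER_STATE;
        else
            fake_reg &= ~POWER_STATE;
    }

    static int power_well_set(bool enable)
    {
        unsigned int v = reg_read();
        reg_write(enable ? v | POWER_REQUEST : v & ~POWER_REQUEST);
        /* real code inserts a posting read plus udelay(10) here */
        for (int tries = 0; tries < 100; tries++) {
            bool on = reg_read() & POWER_STATE;
            if (on == enable)
                return 0;
        }
        return -ETIMEDOUT; /* the driver logs a DRM_ERROR instead */
    }

    int main(void)
    {
        printf("enable:  %d\n", power_well_set(true));
        printf("disable: %d\n", power_well_set(false));
        return 0;
    }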
*/ -void intel_pipe_update_end(struct intel_crtc *crtc) +void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work) { - struct drm_device *dev = crtc->base.dev; enum pipe pipe = crtc->pipe; int scanline_end = intel_get_crtc_scanline(crtc); - u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe); + u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc); ktime_t end_vbl_time = ktime_get(); + if (work) { + work->flip_queued_vblank = end_vbl_count; + smp_mb__before_atomic(); + atomic_set(&work->pending, 1); + } + trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end); local_irq_enable(); @@ -203,8 +205,6 @@ skl_update_plane(struct drm_plane *drm_plane, uint32_t y = plane_state->src.y1 >> 16; uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; - const struct intel_scaler *scaler = - &crtc_state->scaler_state.scalers[plane_state->scaler_id]; plane_ctl = PLANE_CTL_ENABLE | PLANE_CTL_PIPE_GAMMA_ENABLE | @@ -260,13 +260,16 @@ skl_update_plane(struct drm_plane *drm_plane, /* program plane scaler */ if (plane_state->scaler_id >= 0) { - uint32_t ps_ctrl = 0; int scaler_id = plane_state->scaler_id; + const struct intel_scaler *scaler; DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane, PS_PLANE_SEL(plane)); - ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode; - I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); + + scaler = &crtc_state->scaler_state.scalers[scaler_id]; + + I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), + PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode); I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), @@ -1111,10 +1114,18 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) possible_crtcs = (1 << pipe); - ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, - &intel_plane_funcs, - plane_formats, num_plane_formats, - DRM_PLANE_TYPE_OVERLAY, NULL); + if (INTEL_INFO(dev)->gen >= 9) + ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, + &intel_plane_funcs, + plane_formats, num_plane_formats, + DRM_PLANE_TYPE_OVERLAY, + "plane %d%c", plane + 2, pipe_name(pipe)); + else + ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, + &intel_plane_funcs, + plane_formats, num_plane_formats, + DRM_PLANE_TYPE_OVERLAY, + "sprite %c", sprite_name(pipe, plane)); if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 223129d3c765..7ac9e9b0e2c3 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1512,7 +1512,6 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = { static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { .mode_valid = intel_tv_mode_valid, .get_modes = intel_tv_get_modes, - .best_encoder = intel_best_encoder, }; static const struct drm_encoder_funcs intel_tv_enc_funcs = { @@ -1591,7 +1590,7 @@ intel_tv_init(struct drm_device *dev) DRM_MODE_CONNECTOR_SVIDEO); drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, - DRM_MODE_ENCODER_TVDAC, NULL); + DRM_MODE_ENCODER_TVDAC, "TV"); intel_encoder->compute_config = intel_tv_compute_config; intel_encoder->get_config = intel_tv_get_config; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 4f1dfe616856..c1ca458d688e 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ 
b/drivers/gpu/drm/i915/intel_uncore.c @@ -248,9 +248,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) return HRTIMER_NORESTART; } -void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) +void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv, + bool restore) { - struct drm_i915_private *dev_priv = dev->dev_private; unsigned long irqflags; struct intel_uncore_forcewake_domain *domain; int retry_count = 100; @@ -304,7 +304,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) if (fw) dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); - if (IS_GEN6(dev) || IS_GEN7(dev)) + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv); } @@ -400,43 +400,42 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) return false; } -static void __intel_uncore_early_sanitize(struct drm_device *dev, +static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, bool restore_forcewake) { - struct drm_i915_private *dev_priv = dev->dev_private; - /* clear out unclaimed reg detection bit */ if (check_for_unclaimed_mmio(dev_priv)) DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); /* clear out old GT FIFO errors */ - if (IS_GEN6(dev) || IS_GEN7(dev)) + if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) __raw_i915_write32(dev_priv, GTFIFODBG, __raw_i915_read32(dev_priv, GTFIFODBG)); /* WaDisableShadowRegForCpd:chv */ - if (IS_CHERRYVIEW(dev)) { + if (IS_CHERRYVIEW(dev_priv)) { __raw_i915_write32(dev_priv, GTFIFOCTL, __raw_i915_read32(dev_priv, GTFIFOCTL) | GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | GT_FIFO_CTL_RC6_POLICY_STALL); } - intel_uncore_forcewake_reset(dev, restore_forcewake); + intel_uncore_forcewake_reset(dev_priv, restore_forcewake); } -void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) +void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, + bool restore_forcewake) { - __intel_uncore_early_sanitize(dev, restore_forcewake); - i915_check_and_clear_faults(dev); + __intel_uncore_early_sanitize(dev_priv, restore_forcewake); + i915_check_and_clear_faults(dev_priv); } -void intel_uncore_sanitize(struct drm_device *dev) +void intel_uncore_sanitize(struct drm_i915_private *dev_priv) { - i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); + i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6); /* BIOS often leaves RC6 enabled, but disable it for hw init */ - intel_disable_gt_powersave(dev); + intel_disable_gt_powersave(dev_priv); } static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, @@ -1233,14 +1232,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, fw_domain_reset(d); } -static void intel_uncore_fw_domains_init(struct drm_device *dev) +static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (INTEL_INFO(dev_priv)->gen <= 5) return; - if (IS_GEN9(dev)) { + if (IS_GEN9(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get; dev_priv->uncore.funcs.force_wake_put = fw_domains_put; fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, @@ -1251,9 +1248,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) FORCEWAKE_ACK_BLITTER_GEN9); fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); - } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 
dev_priv->uncore.funcs.force_wake_get = fw_domains_get; - if (!IS_CHERRYVIEW(dev)) + if (!IS_CHERRYVIEW(dev_priv)) dev_priv->uncore.funcs.force_wake_put = fw_domains_put_with_fifo; else @@ -1262,17 +1259,17 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); - } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get_with_thread_status; - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) dev_priv->uncore.funcs.force_wake_put = fw_domains_put_with_fifo; else dev_priv->uncore.funcs.force_wake_put = fw_domains_put; fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_ACK_HSW); - } else if (IS_IVYBRIDGE(dev)) { + } else if (IS_IVYBRIDGE(dev_priv)) { u32 ecobus; /* IVB configs may use multi-threaded forcewake */ @@ -1302,11 +1299,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_MT_ACK); - mutex_lock(&dev->struct_mutex); fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL); ecobus = __raw_i915_read32(dev_priv, ECOBUS); fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL); - mutex_unlock(&dev->struct_mutex); if (!(ecobus & FORCEWAKE_MT_ENABLE)) { DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); @@ -1314,7 +1309,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE, FORCEWAKE_ACK); } - } else if (IS_GEN6(dev)) { + } else if (IS_GEN6(dev_priv)) { dev_priv->uncore.funcs.force_wake_get = fw_domains_get_with_thread_status; dev_priv->uncore.funcs.force_wake_put = @@ -1327,26 +1322,24 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) WARN_ON(dev_priv->uncore.fw_domains == 0); } -void intel_uncore_init(struct drm_device *dev) +void intel_uncore_init(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - i915_check_vgpu(dev); + i915_check_vgpu(dev_priv); intel_uncore_edram_detect(dev_priv); - intel_uncore_fw_domains_init(dev); - __intel_uncore_early_sanitize(dev, false); + intel_uncore_fw_domains_init(dev_priv); + __intel_uncore_early_sanitize(dev_priv, false); dev_priv->uncore.unclaimed_mmio_check = 1; - switch (INTEL_INFO(dev)->gen) { + switch (INTEL_INFO(dev_priv)->gen) { default: case 9: ASSIGN_WRITE_MMIO_VFUNCS(gen9); ASSIGN_READ_MMIO_VFUNCS(gen9); break; case 8: - if (IS_CHERRYVIEW(dev)) { + if (IS_CHERRYVIEW(dev_priv)) { ASSIGN_WRITE_MMIO_VFUNCS(chv); ASSIGN_READ_MMIO_VFUNCS(chv); @@ -1357,13 +1350,13 @@ void intel_uncore_init(struct drm_device *dev) break; case 7: case 6: - if (IS_HASWELL(dev)) { + if (IS_HASWELL(dev_priv)) { ASSIGN_WRITE_MMIO_VFUNCS(hsw); } else { ASSIGN_WRITE_MMIO_VFUNCS(gen6); } - if (IS_VALLEYVIEW(dev)) { + if (IS_VALLEYVIEW(dev_priv)) { ASSIGN_READ_MMIO_VFUNCS(vlv); } else { ASSIGN_READ_MMIO_VFUNCS(gen6); @@ -1381,24 +1374,24 @@ void intel_uncore_init(struct drm_device *dev) break; } - if (intel_vgpu_active(dev)) { + if (intel_vgpu_active(dev_priv)) { ASSIGN_WRITE_MMIO_VFUNCS(vgpu); ASSIGN_READ_MMIO_VFUNCS(vgpu); } - i915_check_and_clear_faults(dev); + i915_check_and_clear_faults(dev_priv); } #undef ASSIGN_WRITE_MMIO_VFUNCS #undef ASSIGN_READ_MMIO_VFUNCS -void intel_uncore_fini(struct drm_device *dev) +void intel_uncore_fini(struct drm_i915_private 
*dev_priv) { /* Paranoia: make sure we have disabled everything before we exit. */ - intel_uncore_sanitize(dev); - intel_uncore_forcewake_reset(dev, false); + intel_uncore_sanitize(dev_priv); + intel_uncore_forcewake_reset(dev_priv, false); } -#define GEN_RANGE(l, h) GENMASK(h, l) +#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1) static const struct register_whitelist { i915_reg_t offset_ldw, offset_udw; @@ -1423,7 +1416,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) && - (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) + (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask)) break; } @@ -1467,83 +1460,47 @@ out: return ret; } -int i915_get_reset_stats_ioctl(struct drm_device *dev, - void *data, struct drm_file *file) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_reset_stats *args = data; - struct i915_ctx_hang_stats *hs; - struct intel_context *ctx; - int ret; - - if (args->flags || args->pad) - return -EINVAL; - - if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN)) - return -EPERM; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - ctx = i915_gem_context_get(file->driver_priv, args->ctx_id); - if (IS_ERR(ctx)) { - mutex_unlock(&dev->struct_mutex); - return PTR_ERR(ctx); - } - hs = &ctx->hang_stats; - - if (capable(CAP_SYS_ADMIN)) - args->reset_count = i915_reset_count(&dev_priv->gpu_error); - else - args->reset_count = 0; - - args->batch_active = hs->batch_active; - args->batch_pending = hs->batch_pending; - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - -static int i915_reset_complete(struct drm_device *dev) +static int i915_reset_complete(struct pci_dev *pdev) { u8 gdrst; - pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); + pci_read_config_byte(pdev, I915_GDRST, &gdrst); return (gdrst & GRDOM_RESET_STATUS) == 0; } -static int i915_do_reset(struct drm_device *dev, unsigned engine_mask) +static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { + struct pci_dev *pdev = dev_priv->dev->pdev; + /* assert reset for at least 20 usec */ - pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); udelay(20); - pci_write_config_byte(dev->pdev, I915_GDRST, 0); + pci_write_config_byte(pdev, I915_GDRST, 0); - return wait_for(i915_reset_complete(dev), 500); + return wait_for(i915_reset_complete(pdev), 500); } -static int g4x_reset_complete(struct drm_device *dev) +static int g4x_reset_complete(struct pci_dev *pdev) { u8 gdrst; - pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); + pci_read_config_byte(pdev, I915_GDRST, &gdrst); return (gdrst & GRDOM_RESET_ENABLE) == 0; } -static int g33_do_reset(struct drm_device *dev, unsigned engine_mask) +static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { - pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); - return wait_for(g4x_reset_complete(dev), 500); + struct pci_dev *pdev = dev_priv->dev->pdev; + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); + return wait_for(g4x_reset_complete(pdev), 500); } -static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) +static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = dev_priv->dev->pdev; int ret; - 
pci_write_config_byte(dev->pdev, I915_GDRST, + pci_write_config_byte(pdev, I915_GDRST, GRDOM_RENDER | GRDOM_RESET_ENABLE); - ret = wait_for(g4x_reset_complete(dev), 500); + ret = wait_for(g4x_reset_complete(pdev), 500); if (ret) return ret; @@ -1551,9 +1508,9 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); POSTING_READ(VDECCLK_GATE_D); - pci_write_config_byte(dev->pdev, I915_GDRST, + pci_write_config_byte(pdev, I915_GDRST, GRDOM_MEDIA | GRDOM_RESET_ENABLE); - ret = wait_for(g4x_reset_complete(dev), 500); + ret = wait_for(g4x_reset_complete(pdev), 500); if (ret) return ret; @@ -1561,14 +1518,14 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); POSTING_READ(VDECCLK_GATE_D); - pci_write_config_byte(dev->pdev, I915_GDRST, 0); + pci_write_config_byte(pdev, I915_GDRST, 0); return 0; } -static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask) +static int ironlake_do_reset(struct drm_i915_private *dev_priv, + unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; int ret; I915_WRITE(ILK_GDSR, @@ -1612,7 +1569,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, /** * gen6_reset_engines - reset individual engines - * @dev: DRM device + * @dev_priv: i915 device * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset * * This function will reset the individual engines that are set in engine_mask. @@ -1623,9 +1580,9 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, * * Returns 0 on success, nonzero on error. */ -static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask) +static int gen6_reset_engines(struct drm_i915_private *dev_priv, + unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; const u32 hw_engine_mask[I915_NUM_ENGINES] = { [RCS] = GEN6_GRDOM_RENDER, @@ -1647,7 +1604,7 @@ static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask) ret = gen6_hw_domain_reset(dev_priv, hw_mask); - intel_uncore_forcewake_reset(dev, true); + intel_uncore_forcewake_reset(dev_priv, true); return ret; } @@ -1663,8 +1620,8 @@ static int wait_for_register_fw(struct drm_i915_private *dev_priv, static int gen8_request_engine_reset(struct intel_engine_cs *engine) { + struct drm_i915_private *dev_priv = engine->i915; int ret; - struct drm_i915_private *dev_priv = engine->dev->dev_private; I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); @@ -1682,22 +1639,22 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine) static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->dev->dev_private; + struct drm_i915_private *dev_priv = engine->i915; I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); } -static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask) +static int gen8_reset_engines(struct drm_i915_private *dev_priv, + unsigned engine_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; for_each_engine_masked(engine, dev_priv, engine_mask) if (gen8_request_engine_reset(engine)) goto not_ready; - return gen6_reset_engines(dev, engine_mask); + return gen6_reset_engines(dev_priv, engine_mask); 
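The GEN_RANGE() redefinition above shifts the register-whitelist test from computing 1 << gen at lookup time to a precomputed gen_mask in which bit (gen - 1) stands for generation gen. A 32-bit sketch of the encoding (GENMASK reimplemented here for illustration):

    #include <assert.h>
    #include <stdio.h>

    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)

    int main(void)
    {
        unsigned int gen6_mask = 1u << (6 - 1);   /* gen_mask of a gen6 part */
        unsigned int whitelist = GEN_RANGE(4, 9); /* entry valid on gen4..gen9 */

        assert(gen6_mask & whitelist);
        assert(!((1u << (3 - 1)) & whitelist));   /* gen3 excluded */
        printf("whitelist bits: 0x%x\n", whitelist);
        return 0;
    }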
not_ready: for_each_engine_masked(engine, dev_priv, engine_mask) @@ -1706,35 +1663,35 @@ not_ready: return -EIO; } -static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *, - unsigned engine_mask) +typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask); + +static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) { if (!i915.reset) return NULL; - if (INTEL_INFO(dev)->gen >= 8) + if (INTEL_INFO(dev_priv)->gen >= 8) return gen8_reset_engines; - else if (INTEL_INFO(dev)->gen >= 6) + else if (INTEL_INFO(dev_priv)->gen >= 6) return gen6_reset_engines; - else if (IS_GEN5(dev)) + else if (IS_GEN5(dev_priv)) return ironlake_do_reset; - else if (IS_G4X(dev)) + else if (IS_G4X(dev_priv)) return g4x_do_reset; - else if (IS_G33(dev)) + else if (IS_G33(dev_priv)) return g33_do_reset; - else if (INTEL_INFO(dev)->gen >= 3) + else if (INTEL_INFO(dev_priv)->gen >= 3) return i915_do_reset; else return NULL; } -int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask) +int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { - struct drm_i915_private *dev_priv = to_i915(dev); - int (*reset)(struct drm_device *, unsigned); + reset_func reset; int ret; - reset = intel_get_gpu_reset(dev); + reset = intel_get_gpu_reset(dev_priv); if (reset == NULL) return -ENODEV; @@ -1742,15 +1699,15 @@ int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask) * request may be dropped and never completes (causing -EIO). */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - ret = reset(dev, engine_mask); + ret = reset(dev_priv, engine_mask); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return ret; } -bool intel_has_gpu_reset(struct drm_device *dev) +bool intel_has_gpu_reset(struct drm_i915_private *dev_priv) { - return intel_get_gpu_reset(dev) != NULL; + return intel_get_gpu_reset(dev_priv) != NULL; } int intel_guc_reset(struct drm_i915_private *dev_priv) @@ -1758,7 +1715,7 @@ int intel_guc_reset(struct drm_i915_private *dev_priv) int ret; unsigned long irqflags; - if (!i915.enable_guc_submission) + if (!HAS_GUC(dev_priv)) return -EINVAL; intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); @@ -1802,10 +1759,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, { enum forcewake_domains fw_domains; - if (intel_vgpu_active(dev_priv->dev)) + if (intel_vgpu_active(dev_priv)) return 0; - switch (INTEL_INFO(dev_priv)->gen) { + switch (INTEL_GEN(dev_priv)) { case 9: fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg)); break; @@ -1842,10 +1799,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, { enum forcewake_domains fw_domains; - if (intel_vgpu_active(dev_priv->dev)) + if (intel_vgpu_active(dev_priv)) return 0; - switch (INTEL_INFO(dev_priv)->gen) { + switch (INTEL_GEN(dev_priv)) { case 9: fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg)); break; diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index c15051de8023..68db9621f1f0 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -403,9 +403,10 @@ struct lvds_dvo_timing { u8 vsync_off:4; u8 rsvd0:6; u8 hsync_off_hi:2; - u8 h_image; - u8 v_image; - u8 max_hv; + u8 himage_lo; + u8 vimage_lo; + u8 vimage_hi:4; + u8 himage_hi:4; u8 h_border; u8 v_border; u8 rsvd1:3; @@ -446,10 +447,16 @@ struct bdb_lfp_backlight_data_entry { u8 obsolete3; } __packed; +struct bdb_lfp_backlight_control_method { + u8 type:4; + u8 controller:4; 
+} __packed; + struct bdb_lfp_backlight_data { u8 entry_size; struct bdb_lfp_backlight_data_entry data[16]; u8 level[16]; + struct bdb_lfp_backlight_control_method backlight_control[16]; } __packed; struct aimdb_header { diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 1f14b602882b..82656654fb21 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -97,8 +97,8 @@ static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc) return NULL; } -int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format, - int hsync_pin, int vsync_pin) +int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format, + int hsync_pin, int vsync_pin, u32 bus_flags) { struct imx_drm_crtc_helper_funcs *helper; struct imx_drm_crtc *imx_crtc; @@ -110,14 +110,17 @@ int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format, helper = &imx_crtc->imx_drm_helper_funcs; if (helper->set_interface_pix_fmt) return helper->set_interface_pix_fmt(encoder->crtc, - bus_format, hsync_pin, vsync_pin); + bus_format, hsync_pin, vsync_pin, + bus_flags); return 0; } -EXPORT_SYMBOL_GPL(imx_drm_set_bus_format_pins); +EXPORT_SYMBOL_GPL(imx_drm_set_bus_config); int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format) { - return imx_drm_set_bus_format_pins(encoder, bus_format, 2, 3); + return imx_drm_set_bus_config(encoder, bus_format, 2, 3, + DRM_BUS_FLAG_DE_HIGH | + DRM_BUS_FLAG_PIXDATA_NEGEDGE); } EXPORT_SYMBOL_GPL(imx_drm_set_bus_format); diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index b0241b9d1334..74320a1723b7 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h @@ -19,7 +19,8 @@ struct imx_drm_crtc_helper_funcs { int (*enable_vblank)(struct drm_crtc *crtc); void (*disable_vblank)(struct drm_crtc *crtc); int (*set_interface_pix_fmt)(struct drm_crtc *crtc, - u32 bus_format, int hsync_pin, int vsync_pin); + u32 bus_format, int hsync_pin, int vsync_pin, + u32 bus_flags); const struct drm_crtc_helper_funcs *crtc_helper_funcs; const struct drm_crtc_funcs *crtc_funcs; }; @@ -41,8 +42,8 @@ void imx_drm_mode_config_init(struct drm_device *drm); struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); -int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, - u32 bus_format, int hsync_pin, int vsync_pin); +int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format, + int hsync_pin, int vsync_pin, u32 bus_flags); int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format); diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index a58eee59550a..beff793bb717 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -25,6 +25,7 @@ #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> #include <linux/of_device.h> #include <linux/of_graph.h> +#include <video/of_display_timing.h> #include <video/of_videomode.h> #include <linux/regmap.h> #include <linux/videodev2.h> @@ -59,6 +60,7 @@ struct imx_ldb_channel { struct drm_encoder encoder; struct drm_panel *panel; struct device_node *child; + struct i2c_adapter *ddc; int chno; void *edid; int edid_len; @@ -107,6 +109,9 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector) return num_modes; } + if (!imx_ldb_ch->edid && imx_ldb_ch->ddc) + imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc); + if (imx_ldb_ch->edid) { drm_mode_connector_update_edid_property(connector, imx_ldb_ch->edid); @@ -553,7 
+558,8 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) for_each_child_of_node(np, child) { struct imx_ldb_channel *channel; - struct device_node *port; + struct device_node *ddc_node; + struct device_node *ep; ret = of_property_read_u32(child, "reg", &i); if (ret || i < 0 || i > 1) @@ -576,33 +582,54 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) * The output port is port@4 with an external 4-port mux or * port@2 with the internal 2-port mux. */ - port = of_graph_get_port_by_id(child, imx_ldb->lvds_mux ? 4 : 2); - if (port) { - struct device_node *endpoint, *remote; - - endpoint = of_get_child_by_name(port, "endpoint"); - if (endpoint) { - remote = of_graph_get_remote_port_parent(endpoint); - if (remote) - channel->panel = of_drm_find_panel(remote); - else - return -EPROBE_DEFER; - if (!channel->panel) { - dev_err(dev, "panel not found: %s\n", - remote->full_name); - return -EPROBE_DEFER; - } + ep = of_graph_get_endpoint_by_regs(child, + imx_ldb->lvds_mux ? 4 : 2, + -1); + if (ep) { + struct device_node *remote; + + remote = of_graph_get_remote_port_parent(ep); + of_node_put(ep); + if (remote) + channel->panel = of_drm_find_panel(remote); + else + return -EPROBE_DEFER; + of_node_put(remote); + if (!channel->panel) { + dev_err(dev, "panel not found: %s\n", + remote->full_name); + return -EPROBE_DEFER; } } - edidp = of_get_property(child, "edid", &channel->edid_len); - if (edidp) { - channel->edid = kmemdup(edidp, channel->edid_len, - GFP_KERNEL); - } else if (!channel->panel) { - ret = of_get_drm_display_mode(child, &channel->mode, 0); - if (!ret) - channel->mode_valid = 1; + ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0); + if (ddc_node) { + channel->ddc = of_find_i2c_adapter_by_node(ddc_node); + of_node_put(ddc_node); + if (!channel->ddc) { + dev_warn(dev, "failed to get ddc i2c adapter\n"); + return -EPROBE_DEFER; + } + } + + if (!channel->ddc) { + /* if no DDC available, fallback to hardcoded EDID */ + dev_dbg(dev, "no ddc available\n"); + + edidp = of_get_property(child, "edid", + &channel->edid_len); + if (edidp) { + channel->edid = kmemdup(edidp, + channel->edid_len, + GFP_KERNEL); + } else if (!channel->panel) { + /* fallback to display-timings node */ + ret = of_get_drm_display_mode(child, + &channel->mode, + OF_USE_NATIVE_MODE); + if (!ret) + channel->mode_valid = 1; + } } channel->bus_format = of_get_bus_format(dev, child); @@ -647,6 +674,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master, channel->encoder.funcs->destroy(&channel->encoder); kfree(channel->edid); + i2c_put_adapter(channel->ddc); } } diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index ae7a9fb3b8a2..baf788121287 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -294,8 +294,10 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder) switch (tve->mode) { case TVE_MODE_VGA: - imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24, - tve->hsync_pin, tve->vsync_pin); + imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24, + tve->hsync_pin, tve->vsync_pin, + DRM_BUS_FLAG_DE_HIGH | + DRM_BUS_FLAG_PIXDATA_NEGEDGE); break; case TVE_MODE_TVOUT: imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24); diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index b2c30b8d9816..fc040417e1e8 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -66,6 +66,7 @@ struct ipu_crtc { struct 
ipu_flip_work *flip_work; int irq; u32 bus_format; + u32 bus_flags; int di_hsync_pin; int di_vsync_pin; }; @@ -271,8 +272,10 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc, else sig_cfg.clkflags = 0; - sig_cfg.enable_pol = 1; - sig_cfg.clk_pol = 0; + sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW); + /* Default to driving pixel data on negative clock edges */ + sig_cfg.clk_pol = !!(ipu_crtc->bus_flags & + DRM_BUS_FLAG_PIXDATA_POSEDGE); sig_cfg.bus_format = ipu_crtc->bus_format; sig_cfg.v_to_h_sync = 0; sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin; @@ -396,11 +399,12 @@ static void ipu_disable_vblank(struct drm_crtc *crtc) } static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, - u32 bus_format, int hsync_pin, int vsync_pin) + u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); ipu_crtc->bus_format = bus_format; + ipu_crtc->bus_flags = bus_flags; ipu_crtc->di_hsync_pin = hsync_pin; ipu_crtc->di_vsync_pin = vsync_pin; diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 681ec6eb77d9..a4bb44118d33 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -38,6 +38,8 @@ static const uint32_t ipu_plane_formats[] = { DRM_FORMAT_RGBX8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_BGRA8888, + DRM_FORMAT_UYVY, + DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_YUV420, @@ -428,7 +430,6 @@ static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (crtc != plane->crtc) dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n", plane->crtc, crtc); - plane->crtc = crtc; if (!ipu_plane->enabled) ipu_plane_enable(ipu_plane); @@ -461,7 +462,7 @@ static void ipu_plane_destroy(struct drm_plane *plane) kfree(ipu_plane); } -static struct drm_plane_funcs ipu_plane_funcs = { +static const struct drm_plane_funcs ipu_plane_funcs = { .update_plane = ipu_update_plane, .disable_plane = ipu_disable_plane, .destroy = ipu_plane_destroy, diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 363e2c7741e2..2d1fd02cd3d6 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -35,7 +35,6 @@ struct imx_parallel_display { void *edid; int edid_len; u32 bus_format; - int mode_valid; struct drm_display_mode mode; struct drm_panel *panel; }; @@ -68,17 +67,6 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) num_modes = drm_add_edid_modes(connector, imxpd->edid); } - if (imxpd->mode_valid) { - struct drm_display_mode *mode = drm_mode_create(connector->dev); - - if (!mode) - return -EINVAL; - drm_mode_copy(mode, &imxpd->mode); - mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, - drm_mode_probed_add(connector, mode); - num_modes++; - } - if (np) { struct drm_display_mode *mode = drm_mode_create(connector->dev); @@ -115,8 +103,8 @@ static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode) static void imx_pd_encoder_prepare(struct drm_encoder *encoder) { struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); - - imx_drm_set_bus_format(encoder, imxpd->bus_format); + imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3, + imxpd->connector.display_info.bus_flags); } static void imx_pd_encoder_commit(struct drm_encoder *encoder) @@ -203,7 +191,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; struct device_node *np = dev->of_node; - struct device_node *port; + 
struct device_node *ep; const u8 *edidp; struct imx_parallel_display *imxpd; int ret; @@ -230,18 +218,18 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) } /* port@1 is the output port */ - port = of_graph_get_port_by_id(np, 1); - if (port) { - struct device_node *endpoint, *remote; - - endpoint = of_get_child_by_name(port, "endpoint"); - if (endpoint) { - remote = of_graph_get_remote_port_parent(endpoint); - if (remote) - imxpd->panel = of_drm_find_panel(remote); - if (!imxpd->panel) - return -EPROBE_DEFER; + ep = of_graph_get_endpoint_by_regs(np, 1, -1); + if (ep) { + struct device_node *remote; + + remote = of_graph_get_remote_port_parent(ep); + of_node_put(ep); + if (remote) { + imxpd->panel = of_drm_find_panel(remote); + of_node_put(remote); } + if (!imxpd->panel) + return -EPROBE_DEFER; } imxpd->dev = dev; diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index d05ca7901315..0186e500d2a5 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -432,11 +432,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, unsigned long pll_rate; unsigned int factor; - if (!dpi) { - dev_err(dpi->dev, "invalid argument\n"); - return -EINVAL; - } - pix_rate = 1000UL * mode->clock; if (mode->clock <= 74000) factor = 8 * 3; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index b1223d54d0ab..c33bf98c5d5e 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -91,7 +91,7 @@ static int mtk_atomic_commit(struct drm_device *drm, mutex_lock(&private->commit.lock); flush_work(&private->commit.work); - drm_atomic_helper_swap_state(drm, state); + drm_atomic_helper_swap_state(state, true); if (async) mtk_atomic_schedule(private, state); @@ -243,7 +243,7 @@ static struct drm_driver mtk_drm_driver = { .enable_vblank = mtk_drm_crtc_enable_vblank, .disable_vblank = mtk_drm_crtc_disable_vblank, - .gem_free_object = mtk_drm_gem_free_object, + .gem_free_object_unlocked = mtk_drm_gem_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = mtk_drm_gem_dumb_create, .dumb_map_offset = mtk_drm_gem_dumb_map_offset, diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 2d808e59fefd..28b2044ed9f2 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -575,14 +575,6 @@ static int mtk_dsi_connector_get_modes(struct drm_connector *connector) return drm_panel_get_modes(dsi->panel); } -static struct drm_encoder *mtk_dsi_connector_best_encoder( - struct drm_connector *connector) -{ - struct mtk_dsi *dsi = connector_to_dsi(connector); - - return &dsi->encoder; -} - static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = { .mode_fixup = mtk_dsi_encoder_mode_fixup, .mode_set = mtk_dsi_encoder_mode_set, @@ -603,7 +595,6 @@ static const struct drm_connector_funcs mtk_dsi_connector_funcs = { static const struct drm_connector_helper_funcs mtk_dsi_connector_helper_funcs = { .get_modes = mtk_dsi_connector_get_modes, - .best_encoder = mtk_dsi_connector_best_encoder, }; static int mtk_drm_attach_bridge(struct drm_bridge *bridge, @@ -695,10 +686,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi) { drm_encoder_cleanup(&dsi->encoder); /* Skip connector cleanup if creation was delegated to the bridge */ - if (dsi->conn.dev) { - drm_connector_unregister(&dsi->conn); + if (dsi->conn.dev) drm_connector_cleanup(&dsi->conn); - } } static void 
mtk_dsi_ddp_start(struct mtk_ddp_comp *comp) diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index ebb470ff7200..2b4b125eebc3 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -101,7 +101,7 @@ static struct drm_driver driver = { .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - .gem_free_object = mgag200_gem_free_object, + .gem_free_object_unlocked = mgag200_gem_free_object, .dumb_create = mgag200_dumb_create, .dumb_map_offset = mgag200_dumb_mmap_offset, .dumb_destroy = drm_gem_dumb_destroy, diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 14e64e08909e..6b21cb27e1cc 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -182,7 +182,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock) } } - fvv = pllreffreq * testn / testm; + fvv = pllreffreq * (n + 1) / (m + 1); fvv = (fvv - 800000) / 50000; if (fvv > 15) @@ -202,6 +202,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock) WREG_DAC(MGA1064_PIX_PLLC_M, m); WREG_DAC(MGA1064_PIX_PLLC_N, n); WREG_DAC(MGA1064_PIX_PLLC_P, p); + + if (mdev->unique_rev_id >= 0x04) { + WREG_DAC(0x1a, 0x09); + msleep(20); + WREG_DAC(0x1a, 0x01); + + } + return 0; } @@ -1344,19 +1352,20 @@ static void mga_crtc_commit(struct drm_crtc *crtc) * use this for 8-bit mode so can't perform smooth fades on deeper modes, * but it's a requirement that we provide the function */ -static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct mga_crtc *mga_crtc = to_mga_crtc(crtc); - int end = (start + size > MGAG200_LUT_SIZE) ? 
MGAG200_LUT_SIZE : start + size; int i; - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { mga_crtc->lut_r[i] = red[i] >> 8; mga_crtc->lut_g[i] = green[i] >> 8; mga_crtc->lut_b[i] = blue[i] >> 8; } mga_crtc_load_lut(crtc); + + return 0; } /* Simple cleanup function */ diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c index 72360cd038c0..5960628ceb93 100644 --- a/drivers/gpu/drm/msm/edp/edp_connector.c +++ b/drivers/gpu/drm/msm/edp/edp_connector.c @@ -91,15 +91,6 @@ static int edp_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -edp_connector_best_encoder(struct drm_connector *connector) -{ - struct edp_connector *edp_connector = to_edp_connector(connector); - - DBG(""); - return edp_connector->edp->encoder; -} - static const struct drm_connector_funcs edp_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = edp_connector_detect, @@ -113,7 +104,6 @@ static const struct drm_connector_funcs edp_connector_funcs = { static const struct drm_connector_helper_funcs edp_connector_helper_funcs = { .get_modes = edp_connector_get_modes, .mode_valid = edp_connector_mode_valid, - .best_encoder = edp_connector_best_encoder, }; /* initialize connector */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c index b15d72683112..a2515b466ce5 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c @@ -406,13 +406,6 @@ static int msm_hdmi_connector_mode_valid(struct drm_connector *connector, return 0; } -static struct drm_encoder * -msm_hdmi_connector_best_encoder(struct drm_connector *connector) -{ - struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector); - return hdmi_connector->hdmi->encoder; -} - static const struct drm_connector_funcs hdmi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = hdmi_connector_detect, @@ -426,7 +419,6 @@ static const struct drm_connector_funcs hdmi_connector_funcs = { static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = { .get_modes = msm_hdmi_connector_get_modes, .mode_valid = msm_hdmi_connector_mode_valid, - .best_encoder = msm_hdmi_connector_best_encoder, }; /* initialize connector */ diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 67442d50a6c2..f145d256e332 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -106,31 +106,27 @@ out: static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - int i, ncrtcs = state->dev->mode_config.num_crtc; + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; mdp4_enable(mdp4_kms); /* see 119ecb7fd */ - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - if (!crtc) - continue; + for_each_crtc_in_state(state, crtc, crtc_state, i) drm_crtc_vblank_get(crtc); - } } static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); - int i, ncrtcs = state->dev->mode_config.num_crtc; + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; /* see 119ecb7fd */ - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - if (!crtc) - continue; + for_each_crtc_in_state(state, crtc, crtc_state, i) drm_crtc_vblank_put(crtc); - } 
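
(Editor's aside, an illustrative sketch and not part of the patch: the two mdp4 hunks above drop the open-coded walk over state->crtcs[], whose slots are NULL for CRTCs untouched by the commit, in favour of the for_each_crtc_in_state() iterator, which visits only the CRTCs tracked in the drm_atomic_state. The balanced get/put pattern then reduces to the shape below; the helper name is hypothetical.)

#include <drm/drmP.h>
#include <drm/drm_atomic.h>

/* Take a vblank reference on every CRTC in this commit, as
 * mdp4_prepare_commit() now does; mdp4_complete_commit() drops the
 * references again with drm_crtc_vblank_put(). */
static void example_hold_vblanks(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	for_each_crtc_in_state(state, crtc, crtc_state, i)
		drm_crtc_vblank_get(crtc);
}
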
mdp4_disable(mdp4_kms); } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c index 2648cd7631ef..353429b05733 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c @@ -90,14 +90,6 @@ static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -mdp4_lvds_connector_best_encoder(struct drm_connector *connector) -{ - struct mdp4_lvds_connector *mdp4_lvds_connector = - to_mdp4_lvds_connector(connector); - return mdp4_lvds_connector->encoder; -} - static const struct drm_connector_funcs mdp4_lvds_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = mdp4_lvds_connector_detect, @@ -111,7 +103,6 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = { static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = { .get_modes = mdp4_lvds_connector_get_modes, .mode_valid = mdp4_lvds_connector_mode_valid, - .best_encoder = mdp4_lvds_connector_best_encoder, }; /* initialize connector */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 88fe256c1931..4e8ed739f558 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -374,6 +374,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct plane_state pstates[STAGE_MAX + 1]; const struct mdp5_cfg_hw *hw_cfg; + const struct drm_plane_state *pstate; int cnt = 0, i; DBG("%s: check", mdp5_crtc->name); @@ -382,20 +383,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, * and that we don't have conflicting mixer stages: */ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - drm_atomic_crtc_state_for_each_plane(plane, state) { - struct drm_plane_state *pstate; + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { if (cnt >= (hw_cfg->lm.nb_stages)) { dev_err(dev->dev, "too many planes!\n"); return -EINVAL; } - pstate = state->state->plane_states[drm_plane_index(plane)]; - /* plane might not have changed, in which case take - * current state: - */ - if (!pstate) - pstate = plane->state; pstates[cnt].plane = plane; pstates[cnt].state = to_mdp5_plane_state(pstate); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 484b4d15e71d..f0c285b1c027 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -78,17 +78,11 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s { int i; struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - int nplanes = mdp5_kms->dev->mode_config.num_total_plane; - - for (i = 0; i < nplanes; i++) { - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *plane_state = state->plane_states[i]; - - if (!plane) - continue; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + for_each_plane_in_state(state, plane, plane_state, i) mdp5_plane_complete_commit(plane, plane_state); - } mdp5_disable(mdp5_kms); } diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index e3892c263f27..4a8a6f1f1151 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -84,17 +84,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev, struct drm_atomic_state *old_state) { struct drm_crtc *crtc; + struct drm_crtc_state 
*crtc_state; struct msm_drm_private *priv = old_state->dev->dev_private; struct msm_kms *kms = priv->kms; - int ncrtcs = old_state->dev->mode_config.num_crtc; int i; - for (i = 0; i < ncrtcs; i++) { - crtc = old_state->crtcs[i]; - - if (!crtc) - continue; - + for_each_crtc_in_state(old_state, crtc, crtc_state, i) { if (!crtc->state->enable) continue; @@ -192,9 +187,11 @@ int msm_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock) { struct msm_drm_private *priv = dev->dev_private; - int nplanes = dev->mode_config.num_total_plane; - int ncrtcs = dev->mode_config.num_crtc; struct msm_commit *c; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_plane *plane; + struct drm_plane_state *plane_state; int i, ret; ret = drm_atomic_helper_prepare_planes(dev, state); @@ -210,28 +207,18 @@ int msm_atomic_commit(struct drm_device *dev, /* * Figure out what crtcs we have: */ - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - if (!crtc) - continue; - c->crtc_mask |= (1 << drm_crtc_index(crtc)); - } + for_each_crtc_in_state(state, crtc, crtc_state, i) + c->crtc_mask |= drm_crtc_mask(crtc); /* * Figure out what fence to wait for: */ - for (i = 0; i < nplanes; i++) { - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *new_state = state->plane_states[i]; - - if (!plane) - continue; - - if ((plane->state->fb != new_state->fb) && new_state->fb) { - struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0); + for_each_plane_in_state(state, plane, plane_state, i) { + if ((plane->state->fb != plane_state->fb) && plane_state->fb) { + struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0); struct msm_gem_object *msm_obj = to_msm_bo(obj); - new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv); + plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv); } } @@ -251,7 +238,7 @@ int msm_atomic_commit(struct drm_device *dev, * the software side now. 
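
(Editor's sketch, not part of the patch: msm_atomic_commit() above follows the same pattern, iterating only the planes tracked in the drm_atomic_state and taking the exclusive fence of every framebuffer the commit swaps in, so the commit worker can wait on it before touching hardware. The snippet uses only calls that appear in the hunk itself; the function name is hypothetical and the msm-internal headers are assumed.)

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include "msm_drv.h"	/* msm_framebuffer_bo() */
#include "msm_gem.h"	/* to_msm_bo(), struct msm_gem_object */

static void example_collect_fences(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i;

	for_each_plane_in_state(state, plane, plane_state, i) {
		struct drm_gem_object *obj;

		/* absent or unchanged framebuffers need no wait */
		if (!plane_state->fb || plane->state->fb == plane_state->fb)
			continue;

		obj = msm_framebuffer_bo(plane_state->fb, 0);
		plane_state->fence =
			reservation_object_get_excl_rcu(to_msm_bo(obj)->resv);
	}
}
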
*/ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); /* * Everything below can be run asynchronously without the need to grab diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 461dc8b873f0..7919c24c6ddd 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -56,17 +56,9 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb) kfree(msm_fb); } -static int msm_framebuffer_dirty(struct drm_framebuffer *fb, - struct drm_file *file_priv, unsigned flags, unsigned color, - struct drm_clip_rect *clips, unsigned num_clips) -{ - return 0; -} - static const struct drm_framebuffer_funcs msm_framebuffer_funcs = { .create_handle = msm_framebuffer_create_handle, .destroy = msm_framebuffer_destroy, - .dirty = msm_framebuffer_dirty, }; #ifdef CONFIG_DEBUG_FS diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index d9759bf3482e..1a061e3e8b9e 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -184,21 +184,7 @@ fail: return ret; } -static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc, - u16 red, u16 green, u16 blue, int regno) -{ - DBG("fbdev: set gamma"); -} - -static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc, - u16 *red, u16 *green, u16 *blue, int regno) -{ - DBG("fbdev: get gamma"); -} - static const struct drm_fb_helper_funcs msm_fb_helper_funcs = { - .gamma_set = msm_crtc_fb_gamma_set, - .gamma_get = msm_crtc_fb_gamma_get, .fb_probe = msm_fbdev_create, }; diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 6f318c54da33..0cb7a18cde26 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -785,14 +785,14 @@ nv_crtc_disable(struct drm_crtc *crtc) nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); } -static void -nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, +static int +nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size) { - int end = (start + size > 256) ? 
256 : start + size, i; struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + int i; - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { nv_crtc->lut.r[i] = r[i]; nv_crtc->lut.g[i] = g[i]; nv_crtc->lut.b[i] = b[i]; @@ -805,10 +805,12 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, */ if (!nv_crtc->base.primary->fb) { nv_crtc->lut.depth = 0; - return; + return 0; } nv_crtc_gamma_load(crtc); + + return 0; } static int diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 7c77f960c8b8..6072fe292db8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -760,12 +760,11 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, /* Initialize a page flip struct */ *s = (struct nouveau_page_flip_state) - { { }, event, nouveau_crtc(crtc)->index, - fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y, + { { }, event, crtc, fb->bits_per_pixel, fb->pitches[0], new_bo->bo.offset }; /* Keep vblanks on during flip, for the target crtc of this flip */ - drm_vblank_get(dev, nouveau_crtc(crtc)->index); + drm_crtc_vblank_get(crtc); /* Emit a page flip */ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { @@ -810,7 +809,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, return 0; fail_unreserve: - drm_vblank_put(dev, nouveau_crtc(crtc)->index); + drm_crtc_vblank_put(crtc); ttm_bo_unreserve(&old_bo->bo); fail_unpin: mutex_unlock(&cli->mutex); @@ -842,17 +841,17 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); if (s->event) { if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { - drm_arm_vblank_event(dev, s->crtc, s->event); + drm_crtc_arm_vblank_event(s->crtc, s->event); } else { - drm_send_vblank_event(dev, s->crtc, s->event); + drm_crtc_send_vblank_event(s->crtc, s->event); /* Give up ownership of vblank for page-flipped crtc */ - drm_vblank_put(dev, s->crtc); + drm_crtc_vblank_put(s->crtc); } } else { /* Give up ownership of vblank for page-flipped crtc */ - drm_vblank_put(dev, s->crtc); + drm_crtc_vblank_put(s->crtc); } list_del(&s->head); @@ -873,9 +872,10 @@ nouveau_flip_complete(struct nvif_notify *notify) if (!nouveau_finish_page_flip(chan, &state)) { if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { - nv_set_crtc_base(drm->dev, state.crtc, state.offset + - state.y * state.pitch + - state.x * state.bpp / 8); + nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc), + state.offset + state.crtc->y * + state.pitch + state.crtc->x * + state.bpp / 8); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 24273bacd885..0420ee861ea4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -28,7 +28,8 @@ int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *, struct nouveau_page_flip_state { struct list_head head; struct drm_pending_vblank_event *event; - int crtc, bpp, pitch, x, y; + struct drm_crtc *crtc; + int bpp, pitch; u64 offset; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 11f8dd9c0edb..c00ff6e786a3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -22,13 +22,11 @@ * Authors: Ben Skeggs */ -#include <linux/apple-gmux.h> #include <linux/console.h> #include <linux/delay.h> #include 
<linux/module.h> #include <linux/pci.h> #include <linux/pm_runtime.h> -#include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include "drmP.h" @@ -315,13 +313,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev, bool boot = false; int ret; - /* - * apple-gmux is needed on dual GPU MacBook Pro - * to probe the panel if we're the inactive GPU. - */ - if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) && - apple_gmux_present() && pdev != vga_default_device() && - !vga_switcheroo_handler_flags()) + if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; /* remove conflicting drivers (vesafb, efifb etc) */ @@ -970,7 +962,7 @@ driver_stub = { .gem_prime_vmap = nouveau_gem_prime_vmap, .gem_prime_vunmap = nouveau_gem_prime_vunmap, - .gem_free_object = nouveau_gem_object_del, + .gem_free_object_unlocked = nouveau_gem_object_del, .gem_open_object = nouveau_gem_object_open, .gem_close_object = nouveau_gem_object_close, diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 2e3a62d38fe9..64c4ce7115ad 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -57,7 +57,8 @@ struct nouveau_fence_priv { int (*context_new)(struct nouveau_channel *); void (*context_del)(struct nouveau_channel *); - u32 contexts, context_base; + u32 contexts; + u64 context_base; bool uevent; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 675e9e077a95..08f9c6fa0f7f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c @@ -212,7 +212,6 @@ usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc) ntfy->p->base.event = &ntfy->p->e.base; ntfy->p->base.file_priv = f; ntfy->p->base.pid = current->pid; - ntfy->p->base.destroy =(void(*)(struct drm_pending_event *))kfree; ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF; ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 3ffc2b0057bf..7a7788212df7 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -1346,21 +1346,22 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) return 0; } -static void +static int nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size) + uint32_t size) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - u32 end = min_t(u32, start + size, 256); u32 i; - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { nv_crtc->lut.r[i] = r[i]; nv_crtc->lut.g[i] = g[i]; nv_crtc->lut.b[i] = b[i]; } nv50_crtc_lut_load(crtc); + + return 0; } static void diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig index 73241c4eb7aa..336ad4de9981 100644 --- a/drivers/gpu/drm/omapdrm/Kconfig +++ b/drivers/gpu/drm/omapdrm/Kconfig @@ -2,6 +2,7 @@ config DRM_OMAP tristate "OMAP DRM" depends on DRM depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM + select OMAP2_DSS select DRM_KMS_HELPER select DRM_KMS_FB_HELPER select FB_SYS_FILLRECT diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig index 2a618afe0f53..c226da145fb3 100644 --- a/drivers/gpu/drm/omapdrm/displays/Kconfig +++ b/drivers/gpu/drm/omapdrm/displays/Kconfig @@ -1,80 +1,80 @@ menu "OMAPDRM External Display Device Drivers" -config DISPLAY_ENCODER_OPA362 +config DRM_OMAP_ENCODER_OPA362 tristate "OPA362 
external analog amplifier" help Driver for OPA362 external analog TV amplifier controlled through a GPIO. -config DISPLAY_ENCODER_TFP410 +config DRM_OMAP_ENCODER_TFP410 tristate "TFP410 DPI to DVI Encoder" help Driver for TFP410 DPI to DVI encoder. -config DISPLAY_ENCODER_TPD12S015 +config DRM_OMAP_ENCODER_TPD12S015 tristate "TPD12S015 HDMI ESD protection and level shifter" help Driver for TPD12S015, which offers HDMI ESD protection and level shifting. -config DISPLAY_CONNECTOR_DVI +config DRM_OMAP_CONNECTOR_DVI tristate "DVI Connector" depends on I2C help Driver for a generic DVI connector. -config DISPLAY_CONNECTOR_HDMI +config DRM_OMAP_CONNECTOR_HDMI tristate "HDMI Connector" help Driver for a generic HDMI connector. -config DISPLAY_CONNECTOR_ANALOG_TV +config DRM_OMAP_CONNECTOR_ANALOG_TV tristate "Analog TV Connector" help Driver for a generic analog TV connector. -config DISPLAY_PANEL_DPI +config DRM_OMAP_PANEL_DPI tristate "Generic DPI panel" help Driver for generic DPI panels. -config DISPLAY_PANEL_DSI_CM +config DRM_OMAP_PANEL_DSI_CM tristate "Generic DSI Command Mode Panel" depends on BACKLIGHT_CLASS_DEVICE help Driver for generic DSI command mode panels. -config DISPLAY_PANEL_SONY_ACX565AKM +config DRM_OMAP_PANEL_SONY_ACX565AKM tristate "ACX565AKM Panel" depends on SPI && BACKLIGHT_CLASS_DEVICE help This is the LCD panel used on Nokia N900 -config DISPLAY_PANEL_LGPHILIPS_LB035Q02 +config DRM_OMAP_PANEL_LGPHILIPS_LB035Q02 tristate "LG.Philips LB035Q02 LCD Panel" depends on SPI help LCD Panel used on the Gumstix Overo Palo35 -config DISPLAY_PANEL_SHARP_LS037V7DW01 +config DRM_OMAP_PANEL_SHARP_LS037V7DW01 tristate "Sharp LS037V7DW01 LCD Panel" depends on BACKLIGHT_CLASS_DEVICE help LCD Panel used in TI's SDP3430 and EVM boards -config DISPLAY_PANEL_TPO_TD028TTEC1 +config DRM_OMAP_PANEL_TPO_TD028TTEC1 tristate "TPO TD028TTEC1 LCD Panel" depends on SPI help LCD panel used in Openmoko. 
-config DISPLAY_PANEL_TPO_TD043MTEA1 +config DRM_OMAP_PANEL_TPO_TD043MTEA1 tristate "TPO TD043MTEA1 LCD Panel" depends on SPI help LCD Panel used in OMAP3 Pandora -config DISPLAY_PANEL_NEC_NL8048HL11 +config DRM_OMAP_PANEL_NEC_NL8048HL11 tristate "NEC NL8048HL11 Panel" depends on SPI depends on BACKLIGHT_CLASS_DEVICE diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile index 9aa176bfbf2e..46baafb1a83e 100644 --- a/drivers/gpu/drm/omapdrm/displays/Makefile +++ b/drivers/gpu/drm/omapdrm/displays/Makefile @@ -1,14 +1,14 @@ -obj-$(CONFIG_DISPLAY_ENCODER_OPA362) += encoder-opa362.o -obj-$(CONFIG_DISPLAY_ENCODER_TFP410) += encoder-tfp410.o -obj-$(CONFIG_DISPLAY_ENCODER_TPD12S015) += encoder-tpd12s015.o -obj-$(CONFIG_DISPLAY_CONNECTOR_DVI) += connector-dvi.o -obj-$(CONFIG_DISPLAY_CONNECTOR_HDMI) += connector-hdmi.o -obj-$(CONFIG_DISPLAY_CONNECTOR_ANALOG_TV) += connector-analog-tv.o -obj-$(CONFIG_DISPLAY_PANEL_DPI) += panel-dpi.o -obj-$(CONFIG_DISPLAY_PANEL_DSI_CM) += panel-dsi-cm.o -obj-$(CONFIG_DISPLAY_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o -obj-$(CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o -obj-$(CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o -obj-$(CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o -obj-$(CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o -obj-$(CONFIG_DISPLAY_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o +obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o +obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o +obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o +obj-$(CONFIG_DRM_OMAP_CONNECTOR_DVI) += connector-dvi.o +obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o +obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o +obj-$(CONFIG_DRM_OMAP_PANEL_DPI) += panel-dpi.o +obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o +obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o +obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o +obj-$(CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o +obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o +obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o +obj-$(CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 8511c648a15c..3485d1ecd655 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -14,9 +14,10 @@ #include <linux/platform_device.h> #include <linux/of.h> -#include <video/omapdss.h> #include <video/omap-panel-data.h> +#include "../dss/omapdss.h" + struct panel_drv_data { struct omap_dss_device dssdev; struct omap_dss_device *in; @@ -25,7 +26,6 @@ struct panel_drv_data { struct omap_video_timings timings; - enum omap_dss_venc_type connector_type; bool invert_polarity; }; @@ -45,10 +45,6 @@ static const struct omap_video_timings tvc_pal_timings = { static const struct of_device_id tvc_of_match[]; -struct tvc_of_data { - enum omap_dss_venc_type connector_type; -}; - #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) static int tvc_connect(struct omap_dss_device *dssdev) @@ -99,7 +95,7 @@ static int tvc_enable(struct omap_dss_device *dssdev) in->ops.atv->set_timings(in, &ddata->timings); if (!ddata->dev->of_node) { - 
in->ops.atv->set_type(in, ddata->connector_type); + in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE); in->ops.atv->invert_vid_out_polarity(in, ddata->invert_polarity); @@ -207,7 +203,6 @@ static int tvc_probe_pdata(struct platform_device *pdev) ddata->in = in; - ddata->connector_type = pdata->connector_type; ddata->invert_polarity = pdata->invert_polarity; dssdev = &ddata->dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c index 747f26a55e43..75f7827525cf 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c @@ -15,10 +15,10 @@ #include <linux/slab.h> #include <drm/drm_edid.h> - -#include <video/omapdss.h> #include <video/omap-panel-data.h> +#include "../dss/omapdss.h" + static const struct omap_video_timings dvic_default_timings = { .x_res = 640, .y_res = 480, diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 225fd8d6ab31..7bdf83af9797 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -9,6 +9,7 @@ * the Free Software Foundation. */ +#include <linux/gpio/consumer.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/platform_device.h> @@ -16,10 +17,10 @@ #include <linux/of_gpio.h> #include <drm/drm_edid.h> - -#include <video/omapdss.h> #include <video/omap-panel-data.h> +#include "../dss/omapdss.h" + static const struct omap_video_timings hdmic_default_timings = { .x_res = 640, .y_res = 480, diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 8c246c213e06..fe4e7ec3bab0 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -14,13 +14,12 @@ * the Free Software Foundation. */ -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include <linux/of_gpio.h> -#include <video/omapdss.h> +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 2fd5602880a7..d768217cefe0 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c @@ -9,14 +9,13 @@ * the Free Software Foundation. 
*/ -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 916a89978387..46855c8f5cbf 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -16,8 +16,7 @@ #include <linux/platform_device.h> #include <linux/gpio/consumer.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index e780fd4f8b46..7f16f985ab22 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c @@ -9,17 +9,19 @@ * the Free Software Foundation. */ -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_gpio.h> +#include <linux/regulator/consumer.h> -#include <video/omapdss.h> #include <video/omap-panel-data.h> #include <video/of_display_timing.h> +#include "../dss/omapdss.h" + struct panel_drv_data { struct omap_dss_device dssdev; struct omap_dss_device *in; @@ -32,6 +34,7 @@ struct panel_drv_data { int backlight_gpio; struct gpio_desc *enable_gpio; + struct regulator *vcc_supply; }; #define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) @@ -83,6 +86,12 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev) if (r) return r; + r = regulator_enable(ddata->vcc_supply); + if (r) { + in->ops.dpi->disable(in); + return r; + } + gpiod_set_value_cansleep(ddata->enable_gpio, 1); if (gpio_is_valid(ddata->backlight_gpio)) @@ -105,6 +114,7 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev) gpio_set_value_cansleep(ddata->backlight_gpio, 0); gpiod_set_value_cansleep(ddata->enable_gpio, 0); + regulator_disable(ddata->vcc_supply); in->ops.dpi->disable(in); @@ -213,6 +223,20 @@ static int panel_dpi_probe_of(struct platform_device *pdev) ddata->enable_gpio = gpio; + /* + * Many different panels are supported by this driver and there are + * probably very different needs for their reset pins in regards to + * timing and order relative to the enable gpio. So for now it's just + * ensured that the reset line isn't active. 
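
(Editor's sketch, not part of the patch: the panel-dpi hunks above slot the new "vcc" regulator between the DPI video source and the panel enable GPIO, and the disable path is deliberately the mirror image. A minimal power-up sequence under those assumptions, reusing this file's panel_drv_data; the function name is hypothetical.)

#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>

/* Bring the panel up: video source first, then the VCC rail, and only
 * then release the enable line; unwind in reverse order on failure. */
static int example_panel_dpi_power_on(struct panel_drv_data *ddata)
{
	struct omap_dss_device *in = ddata->in;
	int r;

	r = in->ops.dpi->enable(in);
	if (r)
		return r;

	r = regulator_enable(ddata->vcc_supply);
	if (r) {
		in->ops.dpi->disable(in);
		return r;
	}

	gpiod_set_value_cansleep(ddata->enable_gpio, 1);

	return 0;
}
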
+ */ + gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc"); + if (IS_ERR(ddata->vcc_supply)) + return PTR_ERR(ddata->vcc_supply); + ddata->backlight_gpio = -ENOENT; r = of_get_display_timing(node, "panel-timing", &timing); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 36485c2137ce..1b0cf2d8224b 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -14,7 +14,7 @@ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/fb.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/module.h> @@ -25,10 +25,10 @@ #include <linux/of_device.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> #include <video/mipi_display.h> +#include "../dss/omapdss.h" + /* DSI Virtual channel. Hardcoded for now. */ #define TCH 0 diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 458f77bc473d..6dfb96cea293 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -15,9 +15,9 @@ #include <linux/spi/spi.h> #include <linux/mutex.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include "../dss/omapdss.h" static struct omap_video_timings lb035q02_timings = { .x_res = 320, @@ -50,9 +50,6 @@ struct panel_drv_data { struct omap_video_timings videomode; - /* used for non-DT boot, to be removed */ - int backlight_gpio; - struct gpio_desc *enable_gpio; }; @@ -170,9 +167,6 @@ static int lb035q02_enable(struct omap_dss_device *dssdev) if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 1); - if (gpio_is_valid(ddata->backlight_gpio)) - gpio_set_value_cansleep(ddata->backlight_gpio, 1); - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; return 0; @@ -189,9 +183,6 @@ static void lb035q02_disable(struct omap_dss_device *dssdev) if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 0); - if (gpio_is_valid(ddata->backlight_gpio)) - gpio_set_value_cansleep(ddata->backlight_gpio, 0); - in->ops.dpi->disable(in); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; @@ -255,8 +246,6 @@ static int lb035q02_probe_of(struct spi_device *spi) ddata->enable_gpio = gpio; - ddata->backlight_gpio = -ENOENT; - in = omapdss_of_find_source_for_first_ep(node); if (IS_ERR(in)) { dev_err(&spi->dev, "failed to find video source\n"); @@ -289,13 +278,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) if (r) return r; - if (gpio_is_valid(ddata->backlight_gpio)) { - r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio, - GPIOF_OUT_INIT_LOW, "panel backlight"); - if (r) - goto err_gpio; - } - ddata->videomode = lb035q02_timings; dssdev = &ddata->dssdev; @@ -315,7 +297,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) return 0; err_reg: -err_gpio: omap_dss_put_device(ddata->in); return r; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 780cb263a318..fc4c238c9583 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ 
-15,10 +15,10 @@ #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/fb.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 529a017602e4..3d3efc561ea9 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -10,14 +10,14 @@ */ #include <linux/delay.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/regulator/consumer.h> -#include <video/omapdss.h> + +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 31efcca801bd..157c512205d1 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -29,13 +29,14 @@ #include <linux/sched.h> #include <linux/backlight.h> #include <linux/fb.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> #include <video/omap-panel-data.h> +#include "../dss/omapdss.h" + #define MIPID_CMD_READ_DISP_ID 0x04 #define MIPID_CMD_READ_RED 0x06 #define MIPID_CMD_READ_GREEN 0x07 diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index bd8d85041926..e859b3f893f7 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -28,7 +28,8 @@ #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/gpio.h> -#include <video/omapdss.h> + +#include "../dss/omapdss.h" struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 03e2beb7b4f0..66c6bbe6472b 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -14,12 +14,12 @@ #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/regulator/consumer.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include "../dss/omapdss.h" #define TPO_R02_MODE(x) ((x) & 7) #define TPO_R02_MODE_800x480 7 diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c index 7e4e5bebabbe..6a3ebfcd7223 100644 --- a/drivers/gpu/drm/omapdrm/dss/core.c +++ b/drivers/gpu/drm/omapdrm/dss/core.c @@ -35,8 +35,7 @@ #include <linux/suspend.h> #include <linux/slab.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -196,8 +195,6 @@ static int __init omap_dss_probe(struct platform_device *pdev) core.default_display_name = def_disp_name; else if (pdata->default_display_name) core.default_display_name = pdata->default_display_name; - else if (pdata->default_device) - core.default_display_name = pdata->default_device->name; return 0; diff --git 
a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index f83608b69e68..535240fba671 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -41,8 +41,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" #include "dispc.h" @@ -113,9 +112,14 @@ struct dispc_features { * never both, we can just use this flag for now. */ bool reverse_ilace_field_order:1; + + bool has_gamma_table:1; + + bool has_gamma_i734_bug:1; }; #define DISPC_MAX_NR_FIFOS 5 +#define DISPC_MAX_CHANNEL_GAMMA 4 static struct { struct platform_device *pdev; @@ -135,6 +139,8 @@ static struct { bool ctx_valid; u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; + u32 *gamma_table[DISPC_MAX_CHANNEL_GAMMA]; + const struct dispc_features *feat; bool is_enabled; @@ -178,11 +184,19 @@ struct dispc_reg_field { u8 low; }; +struct dispc_gamma_desc { + u32 len; + u32 bits; + u16 reg; + bool has_index; +}; + static const struct { const char *name; u32 vsync_irq; u32 framedone_irq; u32 sync_lost_irq; + struct dispc_gamma_desc gamma; struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM]; } mgr_desc[] = { [OMAP_DSS_CHANNEL_LCD] = { @@ -190,6 +204,12 @@ static const struct { .vsync_irq = DISPC_IRQ_VSYNC, .framedone_irq = DISPC_IRQ_FRAMEDONE, .sync_lost_irq = DISPC_IRQ_SYNC_LOST, + .gamma = { + .len = 256, + .bits = 8, + .reg = DISPC_GAMMA_TABLE0, + .has_index = true, + }, .reg_desc = { [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 }, [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 }, @@ -207,6 +227,12 @@ static const struct { .vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN, .framedone_irq = DISPC_IRQ_FRAMEDONETV, .sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT, + .gamma = { + .len = 1024, + .bits = 10, + .reg = DISPC_GAMMA_TABLE2, + .has_index = false, + }, .reg_desc = { [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 }, [DISPC_MGR_FLD_STNTFT] = { }, @@ -224,6 +250,12 @@ static const struct { .vsync_irq = DISPC_IRQ_VSYNC2, .framedone_irq = DISPC_IRQ_FRAMEDONE2, .sync_lost_irq = DISPC_IRQ_SYNC_LOST2, + .gamma = { + .len = 256, + .bits = 8, + .reg = DISPC_GAMMA_TABLE1, + .has_index = true, + }, .reg_desc = { [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 }, [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 }, @@ -241,6 +273,12 @@ static const struct { .vsync_irq = DISPC_IRQ_VSYNC3, .framedone_irq = DISPC_IRQ_FRAMEDONE3, .sync_lost_irq = DISPC_IRQ_SYNC_LOST3, + .gamma = { + .len = 256, + .bits = 8, + .reg = DISPC_GAMMA_TABLE3, + .has_index = true, + }, .reg_desc = { [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 }, [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 }, @@ -1084,20 +1122,6 @@ static u32 dispc_ovl_get_burst_size(enum omap_plane plane) return unit * 8; } -void dispc_enable_gamma_table(bool enable) -{ - /* - * This is partially implemented to support only disabling of - * the gamma table. 
- */ - if (enable) { - DSSWARN("Gamma table enabling for TV not yet supported"); - return; - } - - REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9); -} - static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable) { if (channel == OMAP_DSS_CHANNEL_DIGIT) @@ -3299,30 +3323,21 @@ static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div, static unsigned long dispc_fclk_rate(void) { - struct dss_pll *pll; - unsigned long r = 0; + unsigned long r; + enum dss_clk_source src; + + src = dss_get_dispc_clk_source(); - switch (dss_get_dispc_clk_source()) { - case OMAP_DSS_CLK_SRC_FCK: + if (src == DSS_CLK_SRC_FCK) { r = dss_get_dispc_clk_rate(); - break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: - pll = dss_pll_find("dsi0"); - if (!pll) - pll = dss_pll_find("video0"); + } else { + struct dss_pll *pll; + unsigned clkout_idx; - r = pll->cinfo.clkout[0]; - break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: - pll = dss_pll_find("dsi1"); - if (!pll) - pll = dss_pll_find("video1"); + pll = dss_pll_find_by_src(src); + clkout_idx = dss_pll_get_clkout_idx_for_src(src); - r = pll->cinfo.clkout[0]; - break; - default: - BUG(); - return 0; + r = pll->cinfo.clkout[clkout_idx]; } return r; @@ -3330,43 +3345,31 @@ static unsigned long dispc_fclk_rate(void) static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel) { - struct dss_pll *pll; int lcd; unsigned long r; - u32 l; - - if (dss_mgr_is_lcd(channel)) { - l = dispc_read_reg(DISPC_DIVISORo(channel)); + enum dss_clk_source src; - lcd = FLD_GET(l, 23, 16); + /* for TV, LCLK rate is the FCLK rate */ + if (!dss_mgr_is_lcd(channel)) + return dispc_fclk_rate(); - switch (dss_get_lcd_clk_source(channel)) { - case OMAP_DSS_CLK_SRC_FCK: - r = dss_get_dispc_clk_rate(); - break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: - pll = dss_pll_find("dsi0"); - if (!pll) - pll = dss_pll_find("video0"); + src = dss_get_lcd_clk_source(channel); - r = pll->cinfo.clkout[0]; - break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: - pll = dss_pll_find("dsi1"); - if (!pll) - pll = dss_pll_find("video1"); + if (src == DSS_CLK_SRC_FCK) { + r = dss_get_dispc_clk_rate(); + } else { + struct dss_pll *pll; + unsigned clkout_idx; - r = pll->cinfo.clkout[0]; - break; - default: - BUG(); - return 0; - } + pll = dss_pll_find_by_src(src); + clkout_idx = dss_pll_get_clkout_idx_for_src(src); - return r / lcd; - } else { - return dispc_fclk_rate(); + r = pll->cinfo.clkout[clkout_idx]; } + + lcd = REG_GET(DISPC_DIVISORo(channel), 23, 16); + + return r / lcd; } static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel) @@ -3426,15 +3429,14 @@ static unsigned long dispc_plane_lclk_rate(enum omap_plane plane) static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel) { int lcd, pcd; - enum omap_dss_clk_source lcd_clk_src; + enum dss_clk_source lcd_clk_src; seq_printf(s, "- %s -\n", mgr_desc[channel].name); lcd_clk_src = dss_get_lcd_clk_source(channel); - seq_printf(s, "%s clk source = %s (%s)\n", mgr_desc[channel].name, - dss_get_generic_clk_source_name(lcd_clk_src), - dss_feat_get_clk_source_name(lcd_clk_src)); + seq_printf(s, "%s clk source = %s\n", mgr_desc[channel].name, + dss_get_clk_source_name(lcd_clk_src)); dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd); @@ -3448,16 +3450,15 @@ void dispc_dump_clocks(struct seq_file *s) { int lcd; u32 l; - enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); + enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); if (dispc_runtime_get()) return; seq_printf(s, "- 
DISPC -\n"); - seq_printf(s, "dispc fclk source = %s (%s)\n", - dss_get_generic_clk_source_name(dispc_clk_src), - dss_feat_get_clk_source_name(dispc_clk_src)); + seq_printf(s, "dispc fclk source = %s\n", + dss_get_clk_source_name(dispc_clk_src)); seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate()); @@ -3814,6 +3815,139 @@ void dispc_disable_sidle(void) REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */ } +u32 dispc_mgr_gamma_size(enum omap_channel channel) +{ + const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma; + + if (!dispc.feat->has_gamma_table) + return 0; + + return gdesc->len; +} +EXPORT_SYMBOL(dispc_mgr_gamma_size); + +static void dispc_mgr_write_gamma_table(enum omap_channel channel) +{ + const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma; + u32 *table = dispc.gamma_table[channel]; + unsigned int i; + + DSSDBG("%s: channel %d\n", __func__, channel); + + for (i = 0; i < gdesc->len; ++i) { + u32 v = table[i]; + + if (gdesc->has_index) + v |= i << 24; + else if (i == 0) + v |= 1 << 31; + + dispc_write_reg(gdesc->reg, v); + } +} + +static void dispc_restore_gamma_tables(void) +{ + DSSDBG("%s()\n", __func__); + + if (!dispc.feat->has_gamma_table) + return; + + dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD); + + dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_DIGIT); + + if (dss_has_feature(FEAT_MGR_LCD2)) + dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD2); + + if (dss_has_feature(FEAT_MGR_LCD3)) + dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD3); +} + +static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = { + { .red = 0, .green = 0, .blue = 0, }, + { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, }, +}; + +void dispc_mgr_set_gamma(enum omap_channel channel, + const struct drm_color_lut *lut, + unsigned int length) +{ + const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma; + u32 *table = dispc.gamma_table[channel]; + uint i; + + DSSDBG("%s: channel %d, lut len %u, hw len %u\n", __func__, + channel, length, gdesc->len); + + if (!dispc.feat->has_gamma_table) + return; + + if (lut == NULL || length < 2) { + lut = dispc_mgr_gamma_default_lut; + length = ARRAY_SIZE(dispc_mgr_gamma_default_lut); + } + + for (i = 0; i < length - 1; ++i) { + uint first = i * (gdesc->len - 1) / (length - 1); + uint last = (i + 1) * (gdesc->len - 1) / (length - 1); + uint w = last - first; + u16 r, g, b; + uint j; + + if (w == 0) + continue; + + for (j = 0; j <= w; j++) { + r = (lut[i].red * (w - j) + lut[i+1].red * j) / w; + g = (lut[i].green * (w - j) + lut[i+1].green * j) / w; + b = (lut[i].blue * (w - j) + lut[i+1].blue * j) / w; + + r >>= 16 - gdesc->bits; + g >>= 16 - gdesc->bits; + b >>= 16 - gdesc->bits; + + table[first + j] = (r << (gdesc->bits * 2)) | + (g << gdesc->bits) | b; + } + } + + if (dispc.is_enabled) + dispc_mgr_write_gamma_table(channel); +} +EXPORT_SYMBOL(dispc_mgr_set_gamma); + +static int dispc_init_gamma_tables(void) +{ + int channel; + + if (!dispc.feat->has_gamma_table) + return 0; + + for (channel = 0; channel < ARRAY_SIZE(dispc.gamma_table); channel++) { + const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma; + u32 *gt; + + if (channel == OMAP_DSS_CHANNEL_LCD2 && + !dss_has_feature(FEAT_MGR_LCD2)) + continue; + + if (channel == OMAP_DSS_CHANNEL_LCD3 && + !dss_has_feature(FEAT_MGR_LCD3)) + continue; + + gt = devm_kmalloc_array(&dispc.pdev->dev, gdesc->len, + sizeof(u32), GFP_KERNEL); + if (!gt) + return -ENOMEM; + + dispc.gamma_table[channel] = gt; + + dispc_mgr_set_gamma(channel, NULL, 0); + } + return 0; 
+} + static void _omap_dispc_initial_config(void) { u32 l; @@ -3829,8 +3963,15 @@ static void _omap_dispc_initial_config(void) dispc.core_clk_rate = dispc_fclk_rate(); } - /* FUNCGATED */ - if (dss_has_feature(FEAT_FUNCGATED)) + /* Use gamma table mode, instead of palette mode */ + if (dispc.feat->has_gamma_table) + REG_FLD_MOD(DISPC_CONFIG, 1, 3, 3); + + /* For older DSS versions (FEAT_FUNCGATED) this enables + * func-clock auto-gating. For newer versions + * (dispc.feat->has_gamma_table) this enables tv-out gamma tables. + */ + if (dss_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table) REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9); dispc_setup_color_conv_coef(); @@ -3934,6 +4075,8 @@ static const struct dispc_features omap44xx_dispc_feats = { .has_writeback = true, .supports_double_pixel = true, .reverse_ilace_field_order = true, + .has_gamma_table = true, + .has_gamma_i734_bug = true, }; static const struct dispc_features omap54xx_dispc_feats = { @@ -3959,6 +4102,8 @@ static const struct dispc_features omap54xx_dispc_feats = { .has_writeback = true, .supports_double_pixel = true, .reverse_ilace_field_order = true, + .has_gamma_table = true, + .has_gamma_i734_bug = true, }; static int dispc_init_features(struct platform_device *pdev) @@ -4050,6 +4195,168 @@ void dispc_free_irq(void *dev_id) } EXPORT_SYMBOL(dispc_free_irq); +/* + * Workaround for errata i734 in DSS dispc + * - LCD1 Gamma Correction Is Not Working When GFX Pipe Is Disabled + * + * For gamma tables to work on LCD1 the GFX plane has to be used at + * least once after DSS HW has come out of reset. The workaround + * sets up a minimal LCD setup with GFX plane and waits for one + * vertical sync irq before disabling the setup and continuing with + * the context restore. The physical outputs are gated during the + * operation. This workaround requires that gamma table's LOADMODE + * is set to 0x2 in DISPC_CONTROL1 register. + * + * For details see: + * OMAP543x Multimedia Device Silicon Revision 2.0 Silicon Errata + * Literature Number: SWPZ037E + * Or some other relevant errata document for the DSS IP version. 
+ */ + +static const struct dispc_errata_i734_data { + struct omap_video_timings timings; + struct omap_overlay_info ovli; + struct omap_overlay_manager_info mgri; + struct dss_lcd_mgr_config lcd_conf; +} i734 = { + .timings = { + .x_res = 8, .y_res = 1, + .pixelclock = 16000000, + .hsw = 8, .hfp = 4, .hbp = 4, + .vsw = 1, .vfp = 1, .vbp = 1, + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .interlace = false, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .double_pixel = false, + }, + .ovli = { + .screen_width = 1, + .width = 1, .height = 1, + .color_mode = OMAP_DSS_COLOR_RGB24U, + .rotation = OMAP_DSS_ROT_0, + .rotation_type = OMAP_DSS_ROT_DMA, + .mirror = 0, + .pos_x = 0, .pos_y = 0, + .out_width = 0, .out_height = 0, + .global_alpha = 0xff, + .pre_mult_alpha = 0, + .zorder = 0, + }, + .mgri = { + .default_color = 0, + .trans_enabled = false, + .partial_alpha_enabled = false, + .cpr_enable = false, + }, + .lcd_conf = { + .io_pad_mode = DSS_IO_PAD_MODE_BYPASS, + .stallmode = false, + .fifohandcheck = false, + .clock_info = { + .lck_div = 1, + .pck_div = 2, + }, + .video_port_width = 24, + .lcden_sig_polarity = 0, + }, +}; + +static struct i734_buf { + size_t size; + dma_addr_t paddr; + void *vaddr; +} i734_buf; + +static int dispc_errata_i734_wa_init(void) +{ + if (!dispc.feat->has_gamma_i734_bug) + return 0; + + i734_buf.size = i734.ovli.width * i734.ovli.height * + color_mode_to_bpp(i734.ovli.color_mode) / 8; + + i734_buf.vaddr = dma_alloc_writecombine(&dispc.pdev->dev, i734_buf.size, + &i734_buf.paddr, GFP_KERNEL); + if (!i734_buf.vaddr) { + dev_err(&dispc.pdev->dev, "%s: dma_alloc_writecombine failed\n", + __func__); + return -ENOMEM; + } + + return 0; +} + +static void dispc_errata_i734_wa_fini(void) +{ + if (!dispc.feat->has_gamma_i734_bug) + return; + + dma_free_writecombine(&dispc.pdev->dev, i734_buf.size, i734_buf.vaddr, + i734_buf.paddr); +} + +static void dispc_errata_i734_wa(void) +{ + u32 framedone_irq = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_LCD); + struct omap_overlay_info ovli; + struct dss_lcd_mgr_config lcd_conf; + u32 gatestate; + unsigned int count; + + if (!dispc.feat->has_gamma_i734_bug) + return; + + gatestate = REG_GET(DISPC_CONFIG, 8, 4); + + ovli = i734.ovli; + ovli.paddr = i734_buf.paddr; + lcd_conf = i734.lcd_conf; + + /* Gate all LCD1 outputs */ + REG_FLD_MOD(DISPC_CONFIG, 0x1f, 8, 4); + + /* Set up and enable the GFX plane */ + dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD); + dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.timings, false); + dispc_ovl_enable(OMAP_DSS_GFX, true); + + /* Set up and enable the display manager for LCD1 */ + dispc_mgr_setup(OMAP_DSS_CHANNEL_LCD, &i734.mgri); + dispc_calc_clock_rates(dss_get_dispc_clk_rate(), + &lcd_conf.clock_info); + dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf); + dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.timings); + + dispc_clear_irqstatus(framedone_irq); + + /* Enable and then immediately disable the channel to produce just one frame */ + dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, true); + dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, false); + + /* Busy wait for framedone. We can't fiddle with irq handlers + * in PM resume. Typically the loop runs less than 5 times and + * waits less than a microsecond.
+ */ + count = 0; + while (!(dispc_read_irqstatus() & framedone_irq)) { + if (count++ > 10000) { + dev_err(&dispc.pdev->dev, "%s: framedone timeout\n", + __func__); + break; + } + } + dispc_ovl_enable(OMAP_DSS_GFX, false); + + /* Clear all irq bits before continuing */ + dispc_clear_irqstatus(0xffffffff); + + /* Restore the original state to LCD1 output gates */ + REG_FLD_MOD(DISPC_CONFIG, gatestate, 8, 4); +} + /* DISPC HW IP initialisation */ static int dispc_bind(struct device *dev, struct device *master, void *data) { @@ -4067,6 +4374,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) if (r) return r; + r = dispc_errata_i734_wa_init(); + if (r) + return r; + dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0); if (!dispc_mem) { DSSERR("can't get IORESOURCE_MEM DISPC\n"); @@ -4100,6 +4411,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) } } + r = dispc_init_gamma_tables(); + if (r) + return r; + pm_runtime_enable(&pdev->dev); r = dispc_runtime_get(); @@ -4127,6 +4442,8 @@ static void dispc_unbind(struct device *dev, struct device *master, void *data) { pm_runtime_disable(dev); + + dispc_errata_i734_wa_fini(); } static const struct component_ops dispc_component_ops = { @@ -4169,7 +4486,11 @@ static int dispc_runtime_resume(struct device *dev) if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) { _omap_dispc_initial_config(); + dispc_errata_i734_wa(); + dispc_restore_context(); + + dispc_restore_gamma_tables(); } dispc.is_enabled = true; diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.h b/drivers/gpu/drm/omapdrm/dss/dispc.h index 483744223dd1..bc1d8126ee87 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.h +++ b/drivers/gpu/drm/omapdrm/dss/dispc.h @@ -42,6 +42,11 @@ #define DISPC_MSTANDBY_CTRL 0x0858 #define DISPC_GLOBAL_MFLAG_ATTRIBUTE 0x085C +#define DISPC_GAMMA_TABLE0 0x0630 +#define DISPC_GAMMA_TABLE1 0x0634 +#define DISPC_GAMMA_TABLE2 0x0638 +#define DISPC_GAMMA_TABLE3 0x0850 + /* DISPC overlay registers */ #define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \ DISPC_BA0_OFFSET(n)) diff --git a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c index 038c15b04215..34fad2376f8d 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c @@ -18,8 +18,8 @@ */ #include <linux/kernel.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "dispc.h" static const struct dispc_coef coef3_M8[8] = { diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index 9f3dd09b0a6c..8dcdd7cf9937 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -28,7 +28,7 @@ #include <linux/platform_device.h> #include <linux/of.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "dss.h" #include "dss_features.h" diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 97ea60257884..b268295b76cf 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -34,17 +34,15 @@ #include <linux/clk.h> #include <linux/component.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" -#define HSDIV_DISPC 0 - struct dpi_data { struct platform_device *pdev; struct regulator *vdds_dsi_reg; + enum dss_clk_source clk_src; struct dss_pll *pll; struct mutex lock; @@ -69,7 +67,7 @@ static struct dpi_data *dpi_get_data_from_pdev(struct platform_device *pdev) return 
dev_get_drvdata(&pdev->dev); } -static struct dss_pll *dpi_get_pll(enum omap_channel channel) +static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel) { /* * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL @@ -83,64 +81,51 @@ static struct dss_pll *dpi_get_pll(enum omap_channel channel) case OMAPDSS_VER_OMAP3630: case OMAPDSS_VER_AM35xx: case OMAPDSS_VER_AM43xx: - return NULL; + return DSS_CLK_SRC_FCK; case OMAPDSS_VER_OMAP4430_ES1: case OMAPDSS_VER_OMAP4430_ES2: case OMAPDSS_VER_OMAP4: switch (channel) { case OMAP_DSS_CHANNEL_LCD: - return dss_pll_find("dsi0"); + return DSS_CLK_SRC_PLL1_1; case OMAP_DSS_CHANNEL_LCD2: - return dss_pll_find("dsi1"); + return DSS_CLK_SRC_PLL2_1; default: - return NULL; + return DSS_CLK_SRC_FCK; } case OMAPDSS_VER_OMAP5: switch (channel) { case OMAP_DSS_CHANNEL_LCD: - return dss_pll_find("dsi0"); + return DSS_CLK_SRC_PLL1_1; case OMAP_DSS_CHANNEL_LCD3: - return dss_pll_find("dsi1"); + return DSS_CLK_SRC_PLL2_1; + case OMAP_DSS_CHANNEL_LCD2: default: - return NULL; + return DSS_CLK_SRC_FCK; } case OMAPDSS_VER_DRA7xx: switch (channel) { case OMAP_DSS_CHANNEL_LCD: + return DSS_CLK_SRC_PLL1_1; case OMAP_DSS_CHANNEL_LCD2: - return dss_pll_find("video0"); + return DSS_CLK_SRC_PLL1_3; case OMAP_DSS_CHANNEL_LCD3: - return dss_pll_find("video1"); + return DSS_CLK_SRC_PLL2_1; default: - return NULL; + return DSS_CLK_SRC_FCK; } default: - return NULL; - } -} - -static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel) -{ - switch (channel) { - case OMAP_DSS_CHANNEL_LCD: - return OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC; - case OMAP_DSS_CHANNEL_LCD2: - return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC; - case OMAP_DSS_CHANNEL_LCD3: - return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC; - default: - /* this shouldn't happen */ - WARN_ON(1); - return OMAP_DSS_CLK_SRC_FCK; + return DSS_CLK_SRC_FCK; } } struct dpi_clk_calc_ctx { struct dss_pll *pll; + unsigned clkout_idx; /* inputs */ @@ -148,7 +133,7 @@ struct dpi_clk_calc_ctx { /* outputs */ - struct dss_pll_clock_info dsi_cinfo; + struct dss_pll_clock_info pll_cinfo; unsigned long fck; struct dispc_clock_info dispc_cinfo; }; @@ -193,8 +178,8 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc, if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000) return false; - ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc; - ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc; + ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc; + ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc; return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max, dpi_calc_dispc_cb, ctx); @@ -207,12 +192,12 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint, { struct dpi_clk_calc_ctx *ctx = data; - ctx->dsi_cinfo.n = n; - ctx->dsi_cinfo.m = m; - ctx->dsi_cinfo.fint = fint; - ctx->dsi_cinfo.clkdco = clkdco; + ctx->pll_cinfo.n = n; + ctx->pll_cinfo.m = m; + ctx->pll_cinfo.fint = fint; + ctx->pll_cinfo.clkdco = clkdco; - return dss_pll_hsdiv_calc(ctx->pll, clkdco, + return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), dpi_calc_hsdiv_cb, ctx); } @@ -227,25 +212,39 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data) dpi_calc_dispc_cb, ctx); } -static bool dpi_dsi_clk_calc(struct dpi_data *dpi, unsigned long pck, +static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck, struct dpi_clk_calc_ctx *ctx) { unsigned long clkin; - unsigned long pll_min, pll_max; memset(ctx, 0, sizeof(*ctx)); ctx->pll = dpi->pll; - ctx->pck_min = pck - 1000; - 
ctx->pck_max = pck + 1000; + ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src); - pll_min = 0; - pll_max = 0; + clkin = clk_get_rate(dpi->pll->clkin); - clkin = clk_get_rate(ctx->pll->clkin); + if (dpi->pll->hw->type == DSS_PLL_TYPE_A) { + unsigned long pll_min, pll_max; - return dss_pll_calc(ctx->pll, clkin, - pll_min, pll_max, - dpi_calc_pll_cb, ctx); + ctx->pck_min = pck - 1000; + ctx->pck_max = pck + 1000; + + pll_min = 0; + pll_max = 0; + + return dss_pll_calc_a(ctx->pll, clkin, + pll_min, pll_max, + dpi_calc_pll_cb, ctx); + } else { /* DSS_PLL_TYPE_B */ + dss_pll_calc_b(dpi->pll, clkin, pck, &ctx->pll_cinfo); + + ctx->dispc_cinfo.lck_div = 1; + ctx->dispc_cinfo.pck_div = 1; + ctx->dispc_cinfo.lck = ctx->pll_cinfo.clkout[0]; + ctx->dispc_cinfo.pck = ctx->dispc_cinfo.lck; + + return true; + } } static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx) @@ -279,7 +278,7 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx) -static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel, +static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel, unsigned long pck_req, unsigned long *fck, int *lck_div, int *pck_div) { @@ -287,20 +286,19 @@ static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel, int r; bool ok; - ok = dpi_dsi_clk_calc(dpi, pck_req, &ctx); + ok = dpi_pll_clk_calc(dpi, pck_req, &ctx); if (!ok) return -EINVAL; - r = dss_pll_set_config(dpi->pll, &ctx.dsi_cinfo); + r = dss_pll_set_config(dpi->pll, &ctx.pll_cinfo); if (r) return r; - dss_select_lcd_clk_source(channel, - dpi_get_alt_clk_src(channel)); + dss_select_lcd_clk_source(channel, dpi->clk_src); dpi->mgr_config.clock_info = ctx.dispc_cinfo; - *fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; + *fck = ctx.pll_cinfo.clkout[ctx.clkout_idx]; *lck_div = ctx.dispc_cinfo.lck_div; *pck_div = ctx.dispc_cinfo.pck_div; @@ -342,7 +340,7 @@ static int dpi_set_mode(struct dpi_data *dpi) int r = 0; if (dpi->pll) - r = dpi_set_dsi_clk(dpi, channel, t->pixelclock, &fck, + r = dpi_set_pll_clk(dpi, channel, t->pixelclock, &fck, &lck_div, &pck_div); else r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck, @@ -419,7 +417,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev) if (dpi->pll) { r = dss_pll_enable(dpi->pll); if (r) - goto err_dsi_pll_init; + goto err_pll_init; } r = dpi_set_mode(dpi); @@ -442,7 +440,7 @@ err_mgr_enable: err_set_mode: if (dpi->pll) dss_pll_disable(dpi->pll); -err_dsi_pll_init: +err_pll_init: err_src_sel: dispc_runtime_put(); err_get_dispc: @@ -465,7 +463,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev) dss_mgr_disable(channel); if (dpi->pll) { - dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); + dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK); dss_pll_disable(dpi->pll); } @@ -524,11 +522,11 @@ static int dpi_check_timings(struct omap_dss_device *dssdev, return -EINVAL; if (dpi->pll) { - ok = dpi_dsi_clk_calc(dpi, timings->pixelclock, &ctx); + ok = dpi_pll_clk_calc(dpi, timings->pixelclock, &ctx); if (!ok) return -EINVAL; - fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; + fck = ctx.pll_cinfo.clkout[ctx.clkout_idx]; } else { ok = dpi_dss_clk_calc(timings->pixelclock, &ctx); if (!ok) @@ -558,7 +556,7 @@ static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines) mutex_unlock(&dpi->lock); } -static int dpi_verify_dsi_pll(struct dss_pll *pll) +static int dpi_verify_pll(struct dss_pll *pll) { int r; @@ -602,16 +600,14 @@ static void dpi_init_pll(struct dpi_data *dpi) if 
(dpi->pll) return; - pll = dpi_get_pll(dpi->output.dispc_channel); + dpi->clk_src = dpi_get_clk_src(dpi->output.dispc_channel); + + pll = dss_pll_find_by_src(dpi->clk_src); if (!pll) return; - /* On DRA7 we need to set a mux to use the PLL */ - if (omapdss_get_version() == OMAPDSS_VER_DRA7xx) - dss_ctrl_pll_set_control_mux(pll->id, dpi->output.dispc_channel); - - if (dpi_verify_dsi_pll(pll)) { - DSSWARN("DSI PLL not operational\n"); + if (dpi_verify_pll(pll)) { + DSSWARN("PLL not operational\n"); return; } diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 8730646a0cbb..6f45e9d00b41 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -42,9 +42,9 @@ #include <linux/of_platform.h> #include <linux/component.h> -#include <video/omapdss.h> #include <video/mipi_display.h> +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -1180,15 +1180,6 @@ static int dsi_regulator_init(struct platform_device *dsidev) return PTR_ERR(vdds_dsi); } - if (regulator_can_change_voltage(vdds_dsi)) { - r = regulator_set_voltage(vdds_dsi, 1800000, 1800000); - if (r) { - devm_regulator_put(vdds_dsi); - DSSERR("can't set the DSI regulator voltage\n"); - return r; - } - } - dsi->vdds_dsi_reg = vdds_dsi; return 0; @@ -1271,7 +1262,7 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev) unsigned long r; struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); - if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) { + if (dss_get_dsi_clk_source(dsi->module_id) == DSS_CLK_SRC_FCK) { /* DSI FCLK source is DSS_CLK_FCK */ r = clk_get_rate(dsi->dss_clk); } else { @@ -1484,7 +1475,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, { struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; - enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; + enum dss_clk_source dispc_clk_src, dsi_clk_src; int dsi_module = dsi->module_id; struct dss_pll *pll = &dsi->pll; @@ -1504,28 +1495,27 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, cinfo->clkdco, cinfo->m); seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n", - dss_feat_get_clk_source_name(dsi_module == 0 ? - OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : - OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC), + dss_get_clk_source_name(dsi_module == 0 ? + DSS_CLK_SRC_PLL1_1 : + DSS_CLK_SRC_PLL2_1), cinfo->clkout[HSDIV_DISPC], cinfo->mX[HSDIV_DISPC], - dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ? + dispc_clk_src == DSS_CLK_SRC_FCK ? "off" : "on"); seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n", - dss_feat_get_clk_source_name(dsi_module == 0 ? - OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : - OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI), + dss_get_clk_source_name(dsi_module == 0 ? + DSS_CLK_SRC_PLL1_2 : + DSS_CLK_SRC_PLL2_2), cinfo->clkout[HSDIV_DSI], cinfo->mX[HSDIV_DSI], - dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ? + dsi_clk_src == DSS_CLK_SRC_FCK ? "off" : "on"); seq_printf(s, "- DSI%d -\n", dsi_module + 1); - seq_printf(s, "dsi fclk source = %s (%s)\n", - dss_get_generic_clk_source_name(dsi_clk_src), - dss_feat_get_clk_source_name(dsi_clk_src)); + seq_printf(s, "dsi fclk source = %s\n", + dss_get_clk_source_name(dsi_clk_src)); seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev)); @@ -4111,8 +4101,8 @@ static int dsi_display_init_dispc(struct platform_device *dsidev, int r; dss_select_lcd_clk_source(channel, dsi->module_id == 0 ? 
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : - OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC); + DSS_CLK_SRC_PLL1_1 : + DSS_CLK_SRC_PLL2_1); if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) { r = dss_mgr_register_framedone_handler(channel, @@ -4159,7 +4149,7 @@ err1: dss_mgr_unregister_framedone_handler(channel, dsi_framedone_irq_callback, dsidev); err: - dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); + dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK); return r; } @@ -4172,7 +4162,7 @@ static void dsi_display_uninit_dispc(struct platform_device *dsidev, dss_mgr_unregister_framedone_handler(channel, dsi_framedone_irq_callback, dsidev); - dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); + dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK); } static int dsi_configure_dsi_clocks(struct platform_device *dsidev) @@ -4206,8 +4196,8 @@ static int dsi_display_init_dsi(struct platform_device *dsidev) goto err1; dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ? - OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : - OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI); + DSS_CLK_SRC_PLL1_2 : + DSS_CLK_SRC_PLL2_2); DSSDBG("PLL OK\n"); @@ -4239,7 +4229,7 @@ static int dsi_display_init_dsi(struct platform_device *dsidev) err3: dsi_cio_uninit(dsidev); err2: - dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); + dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK); err1: dss_pll_disable(&dsi->pll); err0: @@ -4261,7 +4251,7 @@ static void dsi_display_uninit_dsi(struct platform_device *dsidev, dsi_vc_enable(dsidev, 2, 0); dsi_vc_enable(dsidev, 3, 0); - dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); + dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK); dsi_cio_uninit(dsidev); dsi_pll_uninit(dsidev, disconnect_lanes); } @@ -4462,7 +4452,7 @@ static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint, ctx->dsi_cinfo.fint = fint; ctx->dsi_cinfo.clkdco = clkdco; - return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, + return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), dsi_cm_calc_hsdiv_cb, ctx); } @@ -4501,7 +4491,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi, pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4); pll_max = cfg->hs_clk_max * 4; - return dss_pll_calc(ctx->pll, clkin, + return dss_pll_calc_a(ctx->pll, clkin, pll_min, pll_max, dsi_cm_calc_pll_cb, ctx); } @@ -4760,7 +4750,7 @@ static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint, ctx->dsi_cinfo.fint = fint; ctx->dsi_cinfo.clkdco = clkdco; - return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, + return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), dsi_vm_calc_hsdiv_cb, ctx); } @@ -4802,7 +4792,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi, pll_max = byteclk_max * 4 * 4; } - return dss_pll_calc(ctx->pll, clkin, + return dss_pll_calc_a(ctx->pll, clkin, pll_min, pll_max, dsi_vm_calc_pll_cb, ctx); } @@ -5148,6 +5138,8 @@ static const struct dss_pll_ops dsi_pll_ops = { }; static const struct dss_pll_hw dss_omap3_dsi_pll_hw = { + .type = DSS_PLL_TYPE_A, + .n_max = (1 << 7) - 1, .m_max = (1 << 11) - 1, .mX_max = (1 << 4) - 1, @@ -5173,6 +5165,8 @@ static const struct dss_pll_hw dss_omap3_dsi_pll_hw = { }; static const struct dss_pll_hw dss_omap4_dsi_pll_hw = { + .type = DSS_PLL_TYPE_A, + .n_max = (1 << 8) - 1, .m_max = (1 << 12) - 1, .mX_max = (1 << 5) - 1, @@ -5198,6 +5192,8 @@ static const struct dss_pll_hw dss_omap4_dsi_pll_hw = { }; static const struct dss_pll_hw 
dss_omap5_dsi_pll_hw = { + .type = DSS_PLL_TYPE_A, + .n_max = (1 << 8) - 1, .m_max = (1 << 12) - 1, .mX_max = (1 << 5) - 1, diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c index bf407b6ba15c..dfd4e9621e3b 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss-of.c +++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c @@ -18,8 +18,7 @@ #include <linux/of.h> #include <linux/seq_file.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" struct device_node * diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index f95ff319e68e..14887d5b02e5 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -30,6 +30,7 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/clk.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/gfp.h> @@ -41,8 +42,7 @@ #include <linux/suspend.h> #include <linux/component.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -75,6 +75,8 @@ struct dss_features { const enum omap_display_type *ports; int num_ports; int (*dpi_select_source)(int port, enum omap_channel channel); + int (*select_lcd_source)(enum omap_channel channel, + enum dss_clk_source clk_src); }; static struct { @@ -91,9 +93,9 @@ static struct { unsigned long cache_prate; struct dispc_clock_info cache_dispc_cinfo; - enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI]; - enum omap_dss_clk_source dispc_clk_source; - enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; + enum dss_clk_source dsi_clk_source[MAX_NUM_DSI]; + enum dss_clk_source dispc_clk_source; + enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; bool ctx_valid; u32 ctx[DSS_SZ_REGS / sizeof(u32)]; @@ -105,11 +107,14 @@ static struct { } dss; static const char * const dss_generic_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI", - [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DSI_PLL2_HSDIV_DISPC", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DSI_PLL2_HSDIV_DSI", + [DSS_CLK_SRC_FCK] = "FCK", + [DSS_CLK_SRC_PLL1_1] = "PLL1:1", + [DSS_CLK_SRC_PLL1_2] = "PLL1:2", + [DSS_CLK_SRC_PLL1_3] = "PLL1:3", + [DSS_CLK_SRC_PLL2_1] = "PLL2:1", + [DSS_CLK_SRC_PLL2_2] = "PLL2:2", + [DSS_CLK_SRC_PLL2_3] = "PLL2:3", + [DSS_CLK_SRC_HDMI_PLL] = "HDMI PLL", }; static bool dss_initialized; @@ -202,68 +207,70 @@ void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable) 1 << shift, val << shift); } -void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id, +static int dss_ctrl_pll_set_control_mux(enum dss_clk_source clk_src, enum omap_channel channel) { unsigned shift, val; if (!dss.syscon_pll_ctrl) - return; + return -EINVAL; switch (channel) { case OMAP_DSS_CHANNEL_LCD: shift = 3; - switch (pll_id) { - case DSS_PLL_VIDEO1: + switch (clk_src) { + case DSS_CLK_SRC_PLL1_1: val = 0; break; - case DSS_PLL_HDMI: + case DSS_CLK_SRC_HDMI_PLL: val = 1; break; default: DSSERR("error in PLL mux config for LCD\n"); - return; + return -EINVAL; } break; case OMAP_DSS_CHANNEL_LCD2: shift = 5; - switch (pll_id) { - case DSS_PLL_VIDEO1: + switch (clk_src) { + case DSS_CLK_SRC_PLL1_3: val = 0; break; - case DSS_PLL_VIDEO2: + case DSS_CLK_SRC_PLL2_3: val = 1; break; - case DSS_PLL_HDMI: + case DSS_CLK_SRC_HDMI_PLL: val = 2; break; default: DSSERR("error in PLL mux config for LCD2\n"); - return; + return 
-EINVAL; } break; case OMAP_DSS_CHANNEL_LCD3: shift = 7; - switch (pll_id) { - case DSS_PLL_VIDEO1: - val = 1; break; - case DSS_PLL_VIDEO2: + switch (clk_src) { + case DSS_CLK_SRC_PLL2_1: val = 0; break; - case DSS_PLL_HDMI: + case DSS_CLK_SRC_PLL1_3: + val = 1; break; + case DSS_CLK_SRC_HDMI_PLL: val = 2; break; default: DSSERR("error in PLL mux config for LCD3\n"); - return; + return -EINVAL; } break; default: DSSERR("error in PLL mux config\n"); - return; + return -EINVAL; } regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset, 0x3 << shift, val << shift); + + return 0; } void dss_sdi_init(int datapairs) @@ -353,14 +360,14 @@ void dss_sdi_disable(void) REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */ } -const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src) +const char *dss_get_clk_source_name(enum dss_clk_source clk_src) { return dss_generic_clk_source_names[clk_src]; } void dss_dump_clocks(struct seq_file *s) { - const char *fclk_name, *fclk_real_name; + const char *fclk_name; unsigned long fclk_rate; if (dss_runtime_get()) @@ -368,12 +375,11 @@ void dss_dump_clocks(struct seq_file *s) seq_printf(s, "- DSS -\n"); - fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK); - fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK); + fclk_name = dss_get_clk_source_name(DSS_CLK_SRC_FCK); fclk_rate = clk_get_rate(dss.dss_clk); - seq_printf(s, "%s (%s) = %lu\n", - fclk_name, fclk_real_name, + seq_printf(s, "%s = %lu\n", + fclk_name, fclk_rate); dss_runtime_put(); @@ -402,19 +408,42 @@ static void dss_dump_regs(struct seq_file *s) #undef DUMPREG } -static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) +static int dss_get_channel_index(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0; + case OMAP_DSS_CHANNEL_LCD2: + return 1; + case OMAP_DSS_CHANNEL_LCD3: + return 2; + default: + WARN_ON(1); + return 0; + } +} + +static void dss_select_dispc_clk_source(enum dss_clk_source clk_src) { int b; u8 start, end; + /* + * We always use PRCM clock as the DISPC func clock, except on DSS3, + * where we don't have separate DISPC and LCD clock sources. 
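+ * + * On DSS versions without FEAT_LCD_CLK_SRC the LCD clock source is not + * separately muxable, so dss_select_lcd_clk_source() falls back to this + * function and caches the result: + * + * if (!dss_has_feature(FEAT_LCD_CLK_SRC)) { + * dss_select_dispc_clk_source(clk_src); + * dss.lcd_clk_source[idx] = clk_src; + * return; + * }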
+ */ + if (WARN_ON(dss_has_feature(FEAT_LCD_CLK_SRC) && + clk_src != DSS_CLK_SRC_FCK)) + return; + switch (clk_src) { - case OMAP_DSS_CLK_SRC_FCK: + case DSS_CLK_SRC_FCK: b = 0; break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: + case DSS_CLK_SRC_PLL1_1: b = 1; break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: + case DSS_CLK_SRC_PLL2_1: b = 2; break; default: @@ -430,19 +459,19 @@ static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) } void dss_select_dsi_clk_source(int dsi_module, - enum omap_dss_clk_source clk_src) + enum dss_clk_source clk_src) { int b, pos; switch (clk_src) { - case OMAP_DSS_CLK_SRC_FCK: + case DSS_CLK_SRC_FCK: b = 0; break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI: + case DSS_CLK_SRC_PLL1_2: BUG_ON(dsi_module != 0); b = 1; break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI: + case DSS_CLK_SRC_PLL2_2: BUG_ON(dsi_module != 1); b = 1; break; @@ -457,59 +486,125 @@ void dss_select_dsi_clk_source(int dsi_module, dss.dsi_clk_source[dsi_module] = clk_src; } +static int dss_lcd_clk_mux_dra7(enum omap_channel channel, + enum dss_clk_source clk_src) +{ + const u8 ctrl_bits[] = { + [OMAP_DSS_CHANNEL_LCD] = 0, + [OMAP_DSS_CHANNEL_LCD2] = 12, + [OMAP_DSS_CHANNEL_LCD3] = 19, + }; + + u8 ctrl_bit = ctrl_bits[channel]; + int r; + + if (clk_src == DSS_CLK_SRC_FCK) { + /* LCDx_CLK_SWITCH */ + REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit); + return 0; + } + + r = dss_ctrl_pll_set_control_mux(clk_src, channel); + if (r) + return r; + + REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit); + + return 0; +} + +static int dss_lcd_clk_mux_omap5(enum omap_channel channel, + enum dss_clk_source clk_src) +{ + const u8 ctrl_bits[] = { + [OMAP_DSS_CHANNEL_LCD] = 0, + [OMAP_DSS_CHANNEL_LCD2] = 12, + [OMAP_DSS_CHANNEL_LCD3] = 19, + }; + const enum dss_clk_source allowed_plls[] = { + [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1, + [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_FCK, + [OMAP_DSS_CHANNEL_LCD3] = DSS_CLK_SRC_PLL2_1, + }; + + u8 ctrl_bit = ctrl_bits[channel]; + + if (clk_src == DSS_CLK_SRC_FCK) { + /* LCDx_CLK_SWITCH */ + REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit); + return 0; + } + + if (WARN_ON(allowed_plls[channel] != clk_src)) + return -EINVAL; + + REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit); + + return 0; +} + +static int dss_lcd_clk_mux_omap4(enum omap_channel channel, + enum dss_clk_source clk_src) +{ + const u8 ctrl_bits[] = { + [OMAP_DSS_CHANNEL_LCD] = 0, + [OMAP_DSS_CHANNEL_LCD2] = 12, + }; + const enum dss_clk_source allowed_plls[] = { + [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1, + [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_PLL2_1, + }; + + u8 ctrl_bit = ctrl_bits[channel]; + + if (clk_src == DSS_CLK_SRC_FCK) { + /* LCDx_CLK_SWITCH */ + REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit); + return 0; + } + + if (WARN_ON(allowed_plls[channel] != clk_src)) + return -EINVAL; + + REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit); + + return 0; +} + void dss_select_lcd_clk_source(enum omap_channel channel, - enum omap_dss_clk_source clk_src) + enum dss_clk_source clk_src) { - int b, ix, pos; + int idx = dss_get_channel_index(channel); + int r; if (!dss_has_feature(FEAT_LCD_CLK_SRC)) { dss_select_dispc_clk_source(clk_src); + dss.lcd_clk_source[idx] = clk_src; return; } - switch (clk_src) { - case OMAP_DSS_CLK_SRC_FCK: - b = 0; - break; - case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: - BUG_ON(channel != OMAP_DSS_CHANNEL_LCD); - b = 1; - break; - case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: - BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 && - channel != 
OMAP_DSS_CHANNEL_LCD3); - b = 1; - break; - default: - BUG(); + r = dss.feat->select_lcd_source(channel, clk_src); + if (r) return; - } - - pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : - (channel == OMAP_DSS_CHANNEL_LCD2 ? 12 : 19); - REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */ - ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : - (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2); - dss.lcd_clk_source[ix] = clk_src; + dss.lcd_clk_source[idx] = clk_src; } -enum omap_dss_clk_source dss_get_dispc_clk_source(void) +enum dss_clk_source dss_get_dispc_clk_source(void) { return dss.dispc_clk_source; } -enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module) +enum dss_clk_source dss_get_dsi_clk_source(int dsi_module) { return dss.dsi_clk_source[dsi_module]; } -enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) +enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) { if (dss_has_feature(FEAT_LCD_CLK_SRC)) { - int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : - (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2); - return dss.lcd_clk_source[ix]; + int idx = dss_get_channel_index(channel); + return dss.lcd_clk_source[idx]; } else { /* LCD_CLK source is the same as DISPC_FCLK source for * OMAP2 and OMAP3 */ @@ -858,6 +953,7 @@ static const struct dss_features omap44xx_dss_feats = { .dpi_select_source = &dss_dpi_select_source_omap4, .ports = omap2plus_ports, .num_ports = ARRAY_SIZE(omap2plus_ports), + .select_lcd_source = &dss_lcd_clk_mux_omap4, }; static const struct dss_features omap54xx_dss_feats = { @@ -867,6 +963,7 @@ static const struct dss_features omap54xx_dss_feats = { .dpi_select_source = &dss_dpi_select_source_omap5, .ports = omap2plus_ports, .num_ports = ARRAY_SIZE(omap2plus_ports), + .select_lcd_source = &dss_lcd_clk_mux_omap5, }; static const struct dss_features am43xx_dss_feats = { @@ -885,6 +982,7 @@ static const struct dss_features dra7xx_dss_feats = { .dpi_select_source = &dss_dpi_select_source_dra7xx, .ports = dra7xx_ports, .num_ports = ARRAY_SIZE(dra7xx_ports), + .select_lcd_source = &dss_lcd_clk_mux_dra7, }; static int dss_init_features(struct platform_device *pdev) @@ -1142,18 +1240,18 @@ static int dss_bind(struct device *dev) /* Select DPLL */ REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); - dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); + dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); #ifdef CONFIG_OMAP2_DSS_VENC REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ #endif - dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; - dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; - dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK; - dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; - dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; + dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK; + dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK; + dss.dispc_clk_source = DSS_CLK_SRC_FCK; + dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK; + dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK; rev = dss_read_reg(DSS_REVISION); printk(KERN_INFO "OMAP DSS rev %d.%d\n", diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index 38e6ab50142d..4fd06dc41cb3 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -102,6 +102,20 @@ enum dss_writeback_channel { DSS_WB_LCD3_MGR = 7, }; +enum dss_clk_source { + DSS_CLK_SRC_FCK = 0, + + DSS_CLK_SRC_PLL1_1, + DSS_CLK_SRC_PLL1_2, + DSS_CLK_SRC_PLL1_3, + + DSS_CLK_SRC_PLL2_1, + DSS_CLK_SRC_PLL2_2, + 
DSS_CLK_SRC_PLL2_3, + + DSS_CLK_SRC_HDMI_PLL, +}; + enum dss_pll_id { DSS_PLL_DSI1, DSS_PLL_DSI2, @@ -114,6 +128,11 @@ struct dss_pll; #define DSS_PLL_MAX_HSDIVS 4 +enum dss_pll_type { + DSS_PLL_TYPE_A, + DSS_PLL_TYPE_B, +}; + /* * Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7. * Type-B PLLs: clkout[0] refers to m2. @@ -140,6 +159,8 @@ struct dss_pll_ops { }; struct dss_pll_hw { + enum dss_pll_type type; + unsigned n_max; unsigned m_min; unsigned m_max; @@ -227,7 +248,7 @@ unsigned long dss_get_dispc_clk_rate(void); int dss_dpi_select_source(int port, enum omap_channel channel); void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void); -const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); +const char *dss_get_clk_source_name(enum dss_clk_source clk_src); void dss_dump_clocks(struct seq_file *s); /* DSS VIDEO PLL */ @@ -244,20 +265,18 @@ void dss_debug_dump_clocks(struct seq_file *s); #endif void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable); -void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id, - enum omap_channel channel); void dss_sdi_init(int datapairs); int dss_sdi_enable(void); void dss_sdi_disable(void); void dss_select_dsi_clk_source(int dsi_module, - enum omap_dss_clk_source clk_src); + enum dss_clk_source clk_src); void dss_select_lcd_clk_source(enum omap_channel channel, - enum omap_dss_clk_source clk_src); -enum omap_dss_clk_source dss_get_dispc_clk_source(void); -enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module); -enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel); + enum dss_clk_source clk_src); +enum dss_clk_source dss_get_dispc_clk_source(void); +enum dss_clk_source dss_get_dsi_clk_source(int dsi_module); +enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel); void dss_set_venc_output(enum omap_dss_venc_type type); void dss_set_dac_pwrdn_bgz(bool enable); @@ -409,17 +428,23 @@ typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc, int dss_pll_register(struct dss_pll *pll); void dss_pll_unregister(struct dss_pll *pll); struct dss_pll *dss_pll_find(const char *name); +struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src); +unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src); int dss_pll_enable(struct dss_pll *pll); void dss_pll_disable(struct dss_pll *pll); int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cinfo); -bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, +bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco, unsigned long out_min, unsigned long out_max, dss_hsdiv_calc_func func, void *data); -bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, +bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin, unsigned long pll_min, unsigned long pll_max, dss_pll_calc_func func, void *data); + +bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin, + unsigned long target_clkout, struct dss_pll_clock_info *cinfo); + int dss_pll_write_config_type_a(struct dss_pll *pll, const struct dss_pll_clock_info *cinfo); int dss_pll_write_config_type_b(struct dss_pll *pll, diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.c b/drivers/gpu/drm/omapdrm/dss/dss_features.c index c886a2927f73..ee5b93ce2763 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss_features.c +++ b/drivers/gpu/drm/omapdrm/dss/dss_features.c @@ -23,8 +23,7 @@ #include <linux/err.h> 
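/* Sketch (not from the patch; pll, clkin and the calc arguments are assumed context): callers branch on the new dss_pll_hw type field to pick the calculation path, mirroring the DPI changes above: if (pll->hw->type == DSS_PLL_TYPE_A) ok = dss_pll_calc_a(pll, clkin, pll_min, pll_max, calc_func, ctx); else dss_pll_calc_b(pll, clkin, target_clkout, &cinfo); - type B being the fractional PLLs such as the HDMI PLL, computed directly. */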
#include <linux/slab.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -50,7 +49,6 @@ struct omap_dss_features { const enum omap_dss_output_id *supported_outputs; const enum omap_color_mode *supported_color_modes; const enum omap_overlay_caps *overlay_caps; - const char * const *clksrc_names; const struct dss_param_range *dss_params; const enum omap_dss_rotation_type supported_rotation_types; @@ -389,34 +387,6 @@ static const enum omap_overlay_caps omap4_dss_overlay_caps[] = { OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION, }; -static const char * const omap2_dss_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A", - [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK1", -}; - -static const char * const omap3_dss_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK", - [OMAP_DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK", -}; - -static const char * const omap4_dss_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2", - [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "PLL2_CLK1", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "PLL2_CLK2", -}; - -static const char * const omap5_dss_clk_source_names[] = { - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DPLL_DSI1_A_CLK1", - [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DPLL_DSI1_A_CLK2", - [OMAP_DSS_CLK_SRC_FCK] = "DSS_CLK", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DPLL_DSI1_C_CLK1", - [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DPLL_DSI1_C_CLK2", -}; - static const struct dss_param_range omap2_dss_param_range[] = { [FEAT_PARAM_DSS_FCK] = { 0, 133000000 }, [FEAT_PARAM_DSS_PCD] = { 2, 255 }, @@ -631,7 +601,6 @@ static const struct omap_dss_features omap2_dss_features = { .supported_outputs = omap2_dss_supported_outputs, .supported_color_modes = omap2_dss_supported_color_modes, .overlay_caps = omap2_dss_overlay_caps, - .clksrc_names = omap2_dss_clk_source_names, .dss_params = omap2_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, @@ -652,7 +621,6 @@ static const struct omap_dss_features omap3430_dss_features = { .supported_outputs = omap3430_dss_supported_outputs, .supported_color_modes = omap3_dss_supported_color_modes, .overlay_caps = omap3430_dss_overlay_caps, - .clksrc_names = omap3_dss_clk_source_names, .dss_params = omap3_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, @@ -676,7 +644,6 @@ static const struct omap_dss_features am35xx_dss_features = { .supported_outputs = omap3430_dss_supported_outputs, .supported_color_modes = omap3_dss_supported_color_modes, .overlay_caps = omap3430_dss_overlay_caps, - .clksrc_names = omap3_dss_clk_source_names, .dss_params = omap3_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, @@ -696,7 +663,6 @@ static const struct omap_dss_features am43xx_dss_features = { .supported_outputs = am43xx_dss_supported_outputs, .supported_color_modes = omap3_dss_supported_color_modes, .overlay_caps = omap3430_dss_overlay_caps, - .clksrc_names = omap2_dss_clk_source_names, .dss_params = am43xx_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA, .buffer_size_unit = 1, @@ -716,7 +682,6 @@ static const struct omap_dss_features omap3630_dss_features = { .supported_outputs = 
omap3630_dss_supported_outputs, .supported_color_modes = omap3_dss_supported_color_modes, .overlay_caps = omap3630_dss_overlay_caps, - .clksrc_names = omap3_dss_clk_source_names, .dss_params = omap3_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, .buffer_size_unit = 1, @@ -738,7 +703,6 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = { .supported_outputs = omap4_dss_supported_outputs, .supported_color_modes = omap4_dss_supported_color_modes, .overlay_caps = omap4_dss_overlay_caps, - .clksrc_names = omap4_dss_clk_source_names, .dss_params = omap4_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, @@ -759,7 +723,6 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = { .supported_outputs = omap4_dss_supported_outputs, .supported_color_modes = omap4_dss_supported_color_modes, .overlay_caps = omap4_dss_overlay_caps, - .clksrc_names = omap4_dss_clk_source_names, .dss_params = omap4_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, @@ -780,7 +743,6 @@ static const struct omap_dss_features omap4_dss_features = { .supported_outputs = omap4_dss_supported_outputs, .supported_color_modes = omap4_dss_supported_color_modes, .overlay_caps = omap4_dss_overlay_caps, - .clksrc_names = omap4_dss_clk_source_names, .dss_params = omap4_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, @@ -801,7 +763,6 @@ static const struct omap_dss_features omap5_dss_features = { .supported_outputs = omap5_dss_supported_outputs, .supported_color_modes = omap4_dss_supported_color_modes, .overlay_caps = omap4_dss_overlay_caps, - .clksrc_names = omap5_dss_clk_source_names, .dss_params = omap5_dss_param_range, .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, .buffer_size_unit = 16, @@ -859,11 +820,6 @@ bool dss_feat_color_mode_supported(enum omap_plane plane, color_mode; } -const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id) -{ - return omap_current_dss_features->clksrc_names[id]; -} - u32 dss_feat_get_buffer_size_unit(void) { return omap_current_dss_features->buffer_size_unit; diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.h b/drivers/gpu/drm/omapdrm/dss/dss_features.h index 3d67d39f192f..bb4b7f0e642b 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss_features.h +++ b/drivers/gpu/drm/omapdrm/dss/dss_features.h @@ -91,7 +91,6 @@ unsigned long dss_feat_get_param_max(enum dss_range_param param); enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane); bool dss_feat_color_mode_supported(enum omap_plane plane, enum omap_color_mode color_mode); -const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id); u32 dss_feat_get_buffer_size_unit(void); /* in bytes */ u32 dss_feat_get_burst_size_unit(void); /* in bytes */ diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h index 53616b02b613..63e711545865 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi.h +++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h @@ -23,8 +23,9 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/hdmi.h> -#include <video/omapdss.h> +#include <sound/omap-hdmi-audio.h> +#include "omapdss.h" #include "dss.h" /* HDMI Wrapper */ @@ -240,6 +241,7 @@ struct hdmi_pll_data { void __iomem *base; + struct platform_device *pdev; struct hdmi_wp_data *wp; }; @@ -306,8 +308,6 @@ phys_addr_t hdmi_wp_get_audio_dma_addr(struct 
hdmi_wp_data *wp); /* HDMI PLL funcs */ void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s); -void hdmi_pll_compute(struct hdmi_pll_data *pll, - unsigned long target_tmds, struct dss_pll_clock_info *pi); int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll, struct hdmi_wp_data *wp); void hdmi_pll_uninit(struct hdmi_pll_data *hpll); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index f892ae157ff3..cbd28dfdb86a 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -33,9 +33,10 @@ #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <linux/of.h> #include <sound/omap-hdmi-audio.h> +#include "omapdss.h" #include "hdmi4_core.h" #include "dss.h" #include "dss_features.h" @@ -100,7 +101,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) static int hdmi_init_regulator(void) { - int r; struct regulator *reg; if (hdmi.vdda_reg != NULL) @@ -114,15 +114,6 @@ static int hdmi_init_regulator(void) return PTR_ERR(reg); } - if (regulator_can_change_voltage(reg)) { - r = regulator_set_voltage(reg, 1800000, 1800000); - if (r) { - devm_regulator_put(reg); - DSSWARN("can't set the regulator voltage\n"); - return r; - } - } - hdmi.vdda_reg = reg; return 0; @@ -186,7 +177,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev) if (p->double_pixel) pc *= 2; - hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo); + /* DSS_HDMI_TCLK is bitclk / 10 */ + pc *= 10; + + dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin), + pc, &hdmi_cinfo); r = dss_pll_enable(&hdmi.pll.pll); if (r) { @@ -213,9 +208,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev) hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg); - /* bypass TV gamma table */ - dispc_enable_gamma_table(0); - /* tv size */ dss_mgr_set_timings(channel, p); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index fa72e735dad2..ef3afe99e487 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -211,7 +211,7 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg) static void hdmi_core_powerdown_disable(struct hdmi_core_data *core) { DSSDBG("Enter hdmi_core_powerdown_disable\n"); - REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0); + REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0); } static void hdmi_core_swreset_release(struct hdmi_core_data *core) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index a43f7b10e113..061f9bab4c9b 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -38,9 +38,10 @@ #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <linux/of.h> #include <sound/omap-hdmi-audio.h> +#include "omapdss.h" #include "hdmi5_core.h" #include "dss.h" #include "dss_features.h" @@ -131,15 +132,6 @@ static int hdmi_init_regulator(void) return PTR_ERR(reg); } - if (regulator_can_change_voltage(reg)) { - r = regulator_set_voltage(reg, 1800000, 1800000); - if (r) { - devm_regulator_put(reg); - DSSWARN("can't set the regulator voltage\n"); - return r; - } - } - hdmi.vdda_reg = reg; return 0; @@ -198,7 +190,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev) if (p->double_pixel) pc *= 2; - hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo); + /* 
DSS_HDMI_TCLK is bitclk / 10 */ + pc *= 10; + + dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin), + pc, &hdmi_cinfo); /* disable and clear irqs */ hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff); @@ -230,9 +226,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev) hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg); - /* bypass TV gamma table */ - dispc_enable_gamma_table(0); - /* tv size */ dss_mgr_set_timings(channel, p); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c index 6a397520cae5..8ab2093daa12 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c @@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core) { void __iomem *base = core->base; const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ - const unsigned ss_scl_high = 4000; /* ns */ - const unsigned ss_scl_low = 4700; /* ns */ + const unsigned ss_scl_high = 4600; /* ns */ + const unsigned ss_scl_low = 5400; /* ns */ const unsigned fs_scl_high = 600; /* ns */ const unsigned fs_scl_low = 1300; /* ns */ const unsigned sda_hold = 1000; /* ns */ @@ -458,7 +458,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, c = (ptr[1] >> 6) & 0x3; m = (ptr[1] >> 4) & 0x3; - r = (ptr[1] >> 0) & 0x3; + r = (ptr[1] >> 0) & 0xf; itc = (ptr[2] >> 7) & 0x1; ec = (ptr[2] >> 4) & 0x7; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c index 1b8fcc6c4ba1..4dfb67fe5f6d 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c @@ -4,8 +4,8 @@ #include <linux/kernel.h> #include <linux/err.h> #include <linux/of.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "hdmi.h" int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep, diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c index 1f5d19c119ce..3ead47cccac5 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c @@ -13,8 +13,9 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include <video/omapdss.h> +#include <linux/seq_file.h> +#include "omapdss.h" #include "dss.h" #include "hdmi.h" diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c index 06e23a7c432c..b8bf6a9e5557 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c @@ -16,9 +16,10 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/clk.h> +#include <linux/seq_file.h> +#include <linux/pm_runtime.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "hdmi.h" @@ -38,71 +39,14 @@ void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s) DUMPPLL(PLLCTRL_CFG4); } -void hdmi_pll_compute(struct hdmi_pll_data *pll, - unsigned long target_tmds, struct dss_pll_clock_info *pi) -{ - unsigned long fint, clkdco, clkout; - unsigned long target_bitclk, target_clkdco; - unsigned long min_dco; - unsigned n, m, mf, m2, sd; - unsigned long clkin; - const struct dss_pll_hw *hw = pll->pll.hw; - - clkin = clk_get_rate(pll->pll.clkin); - - DSSDBG("clkin %lu, target tmds %lu\n", clkin, target_tmds); - - target_bitclk = target_tmds * 10; - - /* Fint */ - n = DIV_ROUND_UP(clkin, hw->fint_max); - fint = clkin / n; - - /* adjust m2 so that the clkdco will be high enough */ - min_dco = roundup(hw->clkdco_min, fint); - m2 = 
DIV_ROUND_UP(min_dco, target_bitclk); - if (m2 == 0) - m2 = 1; - - target_clkdco = target_bitclk * m2; - m = target_clkdco / fint; - - clkdco = fint * m; - - /* adjust clkdco with fractional mf */ - if (WARN_ON(target_clkdco - clkdco > fint)) - mf = 0; - else - mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint); - - if (mf > 0) - clkdco += (u32)div_u64((u64)mf * fint, 262144); - - clkout = clkdco / m2; - - /* sigma-delta */ - sd = DIV_ROUND_UP(fint * m, 250000000); - - DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n", - n, m, mf, m2, sd); - DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout); - - pi->n = n; - pi->m = m; - pi->mf = mf; - pi->mX[0] = m2; - pi->sd = sd; - - pi->fint = fint; - pi->clkdco = clkdco; - pi->clkout[0] = clkout; -} - static int hdmi_pll_enable(struct dss_pll *dsspll) { struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll); struct hdmi_wp_data *wp = pll->wp; - u16 r = 0; + int r; + + r = pm_runtime_get_sync(&pll->pdev->dev); + WARN_ON(r < 0); dss_ctrl_pll_enable(DSS_PLL_HDMI, true); @@ -117,10 +61,14 @@ static void hdmi_pll_disable(struct dss_pll *dsspll) { struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll); struct hdmi_wp_data *wp = pll->wp; + int r; hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF); dss_ctrl_pll_enable(DSS_PLL_HDMI, false); + + r = pm_runtime_put_sync(&pll->pdev->dev); + WARN_ON(r < 0 && r != -ENOSYS); } static const struct dss_pll_ops dsi_pll_ops = { @@ -130,6 +78,8 @@ static const struct dss_pll_ops dsi_pll_ops = { }; static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = { + .type = DSS_PLL_TYPE_B, + .n_max = 255, .m_min = 20, .m_max = 4095, @@ -153,6 +103,8 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = { }; static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = { + .type = DSS_PLL_TYPE_B, + .n_max = 255, .m_min = 20, .m_max = 2045, @@ -224,6 +176,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll, int r; struct resource *res; + pll->pdev = pdev; pll->wp = wp; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll"); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c index 13442b9052d1..203694a52d18 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c @@ -14,8 +14,9 @@ #include <linux/err.h> #include <linux/io.h> #include <linux/platform_device.h> -#include <video/omapdss.h> +#include <linux/seq_file.h> +#include "omapdss.h" #include "dss.h" #include "hdmi.h" diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index d7e7c909bbc2..6eaf1adbd606 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -18,7 +18,872 @@ #ifndef __OMAP_DRM_DSS_H #define __OMAP_DRM_DSS_H -#include <video/omapdss.h> +#include <linux/list.h> +#include <linux/kobject.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <video/videomode.h> +#include <linux/platform_data/omapdss.h> +#include <uapi/drm/drm_mode.h> + +#define DISPC_IRQ_FRAMEDONE (1 << 0) +#define DISPC_IRQ_VSYNC (1 << 1) +#define DISPC_IRQ_EVSYNC_EVEN (1 << 2) +#define DISPC_IRQ_EVSYNC_ODD (1 << 3) +#define DISPC_IRQ_ACBIAS_COUNT_STAT (1 << 4) +#define DISPC_IRQ_PROG_LINE_NUM (1 << 5) +#define DISPC_IRQ_GFX_FIFO_UNDERFLOW (1 << 6) +#define DISPC_IRQ_GFX_END_WIN (1 << 7) +#define DISPC_IRQ_PAL_GAMMA_MASK (1 << 8) +#define DISPC_IRQ_OCP_ERR (1 << 9) +#define DISPC_IRQ_VID1_FIFO_UNDERFLOW (1 << 10) +#define 
DISPC_IRQ_VID1_END_WIN (1 << 11) +#define DISPC_IRQ_VID2_FIFO_UNDERFLOW (1 << 12) +#define DISPC_IRQ_VID2_END_WIN (1 << 13) +#define DISPC_IRQ_SYNC_LOST (1 << 14) +#define DISPC_IRQ_SYNC_LOST_DIGIT (1 << 15) +#define DISPC_IRQ_WAKEUP (1 << 16) +#define DISPC_IRQ_SYNC_LOST2 (1 << 17) +#define DISPC_IRQ_VSYNC2 (1 << 18) +#define DISPC_IRQ_VID3_END_WIN (1 << 19) +#define DISPC_IRQ_VID3_FIFO_UNDERFLOW (1 << 20) +#define DISPC_IRQ_ACBIAS_COUNT_STAT2 (1 << 21) +#define DISPC_IRQ_FRAMEDONE2 (1 << 22) +#define DISPC_IRQ_FRAMEDONEWB (1 << 23) +#define DISPC_IRQ_FRAMEDONETV (1 << 24) +#define DISPC_IRQ_WBBUFFEROVERFLOW (1 << 25) +#define DISPC_IRQ_WBUNCOMPLETEERROR (1 << 26) +#define DISPC_IRQ_SYNC_LOST3 (1 << 27) +#define DISPC_IRQ_VSYNC3 (1 << 28) +#define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 29) +#define DISPC_IRQ_FRAMEDONE3 (1 << 30) + +struct omap_dss_device; +struct omap_overlay_manager; +struct dss_lcd_mgr_config; +struct snd_aes_iec958; +struct snd_cea_861_aud_if; +struct hdmi_avi_infoframe; + +enum omap_display_type { + OMAP_DISPLAY_TYPE_NONE = 0, + OMAP_DISPLAY_TYPE_DPI = 1 << 0, + OMAP_DISPLAY_TYPE_DBI = 1 << 1, + OMAP_DISPLAY_TYPE_SDI = 1 << 2, + OMAP_DISPLAY_TYPE_DSI = 1 << 3, + OMAP_DISPLAY_TYPE_VENC = 1 << 4, + OMAP_DISPLAY_TYPE_HDMI = 1 << 5, + OMAP_DISPLAY_TYPE_DVI = 1 << 6, +}; + +enum omap_plane { + OMAP_DSS_GFX = 0, + OMAP_DSS_VIDEO1 = 1, + OMAP_DSS_VIDEO2 = 2, + OMAP_DSS_VIDEO3 = 3, + OMAP_DSS_WB = 4, +}; + +enum omap_channel { + OMAP_DSS_CHANNEL_LCD = 0, + OMAP_DSS_CHANNEL_DIGIT = 1, + OMAP_DSS_CHANNEL_LCD2 = 2, + OMAP_DSS_CHANNEL_LCD3 = 3, + OMAP_DSS_CHANNEL_WB = 4, +}; + +enum omap_color_mode { + OMAP_DSS_COLOR_CLUT1 = 1 << 0, /* BITMAP 1 */ + OMAP_DSS_COLOR_CLUT2 = 1 << 1, /* BITMAP 2 */ + OMAP_DSS_COLOR_CLUT4 = 1 << 2, /* BITMAP 4 */ + OMAP_DSS_COLOR_CLUT8 = 1 << 3, /* BITMAP 8 */ + OMAP_DSS_COLOR_RGB12U = 1 << 4, /* RGB12, 16-bit container */ + OMAP_DSS_COLOR_ARGB16 = 1 << 5, /* ARGB16 */ + OMAP_DSS_COLOR_RGB16 = 1 << 6, /* RGB16 */ + OMAP_DSS_COLOR_RGB24U = 1 << 7, /* RGB24, 32-bit container */ + OMAP_DSS_COLOR_RGB24P = 1 << 8, /* RGB24, 24-bit container */ + OMAP_DSS_COLOR_YUV2 = 1 << 9, /* YUV2 4:2:2 co-sited */ + OMAP_DSS_COLOR_UYVY = 1 << 10, /* UYVY 4:2:2 co-sited */ + OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */ + OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */ + OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */ + OMAP_DSS_COLOR_NV12 = 1 << 14, /* NV12 format: YUV 4:2:0 */ + OMAP_DSS_COLOR_RGBA16 = 1 << 15, /* RGBA16 - 4444 */ + OMAP_DSS_COLOR_RGBX16 = 1 << 16, /* RGBx16 - 4444 */ + OMAP_DSS_COLOR_ARGB16_1555 = 1 << 17, /* ARGB16 - 1555 */ + OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */ +}; + +enum omap_dss_load_mode { + OMAP_DSS_LOAD_CLUT_AND_FRAME = 0, + OMAP_DSS_LOAD_CLUT_ONLY = 1, + OMAP_DSS_LOAD_FRAME_ONLY = 2, + OMAP_DSS_LOAD_CLUT_ONCE_FRAME = 3, +}; + +enum omap_dss_trans_key_type { + OMAP_DSS_COLOR_KEY_GFX_DST = 0, + OMAP_DSS_COLOR_KEY_VID_SRC = 1, +}; + +enum omap_rfbi_te_mode { + OMAP_DSS_RFBI_TE_MODE_1 = 1, + OMAP_DSS_RFBI_TE_MODE_2 = 2, +}; + +enum omap_dss_signal_level { + OMAPDSS_SIG_ACTIVE_LOW, + OMAPDSS_SIG_ACTIVE_HIGH, +}; + +enum omap_dss_signal_edge { + OMAPDSS_DRIVE_SIG_FALLING_EDGE, + OMAPDSS_DRIVE_SIG_RISING_EDGE, +}; + +enum omap_dss_venc_type { + OMAP_DSS_VENC_TYPE_COMPOSITE, + OMAP_DSS_VENC_TYPE_SVIDEO, +}; + +enum omap_dss_dsi_pixel_format { + OMAP_DSS_DSI_FMT_RGB888, + OMAP_DSS_DSI_FMT_RGB666, + OMAP_DSS_DSI_FMT_RGB666_PACKED, + OMAP_DSS_DSI_FMT_RGB565, +}; + +enum omap_dss_dsi_mode { + OMAP_DSS_DSI_CMD_MODE = 0, + 
OMAP_DSS_DSI_VIDEO_MODE, +}; + +enum omap_display_caps { + OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE = 1 << 0, + OMAP_DSS_DISPLAY_CAP_TEAR_ELIM = 1 << 1, +}; + +enum omap_dss_display_state { + OMAP_DSS_DISPLAY_DISABLED = 0, + OMAP_DSS_DISPLAY_ACTIVE, +}; + +enum omap_dss_rotation_type { + OMAP_DSS_ROT_DMA = 1 << 0, + OMAP_DSS_ROT_VRFB = 1 << 1, + OMAP_DSS_ROT_TILER = 1 << 2, +}; + +/* clockwise rotation angle */ +enum omap_dss_rotation_angle { + OMAP_DSS_ROT_0 = 0, + OMAP_DSS_ROT_90 = 1, + OMAP_DSS_ROT_180 = 2, + OMAP_DSS_ROT_270 = 3, +}; + +enum omap_overlay_caps { + OMAP_DSS_OVL_CAP_SCALE = 1 << 0, + OMAP_DSS_OVL_CAP_GLOBAL_ALPHA = 1 << 1, + OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA = 1 << 2, + OMAP_DSS_OVL_CAP_ZORDER = 1 << 3, + OMAP_DSS_OVL_CAP_POS = 1 << 4, + OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5, +}; + +enum omap_overlay_manager_caps { + OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */ +}; + +enum omap_dss_clk_source { + OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK + * OMAP4: DSS_FCLK */ + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK + * OMAP4: PLL1_CLK1 */ + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK + * OMAP4: PLL1_CLK2 */ + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */ + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */ +}; + +enum omap_hdmi_flags { + OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0, +}; + +enum omap_dss_output_id { + OMAP_DSS_OUTPUT_DPI = 1 << 0, + OMAP_DSS_OUTPUT_DBI = 1 << 1, + OMAP_DSS_OUTPUT_SDI = 1 << 2, + OMAP_DSS_OUTPUT_DSI1 = 1 << 3, + OMAP_DSS_OUTPUT_DSI2 = 1 << 4, + OMAP_DSS_OUTPUT_VENC = 1 << 5, + OMAP_DSS_OUTPUT_HDMI = 1 << 6, +}; + +/* RFBI */ + +struct rfbi_timings { + int cs_on_time; + int cs_off_time; + int we_on_time; + int we_off_time; + int re_on_time; + int re_off_time; + int we_cycle_time; + int re_cycle_time; + int cs_pulse_width; + int access_time; + + int clk_div; + + u32 tim[5]; /* set by rfbi_convert_timings() */ + + int converted; +}; + +/* DSI */ + +enum omap_dss_dsi_trans_mode { + /* Sync Pulses: both sync start and end packets sent */ + OMAP_DSS_DSI_PULSE_MODE, + /* Sync Events: only sync start packets sent */ + OMAP_DSS_DSI_EVENT_MODE, + /* Burst: only sync start packets sent, pixels are time compressed */ + OMAP_DSS_DSI_BURST_MODE, +}; + +struct omap_dss_dsi_videomode_timings { + unsigned long hsclk; + + unsigned ndl; + unsigned bitspp; + + /* pixels */ + u16 hact; + /* lines */ + u16 vact; + + /* DSI video mode blanking data */ + /* Unit: byte clock cycles */ + u16 hss; + u16 hsa; + u16 hse; + u16 hfp; + u16 hbp; + /* Unit: line clocks */ + u16 vsa; + u16 vfp; + u16 vbp; + + /* DSI blanking modes */ + int blanking_mode; + int hsa_blanking_mode; + int hbp_blanking_mode; + int hfp_blanking_mode; + + enum omap_dss_dsi_trans_mode trans_mode; + + bool ddr_clk_always_on; + int window_sync; +}; + +struct omap_dss_dsi_config { + enum omap_dss_dsi_mode mode; + enum omap_dss_dsi_pixel_format pixel_format; + const struct omap_video_timings *timings; + + unsigned long hs_clk_min, hs_clk_max; + unsigned long lp_clk_min, lp_clk_max; + + bool ddr_clk_always_on; + enum omap_dss_dsi_trans_mode trans_mode; +}; + +struct omap_video_timings { + /* Unit: pixels */ + u16 x_res; + /* Unit: pixels */ + u16 y_res; + /* Unit: Hz */ + u32 pixelclock; + /* Unit: pixel clocks */ + u16 hsw; /* Horizontal synchronization pulse width */ + /* Unit: pixel clocks */ + u16 hfp; /* Horizontal front porch */ + /* Unit: pixel clocks */ + u16 hbp; /* Horizontal back porch */ + /* Unit: line clocks */ + 
u16 vsw; /* Vertical synchronization pulse width */ + /* Unit: line clocks */ + u16 vfp; /* Vertical front porch */ + /* Unit: line clocks */ + u16 vbp; /* Vertical back porch */ + + /* Vsync logic level */ + enum omap_dss_signal_level vsync_level; + /* Hsync logic level */ + enum omap_dss_signal_level hsync_level; + /* Interlaced or Progressive timings */ + bool interlace; + /* Pixel clock edge to drive LCD data */ + enum omap_dss_signal_edge data_pclk_edge; + /* Data enable logic level */ + enum omap_dss_signal_level de_level; + /* Pixel clock edges to drive HSYNC and VSYNC signals */ + enum omap_dss_signal_edge sync_pclk_edge; + + bool double_pixel; +}; + +/* Hardcoded timings for tv modes. Venc only uses these to + * identify the mode, and does not actually use the configs + * itself. However, the configs should be something that + * a normal monitor can also show */ +extern const struct omap_video_timings omap_dss_pal_timings; +extern const struct omap_video_timings omap_dss_ntsc_timings; + +struct omap_dss_cpr_coefs { + s16 rr, rg, rb; + s16 gr, gg, gb; + s16 br, bg, bb; +}; + +struct omap_overlay_info { + dma_addr_t paddr; + dma_addr_t p_uv_addr; /* for NV12 format */ + u16 screen_width; + u16 width; + u16 height; + enum omap_color_mode color_mode; + u8 rotation; + enum omap_dss_rotation_type rotation_type; + bool mirror; + + u16 pos_x; + u16 pos_y; + u16 out_width; /* if 0, out_width == width */ + u16 out_height; /* if 0, out_height == height */ + u8 global_alpha; + u8 pre_mult_alpha; + u8 zorder; +}; + +struct omap_overlay { + struct kobject kobj; + struct list_head list; + + /* static fields */ + const char *name; + enum omap_plane id; + enum omap_color_mode supported_modes; + enum omap_overlay_caps caps; + + /* dynamic fields */ + struct omap_overlay_manager *manager; + + /* + * The following functions do not block: + * + * is_enabled + * set_overlay_info + * get_overlay_info + * + * The rest of the functions may block and cannot be called from + * interrupt context + */ + + int (*enable)(struct omap_overlay *ovl); + int (*disable)(struct omap_overlay *ovl); + bool (*is_enabled)(struct omap_overlay *ovl); + + int (*set_manager)(struct omap_overlay *ovl, + struct omap_overlay_manager *mgr); + int (*unset_manager)(struct omap_overlay *ovl); + + int (*set_overlay_info)(struct omap_overlay *ovl, + struct omap_overlay_info *info); + void (*get_overlay_info)(struct omap_overlay *ovl, + struct omap_overlay_info *info); + + int (*wait_for_go)(struct omap_overlay *ovl); + + struct omap_dss_device *(*get_device)(struct omap_overlay *ovl); +}; + +struct omap_overlay_manager_info { + u32 default_color; + + enum omap_dss_trans_key_type trans_key_type; + u32 trans_key; + bool trans_enabled; + + bool partial_alpha_enabled; + + bool cpr_enable; + struct omap_dss_cpr_coefs cpr_coefs; +}; + +struct omap_overlay_manager { + struct kobject kobj; + + /* static fields */ + const char *name; + enum omap_channel id; + enum omap_overlay_manager_caps caps; + struct list_head overlays; + enum omap_display_type supported_displays; + enum omap_dss_output_id supported_outputs; + + /* dynamic fields */ + struct omap_dss_device *output; + + /* + * The following functions do not block: + * + * set_manager_info + * get_manager_info + * apply + * + * The rest of the functions may block and cannot be called from + * interrupt context + */ + + int (*set_output)(struct omap_overlay_manager *mgr, + struct omap_dss_device *output); + int (*unset_output)(struct omap_overlay_manager *mgr); + + int 
(*set_manager_info)(struct omap_overlay_manager *mgr, + struct omap_overlay_manager_info *info); + void (*get_manager_info)(struct omap_overlay_manager *mgr, + struct omap_overlay_manager_info *info); + + int (*apply)(struct omap_overlay_manager *mgr); + int (*wait_for_go)(struct omap_overlay_manager *mgr); + int (*wait_for_vsync)(struct omap_overlay_manager *mgr); + + struct omap_dss_device *(*get_device)(struct omap_overlay_manager *mgr); +}; + +/* 22 pins means 1 clk lane and 10 data lanes */ +#define OMAP_DSS_MAX_DSI_PINS 22 + +struct omap_dsi_pin_config { + int num_pins; + /* + * pin numbers in the following order: + * clk+, clk- + * data1+, data1- + * data2+, data2- + * ... + */ + int pins[OMAP_DSS_MAX_DSI_PINS]; +}; + +struct omap_dss_writeback_info { + u32 paddr; + u32 p_uv_addr; + u16 buf_width; + u16 width; + u16 height; + enum omap_color_mode color_mode; + u8 rotation; + enum omap_dss_rotation_type rotation_type; + bool mirror; + u8 pre_mult_alpha; +}; + +struct omapdss_dpi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines); +}; + +struct omapdss_sdi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs); +}; + +struct omapdss_dvi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); +}; + +struct omapdss_atv_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + void (*set_type)(struct omap_dss_device *dssdev, + enum omap_dss_venc_type type); + void 
(*invert_vid_out_polarity)(struct omap_dss_device *dssdev, + bool invert_polarity); + + int (*set_wss)(struct omap_dss_device *dssdev, u32 wss); + u32 (*get_wss)(struct omap_dss_device *dssdev); +}; + +struct omapdss_hdmi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); + bool (*detect)(struct omap_dss_device *dssdev); + + int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); + int (*set_infoframe)(struct omap_dss_device *dssdev, + const struct hdmi_avi_infoframe *avi); +}; + +struct omapdss_dsi_ops { + int (*connect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + void (*disconnect)(struct omap_dss_device *dssdev, + struct omap_dss_device *dst); + + int (*enable)(struct omap_dss_device *dssdev); + void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes, + bool enter_ulps); + + /* bus configuration */ + int (*set_config)(struct omap_dss_device *dssdev, + const struct omap_dss_dsi_config *cfg); + int (*configure_pins)(struct omap_dss_device *dssdev, + const struct omap_dsi_pin_config *pin_cfg); + + void (*enable_hs)(struct omap_dss_device *dssdev, int channel, + bool enable); + int (*enable_te)(struct omap_dss_device *dssdev, bool enable); + + int (*update)(struct omap_dss_device *dssdev, int channel, + void (*callback)(int, void *), void *data); + + void (*bus_lock)(struct omap_dss_device *dssdev); + void (*bus_unlock)(struct omap_dss_device *dssdev); + + int (*enable_video_output)(struct omap_dss_device *dssdev, int channel); + void (*disable_video_output)(struct omap_dss_device *dssdev, + int channel); + + int (*request_vc)(struct omap_dss_device *dssdev, int *channel); + int (*set_vc_id)(struct omap_dss_device *dssdev, int channel, + int vc_id); + void (*release_vc)(struct omap_dss_device *dssdev, int channel); + + /* data transfer */ + int (*dcs_write)(struct omap_dss_device *dssdev, int channel, + u8 *data, int len); + int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel, + u8 *data, int len); + int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, + u8 *data, int len); + + int (*gen_write)(struct omap_dss_device *dssdev, int channel, + u8 *data, int len); + int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel, + u8 *data, int len); + int (*gen_read)(struct omap_dss_device *dssdev, int channel, + u8 *reqdata, int reqlen, + u8 *data, int len); + + int (*bta_sync)(struct omap_dss_device *dssdev, int channel); + + int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev, + int channel, u16 plen); +}; + +struct omap_dss_device { + struct kobject kobj; + struct device *dev; + + struct module *owner; + + struct list_head panel_list; + + /* alias in the form of "display%d" */ + char alias[16]; + + enum omap_display_type type; + enum omap_display_type output_type; + + union { + struct { + u8 data_lines; + } dpi; + + struct { + u8 channel; + u8 data_lines; + } rfbi; + + struct { + u8 datapairs; + } sdi; + + struct { + int 
module; + } dsi; + + struct { + enum omap_dss_venc_type type; + bool invert_polarity; + } venc; + } phy; + + struct { + struct omap_video_timings timings; + + enum omap_dss_dsi_pixel_format dsi_pix_fmt; + enum omap_dss_dsi_mode dsi_mode; + } panel; + + struct { + u8 pixel_size; + struct rfbi_timings rfbi_timings; + } ctrl; + + const char *name; + + /* used to match device to driver */ + const char *driver_name; + + void *data; + + struct omap_dss_driver *driver; + + union { + const struct omapdss_dpi_ops *dpi; + const struct omapdss_sdi_ops *sdi; + const struct omapdss_dvi_ops *dvi; + const struct omapdss_hdmi_ops *hdmi; + const struct omapdss_atv_ops *atv; + const struct omapdss_dsi_ops *dsi; + } ops; + + /* helper variable for driver suspend/resume */ + bool activate_after_resume; + + enum omap_display_caps caps; + + struct omap_dss_device *src; + + enum omap_dss_display_state state; + + /* OMAP DSS output specific fields */ + + struct list_head list; + + /* DISPC channel for this output */ + enum omap_channel dispc_channel; + bool dispc_channel_connected; + + /* output instance */ + enum omap_dss_output_id id; + + /* the port number in the DT node */ + int port_num; + + /* dynamic fields */ + struct omap_overlay_manager *manager; + + struct omap_dss_device *dst; +}; + +struct omap_dss_driver { + int (*probe)(struct omap_dss_device *); + void (*remove)(struct omap_dss_device *); + + int (*connect)(struct omap_dss_device *dssdev); + void (*disconnect)(struct omap_dss_device *dssdev); + + int (*enable)(struct omap_dss_device *display); + void (*disable)(struct omap_dss_device *display); + int (*run_test)(struct omap_dss_device *display, int test); + + int (*update)(struct omap_dss_device *dssdev, + u16 x, u16 y, u16 w, u16 h); + int (*sync)(struct omap_dss_device *dssdev); + + int (*enable_te)(struct omap_dss_device *dssdev, bool enable); + int (*get_te)(struct omap_dss_device *dssdev); + + u8 (*get_rotate)(struct omap_dss_device *dssdev); + int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate); + + bool (*get_mirror)(struct omap_dss_device *dssdev); + int (*set_mirror)(struct omap_dss_device *dssdev, bool enable); + + int (*memory_read)(struct omap_dss_device *dssdev, + void *buf, size_t size, + u16 x, u16 y, u16 w, u16 h); + + void (*get_resolution)(struct omap_dss_device *dssdev, + u16 *xres, u16 *yres); + void (*get_dimensions)(struct omap_dss_device *dssdev, + u32 *width, u32 *height); + int (*get_recommended_bpp)(struct omap_dss_device *dssdev); + + int (*check_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*set_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + void (*get_timings)(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + + int (*set_wss)(struct omap_dss_device *dssdev, u32 wss); + u32 (*get_wss)(struct omap_dss_device *dssdev); + + int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); + bool (*detect)(struct omap_dss_device *dssdev); + + int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode); + int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev, + const struct hdmi_avi_infoframe *avi); +}; + +enum omapdss_version omapdss_get_version(void); +bool omapdss_is_initialized(void); + +int omap_dss_register_driver(struct omap_dss_driver *); +void omap_dss_unregister_driver(struct omap_dss_driver *); + +int omapdss_register_display(struct omap_dss_device *dssdev); +void omapdss_unregister_display(struct omap_dss_device *dssdev); + +struct 
omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev); +void omap_dss_put_device(struct omap_dss_device *dssdev); +#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL) +struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from); +struct omap_dss_device *omap_dss_find_device(void *data, + int (*match)(struct omap_dss_device *dssdev, void *data)); +const char *omapdss_get_default_display_name(void); + +void videomode_to_omap_video_timings(const struct videomode *vm, + struct omap_video_timings *ovt); +void omap_video_timings_to_videomode(const struct omap_video_timings *ovt, + struct videomode *vm); + +int dss_feat_get_num_mgrs(void); +int dss_feat_get_num_ovls(void); +enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane); + + + +int omap_dss_get_num_overlay_managers(void); +struct omap_overlay_manager *omap_dss_get_overlay_manager(int num); + +int omap_dss_get_num_overlays(void); +struct omap_overlay *omap_dss_get_overlay(int num); + +int omapdss_register_output(struct omap_dss_device *output); +void omapdss_unregister_output(struct omap_dss_device *output); +struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id); +struct omap_dss_device *omap_dss_find_output(const char *name); +struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port); +int omapdss_output_set_device(struct omap_dss_device *out, + struct omap_dss_device *dssdev); +int omapdss_output_unset_device(struct omap_dss_device *out); + +struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev); +struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev); + +void omapdss_default_get_resolution(struct omap_dss_device *dssdev, + u16 *xres, u16 *yres); +int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev); +void omapdss_default_get_timings(struct omap_dss_device *dssdev, + struct omap_video_timings *timings); + +typedef void (*omap_dispc_isr_t) (void *arg, u32 mask); +int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask); +int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask); + +int omapdss_compat_init(void); +void omapdss_compat_uninit(void); + +static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev) +{ + return dssdev->src; +} + +static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev) +{ + return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE; +} + +struct device_node * +omapdss_of_get_next_port(const struct device_node *parent, + struct device_node *prev); + +struct device_node * +omapdss_of_get_next_endpoint(const struct device_node *parent, + struct device_node *prev); + +struct device_node * +omapdss_of_get_first_endpoint(const struct device_node *parent); + +struct omap_dss_device * +omapdss_of_find_source_for_first_ep(struct device_node *node); u32 dispc_read_irqstatus(void); void dispc_clear_irqstatus(u32 mask); @@ -44,6 +909,10 @@ void dispc_mgr_set_timings(enum omap_channel channel, const struct omap_video_timings *timings); void dispc_mgr_setup(enum omap_channel channel, const struct omap_overlay_manager_info *info); +u32 dispc_mgr_gamma_size(enum omap_channel channel); +void dispc_mgr_set_gamma(enum omap_channel channel, + const struct drm_color_lut *lut, + unsigned int length); int dispc_ovl_enable(enum omap_plane plane, bool enable); bool dispc_ovl_enabled(enum omap_plane plane); diff --git a/drivers/gpu/drm/omapdrm/dss/output.c 
b/drivers/gpu/drm/omapdrm/dss/output.c index 829232ad8c81..24f859488201 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -21,8 +21,7 @@ #include <linux/slab.h> #include <linux/of.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" static LIST_HEAD(output_list); diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c index f974ddcd3b6e..0a76c89cdc2e 100644 --- a/drivers/gpu/drm/omapdrm/dss/pll.c +++ b/drivers/gpu/drm/omapdrm/dss/pll.c @@ -22,8 +22,7 @@ #include <linux/regulator/consumer.h> #include <linux/sched.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #define PLL_CONTROL 0x0000 @@ -76,6 +75,59 @@ struct dss_pll *dss_pll_find(const char *name) return NULL; } +struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src) +{ + struct dss_pll *pll; + + switch (src) { + default: + case DSS_CLK_SRC_FCK: + return NULL; + + case DSS_CLK_SRC_HDMI_PLL: + return dss_pll_find("hdmi"); + + case DSS_CLK_SRC_PLL1_1: + case DSS_CLK_SRC_PLL1_2: + case DSS_CLK_SRC_PLL1_3: + pll = dss_pll_find("dsi0"); + if (!pll) + pll = dss_pll_find("video0"); + return pll; + + case DSS_CLK_SRC_PLL2_1: + case DSS_CLK_SRC_PLL2_2: + case DSS_CLK_SRC_PLL2_3: + pll = dss_pll_find("dsi1"); + if (!pll) + pll = dss_pll_find("video1"); + return pll; + } +} + +unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src) +{ + switch (src) { + case DSS_CLK_SRC_HDMI_PLL: + return 0; + + case DSS_CLK_SRC_PLL1_1: + case DSS_CLK_SRC_PLL2_1: + return 0; + + case DSS_CLK_SRC_PLL1_2: + case DSS_CLK_SRC_PLL2_2: + return 1; + + case DSS_CLK_SRC_PLL1_3: + case DSS_CLK_SRC_PLL2_3: + return 2; + + default: + return 0; + } +} + int dss_pll_enable(struct dss_pll *pll) { int r; @@ -129,7 +181,7 @@ int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cin return 0; } -bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, +bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco, unsigned long out_min, unsigned long out_max, dss_hsdiv_calc_func func, void *data) { @@ -154,7 +206,11 @@ bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, return false; } -bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, +/* + * clkdco = clkin / n * m * 2 + * clkoutX = clkdco / mX + */ +bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin, unsigned long pll_min, unsigned long pll_max, dss_pll_calc_func func, void *data) { @@ -195,6 +251,71 @@ bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, return false; } +/* + * This calculates a PLL config that will provide the target_clkout rate + * for clkout. Additionally clkdco rate will be the same as clkout rate + * when clkout rate is >= min_clkdco. 
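The type B calculation below derives every divider from a single target rate. As a standalone illustration of the same arithmetic (not the driver code itself), here is a userspace sketch; FINT_MAX and CLKDCO_MIN are hypothetical stand-ins for the dss_pll_hw fint_max and clkdco_min fields, and the clock values are arbitrary examples:

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Hypothetical limits standing in for dss_pll_hw fint_max / clkdco_min. */
#define FINT_MAX	2500000UL
#define CLKDCO_MIN	500000000UL

int main(void)
{
	unsigned long clkin = 19200000UL;	   /* example sys_clk */
	unsigned long target_clkout = 148500000UL; /* example target rate */
	unsigned long fint, min_dco, target_clkdco, clkdco;
	unsigned int n, m, mf, m2;

	/* reference divider: keep fint under the hardware maximum */
	n = DIV_ROUND_UP(clkin, FINT_MAX);
	fint = clkin / n;

	/* raise m2 until the DCO runs above its minimum rate */
	min_dco = DIV_ROUND_UP(CLKDCO_MIN, fint) * fint;
	m2 = DIV_ROUND_UP(min_dco, target_clkout);
	if (m2 == 0)
		m2 = 1;

	/* integer multiplier, then the 2^18-step fractional correction */
	target_clkdco = target_clkout * m2;
	m = target_clkdco / fint;
	clkdco = fint * m;
	mf = (unsigned int)(262144ULL * (target_clkdco - clkdco) / fint);
	clkdco += (unsigned long)((uint64_t)mf * fint / 262144);

	printf("n=%u m=%u mf=%u m2=%u clkdco=%lu clkout=%lu\n",
	       n, m, mf, m2, clkdco, clkdco / m2);
	return 0;
}

For these inputs the fractional term lands the DCO exactly on 594 MHz with m2 = 4, so clkout comes out at the requested 148.5 MHz.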
+ * + * clkdco = clkin / n * m + clkin / n * mf / 262144 + * clkout = clkdco / m2 + */ +bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin, + unsigned long target_clkout, struct dss_pll_clock_info *cinfo) +{ + unsigned long fint, clkdco, clkout; + unsigned long target_clkdco; + unsigned long min_dco; + unsigned n, m, mf, m2, sd; + const struct dss_pll_hw *hw = pll->hw; + + DSSDBG("clkin %lu, target clkout %lu\n", clkin, target_clkout); + + /* Fint */ + n = DIV_ROUND_UP(clkin, hw->fint_max); + fint = clkin / n; + + /* adjust m2 so that the clkdco will be high enough */ + min_dco = roundup(hw->clkdco_min, fint); + m2 = DIV_ROUND_UP(min_dco, target_clkout); + if (m2 == 0) + m2 = 1; + + target_clkdco = target_clkout * m2; + m = target_clkdco / fint; + + clkdco = fint * m; + + /* adjust clkdco with fractional mf */ + if (WARN_ON(target_clkdco - clkdco > fint)) + mf = 0; + else + mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint); + + if (mf > 0) + clkdco += (u32)div_u64((u64)mf * fint, 262144); + + clkout = clkdco / m2; + + /* sigma-delta */ + sd = DIV_ROUND_UP(fint * m, 250000000); + + DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n", + n, m, mf, m2, sd); + DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout); + + cinfo->n = n; + cinfo->m = m; + cinfo->mf = mf; + cinfo->mX[0] = m2; + cinfo->sd = sd; + + cinfo->fint = fint; + cinfo->clkdco = clkdco; + cinfo->clkout[0] = clkout; + + return true; +} + static int wait_for_bit_change(void __iomem *reg, int bitnum, int value) { unsigned long timeout; diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c index 3796576dfadf..cd53566d75eb 100644 --- a/drivers/gpu/drm/omapdrm/dss/rfbi.c +++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c @@ -38,7 +38,7 @@ #include <linux/pm_runtime.h> #include <linux/component.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "dss.h" struct rfbi_reg { u16 idx; }; diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index cd6d3bfb041d..0a96c321ce62 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -29,7 +29,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> +#include "omapdss.h" #include "dss.h" static struct { diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 08a2cc778ba9..6eedf2118708 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -37,8 +37,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c index b1ec59e42940..7429de928d4e 100644 --- a/drivers/gpu/drm/omapdrm/dss/video-pll.c +++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c @@ -17,8 +17,7 @@ #include <linux/platform_device.h> #include <linux/sched.h> -#include <video/omapdss.h> - +#include "omapdss.h" #include "dss.h" #include "dss_features.h" @@ -108,6 +107,8 @@ static const struct dss_pll_ops dss_pll_ops = { }; static const struct dss_pll_hw dss_dra7_video_pll_hw = { + .type = DSS_PLL_TYPE_A, + .n_max = (1 << 8) - 1, .m_max = (1 << 12) - 1, .mX_max = (1 << 5) - 1, @@ -124,6 +125,10 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = { .mX_lsb[0] = 21, .mX_msb[1] = 30, .mX_lsb[1] = 26, + .mX_msb[2] = 4, + .mX_lsb[2] = 0, + .mX_msb[3] = 9, + .mX_lsb[3] = 5, .has_refsel = true, }; diff --git 
a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index ce2d67b6a8c7..137fe690a0da 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -32,7 +32,6 @@ struct omap_connector { struct drm_connector base; struct omap_dss_device *dssdev; - struct drm_encoder *encoder; bool hdmi_mode; }; @@ -256,13 +255,6 @@ static int omap_connector_mode_valid(struct drm_connector *connector, return ret; } -struct drm_encoder *omap_connector_attached_encoder( - struct drm_connector *connector) -{ - struct omap_connector *omap_connector = to_omap_connector(connector); - return omap_connector->encoder; -} - static const struct drm_connector_funcs omap_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .reset = drm_atomic_helper_connector_reset, @@ -276,7 +268,6 @@ static const struct drm_connector_funcs omap_connector_funcs = { static const struct drm_connector_helper_funcs omap_connector_helper_funcs = { .get_modes = omap_connector_get_modes, .mode_valid = omap_connector_mode_valid, - .best_encoder = omap_connector_attached_encoder, }; /* initialize connector */ @@ -296,7 +287,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev, goto fail; omap_connector->dssdev = dssdev; - omap_connector->encoder = encoder; connector = &omap_connector->base; diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 075f2bb44867..180f644e861e 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -372,6 +372,20 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) copy_timings_drm_to_omap(&omap_crtc->timings, mode); } +static int omap_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + if (state->color_mgmt_changed && state->gamma_lut) { + uint length = state->gamma_lut->length / + sizeof(struct drm_color_lut); + + if (length < 2) + return -EINVAL; + } + + return 0; +} + static void omap_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { @@ -384,6 +398,19 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc, WARN_ON(omap_crtc->vblank_irq.registered); + if (crtc->state->color_mgmt_changed) { + struct drm_color_lut *lut = NULL; + uint length = 0; + + if (crtc->state->gamma_lut) { + lut = (struct drm_color_lut *) + crtc->state->gamma_lut->data; + length = crtc->state->gamma_lut->length / + sizeof(*lut); + } + dispc_mgr_set_gamma(omap_crtc->channel, lut, length); + } + if (dispc_mgr_is_enabled(omap_crtc->channel)) { DBG("%s: GO", omap_crtc->name); @@ -460,6 +500,7 @@ static const struct drm_crtc_funcs omap_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = omap_crtc_destroy, .page_flip = drm_atomic_helper_page_flip, + .gamma_set = drm_atomic_helper_legacy_gamma_set, .set_property = drm_atomic_helper_crtc_set_property, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, @@ -471,6 +512,7 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = { .mode_set_nofb = omap_crtc_mode_set_nofb, .disable = omap_crtc_disable, .enable = omap_crtc_enable, +
.atomic_check = omap_crtc_atomic_check, .atomic_begin = omap_crtc_atomic_begin, .atomic_flush = omap_crtc_atomic_flush, }; @@ -534,6 +576,20 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs); + /* The dispc API adapts to whatever size, but the HW supports * a 256 element gamma table for LCDs and a 1024 element table for * OMAP_DSS_CHANNEL_DIGIT. The X server assumes 256 element gamma * tables, so let's use that. The size of the HW gamma table can be * extracted with dispc_mgr_gamma_size(). If it returns 0, the * gamma table is not supported. */ + if (dispc_mgr_gamma_size(channel)) { + uint gamma_lut_size = 256; + + drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size); + drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size); + } + omap_plane_install_properties(crtc->primary, &crtc->base); omap_crtcs[channel] = omap_crtc; diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c index 6f5fc14fc015..479bf24050f8 100644 --- a/drivers/gpu/drm/omapdrm/omap_debugfs.c +++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c @@ -17,6 +17,8 @@ * this program. If not, see <http://www.gnu.org/licenses/>. */ +#include <linux/seq_file.h> + #include <drm/drm_crtc.h> #include <drm/drm_fb_helper.h> diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index de275a5be1db..4ceed7a9762f 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/platform_device.h> /* platform_device() */ #include <linux/sched.h> +#include <linux/seq_file.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/vmalloc.h> diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index d86f5479345b..6b97011154bf 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -142,8 +142,9 @@ static int omap_atomic_commit(struct drm_device *dev, { struct omap_drm_private *priv = dev->dev_private; struct omap_atomic_state_commit *commit; - unsigned int i; - int ret; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int i, ret; ret = drm_atomic_helper_prepare_planes(dev, state); if (ret) @@ -163,10 +164,8 @@ static int omap_atomic_commit(struct drm_device *dev, /* Wait until all affected CRTCs have completed previous commits and * mark them as pending. */ - for (i = 0; i < dev->mode_config.num_crtc; ++i) { - if (state->crtcs[i]) - commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]); - } + for_each_crtc_in_state(state, crtc, crtc_state, i) + commit->crtcs |= drm_crtc_mask(crtc); wait_event(priv->commit.wait, !omap_atomic_is_pending(priv, commit)); @@ -175,7 +174,7 @@ static int omap_atomic_commit(struct drm_device *dev, spin_unlock(&priv->commit.lock); /* Swap the state, this is the point of no return. 
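The omap_atomic_commit() conversion above swaps the index-based scan of state->crtcs[] for the for_each_crtc_in_state() iterator and builds the pending mask with drm_crtc_mask(). A condensed sketch of that pattern, with foo_commit standing in for a hypothetical driver-private commit structure:

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>

/* hypothetical driver commit-tracking structure */
struct foo_commit {
	u32 crtcs;	/* bitmask of CRTCs touched by this commit */
};

static void foo_collect_crtcs(struct drm_atomic_state *state,
			      struct foo_commit *commit)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	/* Only CRTCs actually carried by this atomic state are visited,
	 * so the NULL checks of the old index loop disappear. */
	for_each_crtc_in_state(state, crtc, crtc_state, i)
		commit->crtcs |= drm_crtc_mask(crtc);
}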
*/ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) schedule_work(&commit->work); @@ -203,6 +202,8 @@ static int get_connector_type(struct omap_dss_device *dssdev) return DRM_MODE_CONNECTOR_HDMIA; case OMAP_DISPLAY_TYPE_DVI: return DRM_MODE_CONNECTOR_DVID; + case OMAP_DISPLAY_TYPE_DSI: + return DRM_MODE_CONNECTOR_DSI; default: return DRM_MODE_CONNECTOR_Unknown; } diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 3f823c368912..dcc30a98b9d4 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -24,7 +24,6 @@ #include <linux/platform_data/omap_drm.h> #include <linux/types.h> #include <linux/wait.h> -#include <video/omapdss.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> @@ -183,7 +182,6 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); -struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p); int omap_framebuffer_pin(struct drm_framebuffer *fb); void omap_framebuffer_unpin(struct drm_framebuffer *fb); void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, @@ -231,7 +229,6 @@ int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient, int x, int y, dma_addr_t *paddr); uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj); size_t omap_gem_mmap_size(struct drm_gem_object *obj); -int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h); int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient); struct dma_buf *omap_gem_prime_export(struct drm_device *dev, @@ -239,17 +236,6 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev, struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, struct dma_buf *buffer); -static inline int align_pitch(int pitch, int width, int bpp) -{ - int bytespp = (bpp + 7) / 8; - /* in case someone tries to feed us a completely bogus stride: */ - pitch = max(pitch, width * bytespp); - /* PVR needs alignment to 8 pixels.. right now that is the most - * restrictive stride requirement.. - */ - return roundup(pitch, 8 * bytespp); -} - /* map crtc to vblank mask */ uint32_t pipe2vbl(struct drm_crtc *crtc); struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 94ec06d3d737..983c8cf2441c 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -17,6 +17,8 @@ * this program. If not, see <http://www.gnu.org/licenses/>. 
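With the align_pitch() helper removed above, omapdrm computes pitches directly as DIV_ROUND_UP(width * bpp, 8) in the fbdev and dumb-buffer hunks that follow, dropping the old 8-pixel PVR alignment. A small standalone comparison of the removed helper and its replacement; the width/bpp values are arbitrary examples:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define roundup(x, y)		(DIV_ROUND_UP(x, y) * (y))
#define max(a, b)		((a) > (b) ? (a) : (b))

/* The removed helper: pads the stride to 8 pixels for PVR. */
static int old_align_pitch(int pitch, int width, int bpp)
{
	int bytespp = (bpp + 7) / 8;

	pitch = max(pitch, width * bytespp);
	return roundup(pitch, 8 * bytespp);
}

int main(void)
{
	int width = 1366, bpp = 32;

	/* old: 5472 bytes (aligned), new: 5464 bytes (exact row size) */
	printf("old: %d bytes\n", old_align_pitch(0, width, bpp));
	printf("new: %d bytes\n", DIV_ROUND_UP(width * bpp, 8));
	return 0;
}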
*/ +#include <linux/seq_file.h> + #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> @@ -120,17 +122,9 @@ static void omap_framebuffer_destroy(struct drm_framebuffer *fb) kfree(omap_fb); } -static int omap_framebuffer_dirty(struct drm_framebuffer *fb, - struct drm_file *file_priv, unsigned flags, unsigned color, - struct drm_clip_rect *clips, unsigned num_clips) -{ - return 0; -} - static const struct drm_framebuffer_funcs omap_framebuffer_funcs = { .create_handle = omap_framebuffer_create_handle, .destroy = omap_framebuffer_destroy, - .dirty = omap_framebuffer_dirty, }; static uint32_t get_linear_addr(struct plane *plane, @@ -318,14 +312,6 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb) mutex_unlock(&omap_fb->lock); } -struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p) -{ - struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); - if (p >= drm_format_num_planes(fb->pixel_format)) - return NULL; - return omap_fb->planes[p].bo; -} - /* iterate thru all the connectors, returning ones that are attached * to the same fb.. */ diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 89da41ac64d2..31dfa0893416 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -125,9 +125,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; - mode_cmd.pitches[0] = align_pitch( - mode_cmd.width * ((sizes->surface_bpp + 7) / 8), - mode_cmd.width, sizes->surface_bpp); + mode_cmd.pitches[0] = + DIV_ROUND_UP(mode_cmd.width * sizes->surface_bpp, 8); fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled; if (fbdev->ywrap_enabled) { diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index b97afc281778..9b3f565fd8d7 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -17,6 +17,7 @@ * this program. If not, see <http://www.gnu.org/licenses/>. 
*/ +#include <linux/seq_file.h> #include <linux/shmem_fs.h> #include <linux/spinlock.h> #include <linux/pfn_t.h> @@ -382,18 +383,6 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj) return size; } -/* get tiled size, returns -EINVAL if not tiled buffer */ -int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h) -{ - struct omap_gem_object *omap_obj = to_omap_bo(obj); - if (omap_obj->flags & OMAP_BO_TILED) { - *w = omap_obj->width; - *h = omap_obj->height; - return 0; - } - return -EINVAL; -} - /* ----------------------------------------------------------------------------- * Fault Handling */ @@ -660,7 +649,8 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev, { union omap_gem_size gsize; - args->pitch = align_pitch(0, args->width, args->bpp); + args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + args->size = PAGE_ALIGN(args->pitch * args->height); gsize = (union omap_gem_size){ diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 8b5d54385892..ad429683fef7 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -221,7 +221,6 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct qxl_device *qdev = dev->dev_private; - struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb); struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb); struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj); @@ -252,14 +251,14 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0, &norect, one_clip_rect, inc); - drm_vblank_get(dev, qcrtc->index); + drm_crtc_vblank_get(crtc); if (event) { spin_lock_irqsave(&dev->event_lock, flags); - drm_send_vblank_event(dev, qcrtc->index, event); + drm_crtc_send_vblank_event(crtc, event); spin_unlock_irqrestore(&dev->event_lock, flags); } - drm_vblank_put(dev, qcrtc->index); + drm_crtc_vblank_put(crtc); ret = qxl_bo_reserve(bo, false); if (!ret) { @@ -730,7 +729,6 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id) drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs); qxl_crtc->index = crtc_id; - drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256); drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index dc9df5fe50ba..460bbceae297 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -256,7 +256,7 @@ static struct drm_driver qxl_driver = { .gem_prime_vmap = qxl_gem_prime_vmap, .gem_prime_vunmap = qxl_gem_prime_vunmap, .gem_prime_mmap = qxl_gem_prime_mmap, - .gem_free_object = qxl_gem_object_free, + .gem_free_object_unlocked = qxl_gem_object_free, .gem_open_object = qxl_gem_object_open, .gem_close_object = qxl_gem_object_close, .fops = &qxl_fops, diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 5ea57f6320b8..df2657051afd 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -131,10 +131,6 @@ static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev, int ret; int aligned_size, size; int height = mode_cmd->height; - int bpp; - int depth; - - drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth); size = mode_cmd->pitches[0] * height; aligned_size = ALIGN(size, PAGE_SIZE); diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 4efa8e261baf..f599cd073b72 100644 --- 
a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -96,7 +96,7 @@ retry: return 0; if (have_drawable_releases && sc > 300) { - FENCE_WARN(fence, "failed to wait on release %d " + FENCE_WARN(fence, "failed to wait on release %llu " "after spincount %d\n", fence->context & ~0xf0000000, sc); goto signaled; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 2e216e2ea78c..e91763d5d800 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -276,14 +276,14 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); atombios_blank_crtc(crtc, ATOM_DISABLE); if (dev->num_crtcs > radeon_crtc->crtc_id) - drm_vblank_on(dev, radeon_crtc->crtc_id); + drm_crtc_vblank_on(crtc); radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: if (dev->num_crtcs > radeon_crtc->crtc_id) - drm_vblank_off(dev, radeon_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (radeon_crtc->enabled) atombios_blank_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 80b24a495d6c..5633ee3eb46e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -2386,7 +2386,7 @@ struct radeon_device { struct radeon_mman mman; struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; wait_queue_head_t fence_queue; - unsigned fence_context; + u64 fence_context; struct mutex ring_lock; struct radeon_ring ring[RADEON_NUM_RINGS]; bool ib_pool_ready; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 6a41b4982647..3965d1916b9c 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -231,19 +231,21 @@ void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, *blue = radeon_crtc->lut_b[regno] << 6; } -static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) +static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, uint32_t size) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - int end = (start + size > 256) ? 
256 : start + size, i; + int i; /* userspace palettes are always correct as is */ - for (i = start; i < end; i++) { + for (i = 0; i < size; i++) { radeon_crtc->lut_r[i] = red[i] >> 6; radeon_crtc->lut_g[i] = green[i] >> 6; radeon_crtc->lut_b[i] = blue[i] >> 6; } radeon_crtc_load_lut(crtc); + + return 0; } static void radeon_crtc_destroy(struct drm_crtc *crtc) @@ -381,7 +383,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); - drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); + drm_crtc_vblank_put(&radeon_crtc->base); radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); queue_work(radeon_crtc->flip_queue, &work->unpin_work); } @@ -598,7 +600,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, } work->base = base; - r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id); + r = drm_crtc_vblank_get(crtc); if (r) { DRM_ERROR("failed to get vblank before flip\n"); goto pflip_cleanup; @@ -625,7 +627,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc, return 0; vblank_cleanup: - drm_vblank_put(crtc->dev, radeon_crtc->crtc_id); + drm_crtc_vblank_put(crtc); pflip_cleanup: if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) { @@ -688,6 +690,7 @@ radeon_crtc_set_config(struct drm_mode_set *set) pm_runtime_put_autosuspend(dev->dev); return ret; } + static const struct drm_crtc_funcs radeon_crtc_funcs = { .cursor_set2 = radeon_crtc_cursor_set2, .cursor_move = radeon_crtc_cursor_move, diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index b55aa740171f..a455dc7d4aa1 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -34,11 +34,9 @@ #include "radeon_drv.h" #include <drm/drm_pciids.h> -#include <linux/apple-gmux.h> #include <linux/console.h> #include <linux/module.h> #include <linux/pm_runtime.h> -#include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include <drm/drm_gem.h> @@ -340,13 +338,7 @@ static int radeon_pci_probe(struct pci_dev *pdev, if (ret == -EPROBE_DEFER) return ret; - /* - * apple-gmux is needed on dual GPU MacBook Pro - * to probe the panel if we're the inactive GPU. 
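The radeon hunks above replace the (dev, crtc_id) vblank calls with their struct drm_crtc counterparts; the reference discipline around a page flip is unchanged. A minimal sketch of that pattern, with queue_flip() as a hypothetical stand-in for the hardware-specific submission:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

/* Hypothetical stand-in for the hardware-specific flip submission. */
static int queue_flip(struct drm_crtc *crtc)
{
	return 0;
}

static int foo_page_flip(struct drm_crtc *crtc)
{
	int r;

	/* Hold a vblank reference while the flip is in flight. */
	r = drm_crtc_vblank_get(crtc);
	if (r)
		return r;

	r = queue_flip(crtc);
	if (r) {
		/* The error path must balance the reference. */
		drm_crtc_vblank_put(crtc);
		return r;
	}

	/* The flip-done handler drops the reference with
	 * drm_crtc_vblank_put() once the flip completes. */
	return 0;
}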
- */ - if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) && - apple_gmux_present() && pdev != vga_default_device() && - !vga_switcheroo_handler_flags()) + if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; /* Get rid of things like offb */ diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 478d4099b0d0..d0de4022fff9 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -332,14 +332,14 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); } if (dev->num_crtcs > radeon_crtc->crtc_id) - drm_vblank_on(dev, radeon_crtc->crtc_id); + drm_crtc_vblank_on(crtc); radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: if (dev->num_crtcs > radeon_crtc->crtc_id) - drm_vblank_off(dev, radeon_crtc->crtc_id); + drm_crtc_vblank_off(crtc); if (radeon_crtc->crtc_id) WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); else { diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 38226d925a5b..4b6542538ff9 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -246,6 +246,7 @@ static void radeon_set_power_state(struct radeon_device *rdev) static void radeon_pm_set_clocks(struct radeon_device *rdev) { + struct drm_crtc *crtc; int i, r; /* no need to take locks, etc. if nothing's going to change */ @@ -274,26 +275,30 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) radeon_unmap_vram_bos(rdev); if (rdev->irq.installed) { - for (i = 0; i < rdev->num_crtc; i++) { + i = 0; + drm_for_each_crtc(crtc, rdev->ddev) { if (rdev->pm.active_crtcs & (1 << i)) { /* This can fail if a modeset is in progress */ - if (drm_vblank_get(rdev->ddev, i) == 0) + if (drm_crtc_vblank_get(crtc) == 0) rdev->pm.req_vblank |= (1 << i); else DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n", i); } + i++; } } radeon_set_power_state(rdev); if (rdev->irq.installed) { - for (i = 0; i < rdev->num_crtc; i++) { + i = 0; + drm_for_each_crtc(crtc, rdev->ddev) { if (rdev->pm.req_vblank & (1 << i)) { rdev->pm.req_vblank &= ~(1 << i); - drm_vblank_put(rdev->ddev, i); + drm_crtc_vblank_put(crtc); } + i++; } } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index fb9242d27883..48ec4b6e8b26 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -217,7 +217,7 @@ static struct drm_driver rcar_du_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = rcar_du_enable_vblank, .disable_vblank = rcar_du_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index 4e939e41f030..55149e9ce28e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -27,18 +27,6 @@ #include "rcar_du_vgacon.h" /* ----------------------------------------------------------------------------- - * Common connector functions - */ - -struct drm_encoder * -rcar_du_connector_best_encoder(struct drm_connector *connector) -{ - struct rcar_du_connector *rcon = 
to_rcar_connector(connector); - - return rcar_encoder_to_drm_encoder(rcon->encoder); -} - -/* ----------------------------------------------------------------------------- * Encoder */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h index 719b6f2a031c..a8669c3e0dd5 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h @@ -49,9 +49,6 @@ struct rcar_du_connector { #define to_rcar_connector(c) \ container_of(c, struct rcar_du_connector, connector) -struct drm_encoder * -rcar_du_connector_best_encoder(struct drm_connector *connector); - int rcar_du_encoder_init(struct rcar_du_device *rcdu, enum rcar_du_encoder_type type, enum rcar_du_output output, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c index 6c927144b5c9..612b4d5ae098 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c @@ -52,7 +52,6 @@ static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector, static const struct drm_connector_helper_funcs connector_helper_funcs = { .get_modes = rcar_du_hdmi_connector_get_modes, .mode_valid = rcar_du_hdmi_connector_mode_valid, - .best_encoder = rcar_du_connector_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index e70a4f33d970..6bb032d8ac6b 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -288,6 +288,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev, { struct rcar_du_device *rcdu = dev->dev_private; struct rcar_du_commit *commit; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; unsigned int i; int ret; @@ -309,10 +311,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev, /* Wait until all affected CRTCs have completed previous commits and * mark them as pending. */ - for (i = 0; i < dev->mode_config.num_crtc; ++i) { - if (state->crtcs[i]) - commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]); - } + for_each_crtc_in_state(state, crtc, crtc_state, i) + commit->crtcs |= drm_crtc_mask(crtc); spin_lock(&rcdu->commit.wait.lock); ret = wait_event_interruptible_locked(rcdu->commit.wait, @@ -327,7 +327,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev, } /* Swap the state, this is the point of no return. 
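Both the omapdrm and rcar-du commit paths now call drm_atomic_helper_swap_state(state, true): the helper takes the atomic state plus a stall flag rather than the drm_device pointer. A skeleton of a commit function in that shape, with foo_commit_tail() as a hypothetical driver-specific tail:

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>

/* Hypothetical driver-specific tail that programs the hardware. */
static void foo_commit_tail(struct drm_atomic_state *state)
{
}

static int foo_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	int ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/* Point of no return: publish the new state. The second
	 * argument asks the helper to stall for preceding commits
	 * still holding the old state. Nonblocking handling is
	 * elided here for brevity. */
	drm_atomic_helper_swap_state(state, true);

	foo_commit_tail(state);
	return 0;
}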
*/ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) schedule_work(&commit->work); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c index e905f5da7aaa..6afd0af312ba 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c @@ -59,7 +59,6 @@ static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector) static const struct drm_connector_helper_funcs connector_helper_funcs = { .get_modes = rcar_du_lvds_connector_get_modes, - .best_encoder = rcar_du_connector_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index d445e67f78e1..bfe31ca870cc 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -140,18 +140,17 @@ int rcar_du_atomic_check_planes(struct drm_device *dev, bool needs_realloc = false; unsigned int groups = 0; unsigned int i; + struct drm_plane *drm_plane; + struct drm_plane_state *drm_plane_state; /* Check if hardware planes need to be reallocated. */ - for (i = 0; i < dev->mode_config.num_total_plane; ++i) { + for_each_plane_in_state(state, drm_plane, drm_plane_state, i) { struct rcar_du_plane_state *plane_state; struct rcar_du_plane *plane; unsigned int index; - if (!state->planes[i]) - continue; - - plane = to_rcar_plane(state->planes[i]); - plane_state = to_rcar_plane_state(state->plane_states[i]); + plane = to_rcar_plane(drm_plane); + plane_state = to_rcar_plane_state(drm_plane_state); dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__, plane->group->index, plane - plane->group->planes); @@ -247,18 +246,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev, } /* Reallocate hardware planes for each plane that needs it. 
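The plane side gets the same iterator treatment: for_each_plane_in_state() visits only the planes carried by this atomic state together with their new state, so the NULL checks on state->planes[i] disappear. In condensed form:

#include <drm/drm_atomic.h>

static void foo_check_planes(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i;

	/* Absent planes are simply not visited. */
	for_each_plane_in_state(state, plane, plane_state, i) {
		/* inspect plane_state here, e.g. plane_state->fb */
	}
}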
*/ - for (i = 0; i < dev->mode_config.num_total_plane; ++i) { + for_each_plane_in_state(state, drm_plane, drm_plane_state, i) { struct rcar_du_plane_state *plane_state; struct rcar_du_plane *plane; unsigned int crtc_planes; unsigned int free; int idx; - if (!state->planes[i]) - continue; - - plane = to_rcar_plane(state->planes[i]); - plane_state = to_rcar_plane_state(state->plane_states[i]); + plane = to_rcar_plane(drm_plane); + plane_state = to_rcar_plane_state(drm_plane_state); dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__, plane->group->index, plane - plane->group->planes); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c index 9d7e5c99caf6..8d6125c1c0f9 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c @@ -28,7 +28,6 @@ static int rcar_du_vga_connector_get_modes(struct drm_connector *connector) static const struct drm_connector_helper_funcs connector_helper_funcs = { .get_modes = rcar_du_vga_connector_get_modes, - .best_encoder = rcar_du_connector_best_encoder, }; static enum drm_connector_status @@ -79,7 +78,5 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, if (ret < 0) return ret; - rcon->encoder = renc; - return 0; } diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index 7f6a55cae27a..c120172add5c 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -349,20 +349,11 @@ static int rockchip_dp_remove(struct platform_device *pdev) return 0; } +static const struct dev_pm_ops rockchip_dp_pm_ops = { #ifdef CONFIG_PM_SLEEP -static int rockchip_dp_suspend(struct device *dev) -{ - return analogix_dp_suspend(dev); -} - -static int rockchip_dp_resume(struct device *dev) -{ - return analogix_dp_resume(dev); -} + .suspend = analogix_dp_suspend, + .resume_early = analogix_dp_resume, #endif - -static const struct dev_pm_ops rockchip_dp_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(rockchip_dp_suspend, rockchip_dp_resume) }; static const struct of_device_id rockchip_dp_dt_ids[] = { diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c index dedc65b40f36..ca22e5ee89ca 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c @@ -964,18 +964,9 @@ static enum drm_mode_status dw_mipi_dsi_mode_valid( return mode_status; } -static struct drm_encoder *dw_mipi_dsi_connector_best_encoder( - struct drm_connector *connector) -{ - struct dw_mipi_dsi *dsi = con_to_dsi(connector); - - return &dsi->encoder; -} - static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = { .get_modes = dw_mipi_dsi_connector_get_modes, .mode_valid = dw_mipi_dsi_mode_valid, - .best_encoder = dw_mipi_dsi_connector_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index f8b4feb60b25..006260de9dbd 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -579,14 +579,6 @@ inno_hdmi_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -inno_hdmi_connector_best_encoder(struct drm_connector *connector) -{ - struct inno_hdmi *hdmi = to_inno_hdmi(connector); - - return &hdmi->encoder; -} - static int inno_hdmi_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY) @@ -613,7 +605,6 @@ static 
struct drm_connector_funcs inno_hdmi_connector_funcs = { static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = { .get_modes = inno_hdmi_connector_get_modes, .mode_valid = inno_hdmi_connector_mode_valid, - .best_encoder = inno_hdmi_connector_best_encoder, }; static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index a409d1f703cb..c2bcc5ea1abe 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -19,11 +19,13 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_gem_cma_helper.h> #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/component.h> +#include <linux/console.h> #include "rockchip_drm_drv.h" #include "rockchip_drm_fb.h" @@ -37,6 +39,7 @@ #define DRIVER_MINOR 0 static bool is_support_iommu = true; +static struct drm_driver rockchip_drm_driver; /* * Attach a (component) device to the shared drm dma mapping from master drm @@ -132,20 +135,28 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, priv->crtc_funcs[pipe]->disable_vblank(crtc); } -static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) +static int rockchip_drm_bind(struct device *dev) { + struct drm_device *drm_dev; struct rockchip_drm_private *private; struct dma_iommu_mapping *mapping = NULL; - struct device *dev = drm_dev->dev; - struct drm_connector *connector; int ret; - private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL); - if (!private) + drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev); + if (!drm_dev) return -ENOMEM; - mutex_init(&private->commit.lock); - INIT_WORK(&private->commit.work, rockchip_drm_atomic_work); + ret = drm_dev_register(drm_dev, 0); + if (ret) + goto err_free; + + dev_set_drvdata(dev, drm_dev); + + private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL); + if (!private) { + ret = -ENOMEM; + goto err_unregister; + } drm_dev->dev_private = private; @@ -186,21 +197,10 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) if (ret) goto err_detach_device; - /* - * All components are now added, we can publish the connector sysfs - * entries to userspace. This will generate hotplug events and so - * userspace will expect to be able to access DRM at this point. 
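rockchip's switch from the deprecated .load driver hook to a component bind callback follows the sequence several drivers in this series adopt: allocate the drm_device, register it, then attach driver state, with connector registration collapsing into a single drm_connector_register_all() call in place of the hand-rolled loop removed below. A minimal bind skeleton (hypothetical foo names; in this kernel drm_dev_alloc() returns NULL on failure):

    static int foo_drm_bind(struct device *dev)
    {
            struct drm_device *drm;
            int ret;

            drm = drm_dev_alloc(&foo_drm_driver, dev);
            if (!drm)
                    return -ENOMEM;

            ret = drm_dev_register(drm, 0);
            if (ret)
                    goto err_unref;

            dev_set_drvdata(dev, drm);

            /* ... component_bind_all(), mode config init ... */

            ret = drm_connector_register_all(drm);
            if (ret)
                    goto err_unregister;

            return 0;

    err_unregister:
            drm_dev_unregister(drm);
    err_unref:
            drm_dev_unref(drm);
            return ret;
    }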
- */ - list_for_each_entry(connector, &drm_dev->mode_config.connector_list, - head) { - ret = drm_connector_register(connector); - if (ret) { - dev_err(drm_dev->dev, - "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n", - connector->base.id, - connector->name, ret); - goto err_unbind; - } + ret = drm_connector_register_all(drm_dev); + if (ret) { + dev_err(dev, "failed to register connectors\n"); + goto err_unbind; } /* init kms poll for handling hpd */ @@ -240,12 +240,16 @@ err_release_mapping: err_config_cleanup: drm_mode_config_cleanup(drm_dev); drm_dev->dev_private = NULL; +err_unregister: + drm_dev_unregister(drm_dev); +err_free: + drm_dev_unref(drm_dev); return ret; } -static int rockchip_drm_unload(struct drm_device *drm_dev) +static void rockchip_drm_unbind(struct device *dev) { - struct device *dev = drm_dev->dev; + struct drm_device *drm_dev = dev_get_drvdata(dev); rockchip_drm_fbdev_fini(drm_dev); drm_vblank_cleanup(drm_dev); @@ -255,29 +259,9 @@ static int rockchip_drm_unload(struct drm_device *drm_dev) arm_iommu_detach_device(dev); drm_mode_config_cleanup(drm_dev); drm_dev->dev_private = NULL; - - return 0; -} - -static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc, - struct drm_file *file_priv) -{ - struct rockchip_drm_private *priv = crtc->dev->dev_private; - int pipe = drm_crtc_index(crtc); - - if (pipe < ROCKCHIP_MAX_CRTC && - priv->crtc_funcs[pipe] && - priv->crtc_funcs[pipe]->cancel_pending_vblank) - priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv); -} - -static void rockchip_drm_preclose(struct drm_device *dev, - struct drm_file *file_priv) -{ - struct drm_crtc *crtc; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) - rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv); + drm_dev_unregister(drm_dev); + drm_dev_unref(drm_dev); + dev_set_drvdata(dev, NULL); } void rockchip_drm_lastclose(struct drm_device *dev) @@ -300,23 +284,15 @@ static const struct file_operations rockchip_drm_driver_fops = { .release = drm_release, }; -const struct vm_operations_struct rockchip_drm_vm_ops = { - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - static struct drm_driver rockchip_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, - .load = rockchip_drm_load, - .unload = rockchip_drm_unload, - .preclose = rockchip_drm_preclose, .lastclose = rockchip_drm_lastclose, .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = rockchip_drm_crtc_enable_vblank, .disable_vblank = rockchip_drm_crtc_disable_vblank, - .gem_vm_ops = &rockchip_drm_vm_ops, - .gem_free_object = rockchip_gem_free_object, + .gem_vm_ops = &drm_gem_cma_vm_ops, + .gem_free_object_unlocked = rockchip_gem_free_object, .dumb_create = rockchip_gem_dumb_create, .dumb_map_offset = rockchip_gem_dumb_map_offset, .dumb_destroy = drm_gem_dumb_destroy, @@ -337,25 +313,38 @@ static struct drm_driver rockchip_drm_driver = { }; #ifdef CONFIG_PM_SLEEP -static int rockchip_drm_sys_suspend(struct device *dev) +void rockchip_drm_fb_suspend(struct drm_device *drm) { - struct drm_device *drm = dev_get_drvdata(dev); - struct drm_connector *connector; + struct rockchip_drm_private *priv = drm->dev_private; - if (!drm) - return 0; + console_lock(); + drm_fb_helper_set_suspend(&priv->fbdev_helper, 1); + console_unlock(); +} + +void rockchip_drm_fb_resume(struct drm_device *drm) +{ + struct rockchip_drm_private *priv = drm->dev_private; - drm_modeset_lock_all(drm); - list_for_each_entry(connector, &drm->mode_config.connector_list, 
head) { - int old_dpms = connector->dpms; + console_lock(); + drm_fb_helper_set_suspend(&priv->fbdev_helper, 0); + console_unlock(); +} - if (connector->funcs->dpms) - connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); +static int rockchip_drm_sys_suspend(struct device *dev) +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct rockchip_drm_private *priv = drm->dev_private; - /* Set the old mode back to the connector for resume */ - connector->dpms = old_dpms; + drm_kms_helper_poll_disable(drm); + rockchip_drm_fb_suspend(drm); + + priv->state = drm_atomic_helper_suspend(drm); + if (IS_ERR(priv->state)) { + rockchip_drm_fb_resume(drm); + drm_kms_helper_poll_enable(drm); + return PTR_ERR(priv->state); } - drm_modeset_unlock_all(drm); return 0; } @@ -363,47 +352,11 @@ static int rockchip_drm_sys_suspend(struct device *dev) static int rockchip_drm_sys_resume(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct drm_connector *connector; - enum drm_connector_status status; - bool changed = false; - - if (!drm) - return 0; - - drm_modeset_lock_all(drm); - list_for_each_entry(connector, &drm->mode_config.connector_list, head) { - int desired_mode = connector->dpms; - - /* - * at suspend time, we save dpms to connector->dpms, - * restore the old_dpms, and at current time, the connector - * dpms status must be DRM_MODE_DPMS_OFF. - */ - connector->dpms = DRM_MODE_DPMS_OFF; - - /* - * If the connector has been disconnected during suspend, - * disconnect it from the encoder and leave it off. We'll notify - * userspace at the end. - */ - if (desired_mode == DRM_MODE_DPMS_ON) { - status = connector->funcs->detect(connector, true); - if (status == connector_status_disconnected) { - connector->encoder = NULL; - connector->status = status; - changed = true; - continue; - } - } - if (connector->funcs->dpms) - connector->funcs->dpms(connector, desired_mode); - } - drm_modeset_unlock_all(drm); - - drm_helper_resume_force_mode(drm); + struct rockchip_drm_private *priv = drm->dev_private; - if (changed) - drm_kms_helper_hotplug_event(drm); + drm_atomic_helper_resume(drm, priv->state); + rockchip_drm_fb_resume(drm); + drm_kms_helper_poll_enable(drm); return 0; } @@ -444,37 +397,6 @@ static void rockchip_add_endpoints(struct device *dev, } } -static int rockchip_drm_bind(struct device *dev) -{ - struct drm_device *drm; - int ret; - - drm = drm_dev_alloc(&rockchip_drm_driver, dev); - if (!drm) - return -ENOMEM; - - ret = drm_dev_register(drm, 0); - if (ret) - goto err_free; - - dev_set_drvdata(dev, drm); - - return 0; - -err_free: - drm_dev_unref(drm); - return ret; -} - -static void rockchip_drm_unbind(struct device *dev) -{ - struct drm_device *drm = dev_get_drvdata(dev); - - drm_dev_unregister(drm); - drm_dev_unref(drm); - dev_set_drvdata(dev, NULL); -} - static const struct component_master_ops rockchip_drm_ops = { .bind = rockchip_drm_bind, .unbind = rockchip_drm_unbind, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 56f43a364c7f..ea3932940061 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -40,14 +40,6 @@ struct rockchip_crtc_funcs { int (*enable_vblank)(struct drm_crtc *crtc); void (*disable_vblank)(struct drm_crtc *crtc); void (*wait_for_update)(struct drm_crtc *crtc); - void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv); -}; - -struct rockchip_atomic_commit { - struct work_struct work; - struct drm_atomic_state *state; - 
struct drm_device *dev; - struct mutex lock; }; struct rockchip_crtc_state { @@ -68,11 +60,9 @@ struct rockchip_drm_private { struct drm_fb_helper fbdev_helper; struct drm_gem_object *fbdev_bo; const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC]; - - struct rockchip_atomic_commit commit; + struct drm_atomic_state *state; }; -void rockchip_drm_atomic_work(struct work_struct *work); int rockchip_register_crtc_funcs(struct drm_crtc *crtc, const struct rockchip_crtc_funcs *crtc_funcs); void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 755cfdba61cd..20f12bc5a386 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -228,87 +228,32 @@ rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_stat } static void -rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit) +rockchip_atomic_commit_tail(struct drm_atomic_state *state) { - struct drm_atomic_state *state = commit->state; - struct drm_device *dev = commit->dev; + struct drm_device *dev = state->dev; - /* - * TODO: do fence wait here. - */ - - /* - * Rockchip crtc support runtime PM, can't update display planes - * when crtc is disabled. - * - * drm_atomic_helper_commit comments detail that: - * For drivers supporting runtime PM the recommended sequence is - * - * drm_atomic_helper_commit_modeset_disables(dev, state); - * - * drm_atomic_helper_commit_modeset_enables(dev, state); - * - * drm_atomic_helper_commit_planes(dev, state, true); - * - * See the kerneldoc entries for these three functions for more details. - */ drm_atomic_helper_commit_modeset_disables(dev, state); drm_atomic_helper_commit_modeset_enables(dev, state); drm_atomic_helper_commit_planes(dev, state, true); + drm_atomic_helper_commit_hw_done(state); + rockchip_atomic_wait_for_complete(dev, state); drm_atomic_helper_cleanup_planes(dev, state); - - drm_atomic_state_free(state); -} - -void rockchip_drm_atomic_work(struct work_struct *work) -{ - struct rockchip_atomic_commit *commit = container_of(work, - struct rockchip_atomic_commit, work); - - rockchip_atomic_commit_complete(commit); } -int rockchip_drm_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, - bool nonblock) -{ - struct rockchip_drm_private *private = dev->dev_private; - struct rockchip_atomic_commit *commit = &private->commit; - int ret; - - ret = drm_atomic_helper_prepare_planes(dev, state); - if (ret) - return ret; - - /* serialize outstanding nonblocking commits */ - mutex_lock(&commit->lock); - flush_work(&commit->work); - - drm_atomic_helper_swap_state(dev, state); - - commit->dev = dev; - commit->state = state; - - if (nonblock) - schedule_work(&commit->work); - else - rockchip_atomic_commit_complete(commit); - - mutex_unlock(&commit->lock); - - return 0; -} +struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = { + .atomic_commit_tail = rockchip_atomic_commit_tail, +}; static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { .fb_create = rockchip_user_fb_create, .output_poll_changed = rockchip_drm_output_poll_changed, .atomic_check = drm_atomic_helper_check, - .atomic_commit = rockchip_drm_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, }; struct drm_framebuffer * @@ -339,4 +284,5 @@ void rockchip_drm_mode_config_init(struct drm_device *dev) dev->mode_config.max_height = 4096; dev->mode_config.funcs = 
&rockchip_drm_mode_config_funcs; + dev->mode_config.helper_private = &rockchip_mode_config_helpers; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c index f261512bb4a0..207e01de6e32 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c @@ -108,7 +108,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, fbi->screen_size = rk_obj->base.size; fbi->fix.smem_len = rk_obj->base.size; - DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n", + DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n", fb->width, fb->height, fb->depth, rk_obj->kvaddr, offset, size); @@ -156,9 +156,6 @@ int rockchip_drm_fbdev_init(struct drm_device *dev) goto err_drm_fb_helper_fini; } - /* disable all the possible outputs/crtcs before entering KMS mode */ - drm_helper_disable_unused_functions(dev); - ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); if (ret < 0) { dev_err(dev->dev, "Failed to set initial hw config - %d.\n", diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 9c2d8a894093..059e902f872d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -38,7 +38,7 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, &rk_obj->dma_addr, GFP_KERNEL, &rk_obj->dma_attrs); if (!rk_obj->kvaddr) { - DRM_ERROR("failed to allocate %#x byte dma buffer", obj->size); + DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size); return -ENOMEM; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 1c4d5b5a70a2..8cd840f602b7 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -98,7 +98,9 @@ struct vop_win { const struct vop_win_data *data; struct vop *vop; - struct vop_plane_state state; + /* protected by dev->event_lock */ + bool enable; + dma_addr_t yrgb_mst; }; struct vop { @@ -112,6 +114,8 @@ struct vop { bool vsync_work_pending; struct completion dsp_hold_completion; struct completion wait_update_complete; + + /* protected by dev->event_lock */ struct drm_pending_vblank_event *event; const struct vop_data *data; @@ -431,9 +435,6 @@ static void vop_enable(struct drm_crtc *crtc) struct vop *vop = to_vop(crtc); int ret; - if (vop->is_enabled) - return; - ret = pm_runtime_get_sync(vop->dev); if (ret < 0) { dev_err(vop->dev, "failed to get pm runtime: %d\n", ret); @@ -501,8 +502,7 @@ static void vop_crtc_disable(struct drm_crtc *crtc) struct vop *vop = to_vop(crtc); int i; - if (!vop->is_enabled) - return; + WARN_ON(vop->event); /* * We need to make sure that all windows are disabled before we @@ -553,6 +553,14 @@ static void vop_crtc_disable(struct drm_crtc *crtc) clk_disable(vop->aclk); clk_disable(vop->hclk); pm_runtime_put(vop->dev); + + if (crtc->state->event && !crtc->state->active) { + spin_lock_irq(&crtc->dev->event_lock); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + spin_unlock_irq(&crtc->dev->event_lock); + + crtc->state->event = NULL; + } } static void vop_plane_destroy(struct drm_plane *plane) @@ -658,6 +666,11 @@ static void vop_plane_atomic_disable(struct drm_plane *plane, if (!old_state->crtc) return; + spin_lock_irq(&plane->dev->event_lock); + vop_win->enable = false; + vop_win->yrgb_mst = 0; + spin_unlock_irq(&plane->dev->event_lock); + spin_lock(&vop->reg_lock); VOP_WIN_SET(vop, win, enable, 0); @@ 
-692,7 +705,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane, /* * can't update plane when vop is disabled. */ - if (!crtc) + if (WARN_ON(!crtc)) return; if (WARN_ON(!vop->is_enabled)) @@ -721,6 +734,11 @@ static void vop_plane_atomic_update(struct drm_plane *plane, offset += (src->y1 >> 16) * fb->pitches[0]; vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0]; + spin_lock_irq(&plane->dev->event_lock); + vop_win->enable = true; + vop_win->yrgb_mst = vop_plane_state->yrgb_mst; + spin_unlock_irq(&plane->dev->event_lock); + spin_lock(&vop->reg_lock); VOP_WIN_SET(vop, win, format, vop_plane_state->format); @@ -876,30 +894,10 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc) WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100)); } -static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc, - struct drm_file *file_priv) -{ - struct drm_device *drm = crtc->dev; - struct vop *vop = to_vop(crtc); - struct drm_pending_vblank_event *e; - unsigned long flags; - - spin_lock_irqsave(&drm->event_lock, flags); - e = vop->event; - if (e && e->base.file_priv == file_priv) { - vop->event = NULL; - - e->base.destroy(&e->base); - file_priv->event_space += sizeof(e->event); - } - spin_unlock_irqrestore(&drm->event_lock, flags); -} - static const struct rockchip_crtc_funcs private_crtc_funcs = { .enable_vblank = vop_crtc_enable_vblank, .disable_vblank = vop_crtc_disable_vblank, .wait_for_update = vop_crtc_wait_for_update, - .cancel_pending_vblank = vop_crtc_cancel_pending_vblank, }; static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, @@ -931,6 +929,8 @@ static void vop_crtc_enable(struct drm_crtc *crtc) u16 vact_end = vact_st + vdisplay; uint32_t val; + WARN_ON(vop->event); + vop_enable(crtc); /* * If dclk rate is zero, mean that scanout is stop, @@ -1027,12 +1027,15 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc, { struct vop *vop = to_vop(crtc); + spin_lock_irq(&crtc->dev->event_lock); if (crtc->state->event) { WARN_ON(drm_crtc_vblank_get(crtc) != 0); + WARN_ON(vop->event); vop->event = crtc->state->event; crtc->state->event = NULL; } + spin_unlock_irq(&crtc->dev->event_lock); } static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { @@ -1080,16 +1083,14 @@ static const struct drm_crtc_funcs vop_crtc_funcs = { static bool vop_win_pending_is_complete(struct vop_win *vop_win) { - struct drm_plane *plane = &vop_win->base; - struct vop_plane_state *state = to_vop_plane_state(plane->state); dma_addr_t yrgb_mst; - if (!state->enable) + if (!vop_win->enable) return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0; yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data); - return yrgb_mst == state->yrgb_mst; + return yrgb_mst == vop_win->yrgb_mst; } static void vop_handle_vblank(struct vop *vop) @@ -1104,15 +1105,16 @@ static void vop_handle_vblank(struct vop *vop) return; } + spin_lock_irqsave(&drm->event_lock, flags); if (vop->event) { - spin_lock_irqsave(&drm->event_lock, flags); drm_crtc_send_vblank_event(crtc, vop->event); drm_crtc_vblank_put(crtc); vop->event = NULL; - spin_unlock_irqrestore(&drm->event_lock, flags); } + spin_unlock_irqrestore(&drm->event_lock, flags); + if (!completion_done(&vop->wait_update_complete)) complete(&vop->wait_update_complete); } diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index 1e154fc779d5..6547b1db460a 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -441,7 
+441,7 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc) scrtc->event = NULL; if (event) { drm_crtc_send_vblank_event(&scrtc->crtc, event); - drm_vblank_put(dev, 0); + drm_crtc_vblank_put(&scrtc->crtc); } spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -467,7 +467,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc, if (event) { event->pipe = 0; - drm_vblank_get(dev, 0); + drm_crtc_vblank_get(&scrtc->crtc); spin_lock_irqsave(&dev->event_lock, flags); scrtc->event = event; spin_unlock_irqrestore(&dev->event_lock, flags); diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 7700ff172079..ee79264b5b6a 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -264,7 +264,7 @@ static struct drm_driver shmob_drm_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = shmob_drm_enable_vblank, .disable_vblank = shmob_drm_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index 505620c7c2c8..e04deedabd4a 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c @@ -51,15 +51,6 @@ static void sti_crtc_disabling(struct drm_crtc *crtc) mixer->status = STI_MIXER_DISABLING; } -static bool sti_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - /* accept the provided drm_display_mode, do not fix it up */ - drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); - return true; -} - static int sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) { @@ -230,7 +221,6 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc, static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { .enable = sti_crtc_enable, .disable = sti_crtc_disabling, - .mode_fixup = sti_crtc_mode_fixup, .mode_set = drm_helper_crtc_mode_set, .mode_set_nofb = sti_crtc_mode_set_nofb, .mode_set_base = drm_helper_crtc_mode_set_base, diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c index 4e990299735c..53aa0029295b 100644 --- a/drivers/gpu/drm/sti/sti_cursor.c +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -105,12 +105,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "%s: (vaddr = 0x%p)", sti_plane_to_str(&cursor->plane), cursor->regs); @@ -129,7 +123,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data) DBGFS_DUMP(CUR_AWE); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 872495e72294..dd2c400c4a46 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -72,11 +72,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data) struct drm_info_node *node = s->private; struct drm_device *dev = node->minor->dev; struct drm_plane *p; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; list_for_each_entry(p, 
&dev->mode_config.plane_list, head) { struct sti_plane *plane = to_sti_plane(p); @@ -86,7 +81,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data) plane->fps_info.fips_str); } - mutex_unlock(&dev->struct_mutex); return 0; } @@ -221,7 +215,7 @@ static int sti_atomic_commit(struct drm_device *drm, * the software side now. */ - drm_atomic_helper_swap_state(drm, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) sti_atomic_schedule(private, state); @@ -310,7 +304,7 @@ static struct drm_driver sti_driver = { .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC, .load = sti_load, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index 25f76632002c..e2901667eceb 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -177,12 +177,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs); DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL); @@ -193,7 +187,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data) dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -384,20 +377,10 @@ static int sti_dvo_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -struct drm_encoder *sti_dvo_best_encoder(struct drm_connector *connector) -{ - struct sti_dvo_connector *dvo_connector - = to_sti_dvo_connector(connector); - - /* Best encoder is the one associated during connector creation */ - return dvo_connector->encoder; -} - static const struct drm_connector_helper_funcs sti_dvo_connector_helper_funcs = { .get_modes = sti_dvo_connector_get_modes, .mode_valid = sti_dvo_connector_mode_valid, - .best_encoder = sti_dvo_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index ff33c38da197..fdf69b5a041b 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -208,14 +208,8 @@ static int gdp_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; struct drm_plane *drm_plane = &gdp->plane.drm_plane; struct drm_crtc *crtc = drm_plane->crtc; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "%s: (vaddr = 0x%p)", sti_plane_to_str(&gdp->plane), gdp->regs); @@ -248,7 +242,6 @@ static int gdp_dbg_show(struct seq_file *s, void *data) seq_printf(s, " Connected to DRM CRTC #%d (%s)\n", crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc))); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -279,13 +272,7 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg) { struct drm_info_node *node = s->private; struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; unsigned int b; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; for (b = 0; b 
< GDP_NODE_NB_BANK; b++) { seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b); @@ -294,7 +281,6 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg) gdp_node_dump_node(s, gdp->node_list[b].btm_field); } - mutex_unlock(&dev->struct_mutex); return 0; } diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index f7d3464cdf09..dcec5a8eda59 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -376,12 +376,6 @@ static int hda_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_hda *hda = (struct sti_hda *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs); DBGFS_DUMP(HDA_ANA_CFG); @@ -397,7 +391,6 @@ static int hda_dbg_show(struct seq_file *s, void *data) hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -676,20 +669,10 @@ static int sti_hda_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -struct drm_encoder *sti_hda_best_encoder(struct drm_connector *connector) -{ - struct sti_hda_connector *hda_connector - = to_sti_hda_connector(connector); - - /* Best encoder is the one associated during connector creation */ - return hda_connector->encoder; -} - static const struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = { .get_modes = sti_hda_connector_get_modes, .mode_valid = sti_hda_connector_mode_valid, - .best_encoder = sti_hda_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 6ef0715bd5b9..36d9d6635784 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -628,12 +628,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs); DBGFS_DUMP("\n", HDMI_CFG); @@ -690,7 +684,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data) DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -897,20 +890,10 @@ static int sti_hdmi_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -struct drm_encoder *sti_hdmi_best_encoder(struct drm_connector *connector) -{ - struct sti_hdmi_connector *hdmi_connector - = to_sti_hdmi_connector(connector); - - /* Best encoder is the one associated during connector creation */ - return hdmi_connector->encoder; -} - static const struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = { .get_modes = sti_hdmi_connector_get_modes, .mode_valid = sti_hdmi_connector_mode_valid, - .best_encoder = sti_hdmi_best_encoder, }; /* get detection status of display device */ diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 1edec29b9e45..1c06a50fddca 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -555,14 +555,8 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; 
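/*
 * A recurring cleanup in the sti hunks around here: every debugfs show
 * handler used to take dev->struct_mutex before dumping registers and
 * release it afterwards. The dumps only read driver-private register
 * state through seq_printf(), which does not need struct_mutex, so the
 * lock/unlock pair is simply deleted. The resulting shape (hedged
 * sketch, hypothetical foo names):
 *
 *	static int foo_dbg_show(struct seq_file *s, void *data)
 *	{
 *		struct drm_info_node *node = s->private;
 *		struct foo *foo = node->info_ent->data;
 *
 *		seq_printf(s, "%s: (vaddr = 0x%p)\n", foo->name, foo->regs);
 *		return 0;
 *	}
 */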
int cmd, cmd_offset, infoxp70; void *virt; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "%s: (vaddr = 0x%p)", sti_plane_to_str(&hqvdp->plane), hqvdp->regs); @@ -630,7 +624,6 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data) seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c index aed7801b51f7..6f86f2b2b6a5 100644 --- a/drivers/gpu/drm/sti/sti_mixer.c +++ b/drivers/gpu/drm/sti/sti_mixer.c @@ -151,12 +151,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg) { struct drm_info_node *node = s->private; struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "%s: (vaddr = 0x%p)", sti_mixer_to_str(mixer), mixer->regs); @@ -176,7 +170,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg) mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index f983db5a59da..60fe0afa5644 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -515,13 +515,7 @@ static int tvout_dbg_show(struct seq_file *s, void *data) { struct drm_info_node *node = s->private; struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; struct drm_crtc *crtc; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs); @@ -587,7 +581,6 @@ static int tvout_dbg_show(struct seq_file *s, void *data) DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c index 523ed19f5ac6..0132aaebe598 100644 --- a/drivers/gpu/drm/sti/sti_vid.c +++ b/drivers/gpu/drm/sti/sti_vid.c @@ -92,12 +92,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg) { struct drm_info_node *node = s->private; struct sti_vid *vid = (struct sti_vid *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs); @@ -122,7 +116,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg) DBGFS_DUMP(VID_CSAT); seq_puts(s, "\n"); - mutex_unlock(&dev->struct_mutex); return 0; } diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c index 4182a21f5923..f628b6d8f23f 100644 --- a/drivers/gpu/drm/sun4i/sun4i_crtc.c +++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c @@ -51,10 +51,22 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc, { struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); struct sun4i_drv *drv = scrtc->drv; + struct drm_pending_vblank_event *event = crtc->state->event; DRM_DEBUG_DRIVER("Committing plane changes\n"); sun4i_backend_commit(drv->backend); + + if (event) { + crtc->state->event = NULL; + + spin_lock_irq(&crtc->dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irq(&crtc->dev->event_lock); + } } static void sun4i_crtc_disable(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c 
b/drivers/gpu/drm/sun4i/sun4i_drv.c index 76e922bb60e5..68e9d85085fb 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -103,7 +103,7 @@ static struct drm_driver sun4i_drv_driver = { .dumb_create = drm_gem_cma_dumb_create, .dumb_destroy = drm_gem_dumb_destroy, .dumb_map_offset = drm_gem_cma_dumb_map_offset, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, /* PRIME Operations */ diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index ab6494818050..442cfe271688 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -90,19 +90,9 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -sun4i_rgb_best_encoder(struct drm_connector *connector) -{ - struct sun4i_rgb *rgb = - drm_connector_to_sun4i_rgb(connector); - - return &rgb->encoder; -} - static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = { .get_modes = sun4i_rgb_get_modes, .mode_valid = sun4i_rgb_mode_valid, - .best_encoder = sun4i_rgb_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c index bc047f923508..b84147896294 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tv.c +++ b/drivers/gpu/drm/sun4i/sun4i_tv.c @@ -526,18 +526,9 @@ static int sun4i_tv_comp_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder * -sun4i_tv_comp_best_encoder(struct drm_connector *connector) -{ - struct sun4i_tv *tv = drm_connector_to_sun4i_tv(connector); - - return &tv->encoder; -} - static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = { .get_modes = sun4i_tv_comp_get_modes, .mode_valid = sun4i_tv_comp_mode_valid, - .best_encoder = sun4i_tv_comp_best_encoder, }; static enum drm_connector_status diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index b59c3bf0df44..a177a42a9849 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -93,7 +93,7 @@ static int tegra_atomic_commit(struct drm_device *drm, * the software side now. 
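The tegra hunk continuing below makes the same mechanical change as sti and vc4 in this series: drm_atomic_helper_swap_state() loses its drm_device argument (recoverable as state->dev) and gains a stall flag that waits for any still-pending swap from a previous commit. A minimal commit skeleton using the new call (hypothetical foo names; foo_commit_tail() stands in for the driver's hardware programming):

    static int foo_atomic_commit(struct drm_device *dev,
                                 struct drm_atomic_state *state,
                                 bool nonblock)
    {
            int ret;

            ret = drm_atomic_helper_prepare_planes(dev, state);
            if (ret)
                    return ret;

            /*
             * Point of no return: publish the new state. stall=true
             * waits until any preceding commit has finished swapping,
             * so the swapped-out state is safe to read from here on.
             */
            drm_atomic_helper_swap_state(state, true);

            foo_commit_tail(state);         /* hypothetical helper */

            return 0;
    }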
*/ - drm_atomic_helper_swap_state(drm, state); + drm_atomic_helper_swap_state(state, true); if (nonblock) tegra_atomic_schedule(tegra, state); diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index f52d6cb24ff5..0ddcce1b420d 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -239,8 +239,6 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output); void tegra_output_exit(struct tegra_output *output); int tegra_output_connector_get_modes(struct drm_connector *connector); -struct drm_encoder * -tegra_output_connector_best_encoder(struct drm_connector *connector); enum drm_connector_status tegra_output_connector_detect(struct drm_connector *connector, bool force); void tegra_output_connector_destroy(struct drm_connector *connector); diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index d1239ebc190f..099cccb2fbcb 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -794,7 +794,6 @@ tegra_dsi_connector_mode_valid(struct drm_connector *connector, static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = { .get_modes = tegra_output_connector_get_modes, .mode_valid = tegra_dsi_connector_mode_valid, - .best_encoder = tegra_output_connector_best_encoder, }; static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = { diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index b7ef4929e347..2fdb8796443e 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -806,7 +806,6 @@ static const struct drm_connector_helper_funcs tegra_hdmi_connector_helper_funcs = { .get_modes = tegra_output_connector_get_modes, .mode_valid = tegra_hdmi_connector_mode_valid, - .best_encoder = tegra_output_connector_best_encoder, }; static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = { diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 46664b622270..1480f6aaffe4 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -42,14 +42,6 @@ int tegra_output_connector_get_modes(struct drm_connector *connector) return err; } -struct drm_encoder * -tegra_output_connector_best_encoder(struct drm_connector *connector) -{ - struct tegra_output *output = connector_to_output(connector); - - return &output->encoder; -} - enum drm_connector_status tegra_output_connector_detect(struct drm_connector *connector, bool force) { diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c index e246334e0252..a131b44e2d6f 100644 --- a/drivers/gpu/drm/tegra/rgb.c +++ b/drivers/gpu/drm/tegra/rgb.c @@ -112,7 +112,6 @@ tegra_rgb_connector_mode_valid(struct drm_connector *connector, static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = { .get_modes = tegra_output_connector_get_modes, .mode_valid = tegra_rgb_connector_mode_valid, - .best_encoder = tegra_output_connector_best_encoder, }; static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = { diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 757c6e8603af..34958d71284b 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -1087,7 +1087,6 @@ tegra_sor_connector_mode_valid(struct drm_connector *connector, static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = { .get_modes = tegra_sor_connector_get_modes, .mode_valid = tegra_sor_connector_mode_valid, - .best_encoder = tegra_output_connector_best_encoder, }; static const struct 
drm_encoder_funcs tegra_sor_encoder_funcs = { diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index 709bc903524d..308e197908fc 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -549,7 +549,7 @@ static struct drm_driver tilcdc_driver = { .get_vblank_counter = drm_vblank_no_hw_counter, .enable_vblank = tilcdc_enable_vblank, .disable_vblank = tilcdc_disable_vblank, - .gem_free_object = drm_gem_cma_free_object, + .gem_free_object_unlocked = drm_gem_cma_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .dumb_create = drm_gem_cma_dumb_create, .dumb_map_offset = drm_gem_cma_dumb_map_offset, diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index b87afee44995..f92ea9579674 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -376,7 +376,7 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc, spin_lock_irqsave(&dev->event_lock, flags); if (event) - drm_send_vblank_event(dev, 0, event); + drm_crtc_send_vblank_event(crtc, event); spin_unlock_irqrestore(&dev->event_lock, flags); crtc->primary->fb = fb; diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index e5a9d3aaf45f..59adcf8532dd 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -291,8 +291,6 @@ static void vc4_bo_cache_free_old(struct drm_device *dev) /* Called on the last userspace/kernel unreference of the BO. Returns * it to the BO cache if possible, otherwise frees it. - * - * Note that this is called with the struct_mutex held. */ void vc4_free_object(struct drm_gem_object *gem_bo) { diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 904d0754ad78..4c0f26a644a3 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -175,20 +175,22 @@ vc4_crtc_lut_load(struct drm_crtc *crtc) HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]); } -static void +static int vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size) + uint32_t size) { struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); u32 i; - for (i = start; i < start + size; i++) { + for (i = 0; i < size; i++) { vc4_crtc->lut_r[i] = r[i] >> 8; vc4_crtc->lut_g[i] = g[i] >> 8; vc4_crtc->lut_b[i] = b[i] >> 8; } vc4_crtc_lut_load(crtc); + + return 0; } static u32 vc4_get_fifo_full_level(u32 format) @@ -395,6 +397,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc, struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_plane *plane; unsigned long flags; + const struct drm_plane_state *plane_state; u32 dlist_count = 0; int ret; @@ -404,18 +407,8 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc, if (hweight32(state->connector_mask) > 1) return -EINVAL; - drm_atomic_crtc_state_for_each_plane(plane, state) { - struct drm_plane_state *plane_state = - state->state->plane_states[drm_plane_index(plane)]; - - /* plane might not have changed, in which case take - * current state: - */ - if (!plane_state) - plane_state = plane->state; - + drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state) dlist_count += vc4_plane_dlist_size(plane_state); - } dlist_count++; /* Account for SCALER_CTL0_END. 
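The vc4_crtc_atomic_check() hunk just above replaces the open-coded peek into state->state->plane_states[], with its manual fallback to plane->state for planes untouched by the update, by drm_atomic_crtc_state_for_each_plane_state(), which performs that resolution itself and lets the iterated state be const. A minimal counting helper in the same shape (hypothetical foo names; foo_plane_dlist_size() is assumed):

    static u32 foo_count_dlist(struct drm_crtc_state *crtc_state)
    {
            const struct drm_plane_state *plane_state;
            struct drm_plane *plane;
            u32 dlist_count = 0;

            /* Yields new state when in the update, current state otherwise. */
            drm_atomic_crtc_state_for_each_plane_state(plane, plane_state,
                                                       crtc_state)
                    dlist_count += foo_plane_dlist_size(plane_state);

            return dlist_count + 1; /* terminating entry */
    }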
*/ diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c index 9817dbfa4ac3..dba1114297e4 100644 --- a/drivers/gpu/drm/vc4/vc4_dpi.c +++ b/drivers/gpu/drm/vc4/vc4_dpi.c @@ -208,14 +208,6 @@ static int vc4_dpi_connector_get_modes(struct drm_connector *connector) return 0; } -static struct drm_encoder * -vc4_dpi_connector_best_encoder(struct drm_connector *connector) -{ - struct vc4_dpi_connector *dpi_connector = - to_vc4_dpi_connector(connector); - return dpi_connector->encoder; -} - static const struct drm_connector_funcs vc4_dpi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = vc4_dpi_connector_detect, @@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_dpi_connector_funcs = { static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = { .get_modes = vc4_dpi_connector_get_modes, - .best_encoder = vc4_dpi_connector_best_encoder, }; static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev, diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 3446ece21b4a..58b8fc036332 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -99,7 +99,7 @@ static struct drm_driver vc4_drm_driver = { #endif .gem_create_object = vc4_create_object, - .gem_free_object = vc4_free_object, + .gem_free_object_unlocked = vc4_free_object, .gem_vm_ops = &drm_gem_cma_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 37cac59401d7..c799baabc008 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -469,7 +469,7 @@ int vc4_kms_load(struct drm_device *dev); struct drm_plane *vc4_plane_init(struct drm_device *dev, enum drm_plane_type type); u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist); -u32 vc4_plane_dlist_size(struct drm_plane_state *state); +u32 vc4_plane_dlist_size(const struct drm_plane_state *state); void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 46899d6de675..6155e8aca1c6 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -53,10 +53,8 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state) { unsigned int i; - mutex_lock(&dev->struct_mutex); for (i = 0; i < state->user_state.bo_count; i++) - drm_gem_object_unreference(state->bo[i]); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(state->bo[i]); kfree(state); } @@ -687,11 +685,9 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned i; - /* Need the struct lock for drm_gem_object_unreference(). 
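As the removed comment below notes, plain drm_gem_object_unreference() required dev->struct_mutex; vc4 now pairs its switch to the .gem_free_object_unlocked driver hook with drm_gem_object_unreference_unlocked(), so reference drops need no global lock at all. A minimal sketch (hypothetical foo names):

    #include <linux/slab.h>
    #include <drm/drmP.h>

    /* Drop an array of BO references without touching struct_mutex. */
    static void foo_put_bos(struct drm_gem_object **bos, unsigned int count)
    {
            unsigned int i;

            for (i = 0; i < count; i++)
                    drm_gem_object_unreference_unlocked(bos[i]);
            kfree(bos);
    }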
*/ - mutex_lock(&dev->struct_mutex); if (exec->bo) { for (i = 0; i < exec->bo_count; i++) - drm_gem_object_unreference(&exec->bo[i]->base); + drm_gem_object_unreference_unlocked(&exec->bo[i]->base); kfree(exec->bo); } @@ -699,9 +695,8 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) struct vc4_bo *bo = list_first_entry(&exec->unref_list, struct vc4_bo, unref_head); list_del(&bo->unref_head); - drm_gem_object_unreference(&bo->base.base); + drm_gem_object_unreference_unlocked(&bo->base.base); } - mutex_unlock(&dev->struct_mutex); mutex_lock(&vc4->power_lock); if (--vc4->power_refcount == 0) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index fd2644d231ff..68df91c3f860 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -208,14 +208,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) return ret; } -static struct drm_encoder * -vc4_hdmi_connector_best_encoder(struct drm_connector *connector) -{ - struct vc4_hdmi_connector *hdmi_connector = - to_vc4_hdmi_connector(connector); - return hdmi_connector->encoder; -} - static const struct drm_connector_funcs vc4_hdmi_connector_funcs = { .dpms = drm_atomic_helper_connector_dpms, .detect = vc4_hdmi_connector_detect, @@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_hdmi_connector_funcs = { static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = { .get_modes = vc4_hdmi_connector_get_modes, - .best_encoder = vc4_hdmi_connector_best_encoder, }; static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev, diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index cb37751bc99f..8f4d5ffc32be 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -111,6 +111,8 @@ static int vc4_atomic_commit(struct drm_device *dev, int i; uint64_t wait_seqno = 0; struct vc4_commit *c; + struct drm_plane *plane; + struct drm_plane_state *new_state; c = commit_init(state); if (!c) @@ -130,13 +132,7 @@ static int vc4_atomic_commit(struct drm_device *dev, return ret; } - for (i = 0; i < dev->mode_config.num_total_plane; i++) { - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *new_state = state->plane_states[i]; - - if (!plane) - continue; - + for_each_plane_in_state(state, plane, new_state, i) { if ((plane->state->fb != new_state->fb) && new_state->fb) { struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(new_state->fb, 0); @@ -152,7 +148,7 @@ static int vc4_atomic_commit(struct drm_device *dev, * the software side now. 
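vc4 keeps its bespoke nonblocking commit (it must track its own seqno waits), so below it only adopts the new swap_state signature; rockchip and virtio in this same series delete their commit machinery outright and provide just an atomic_commit_tail hook, which drm_atomic_helper_commit() invokes after swapping state. A minimal sketch of that hook, mirroring both conversions (hypothetical foo names):

    static void foo_atomic_commit_tail(struct drm_atomic_state *state)
    {
            struct drm_device *dev = state->dev;

            drm_atomic_helper_commit_modeset_disables(dev, state);
            drm_atomic_helper_commit_modeset_enables(dev, state);
            /* true: touch only planes on active CRTCs (runtime PM). */
            drm_atomic_helper_commit_planes(dev, state, true);

            drm_atomic_helper_commit_hw_done(state);

            drm_atomic_helper_wait_for_vblanks(dev, state);
            drm_atomic_helper_cleanup_planes(dev, state);
    }

    static struct drm_mode_config_helper_funcs foo_mode_config_helpers = {
            .atomic_commit_tail = foo_atomic_commit_tail,
    };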
*/ - drm_atomic_helper_swap_state(dev, state); + drm_atomic_helper_swap_state(state, true); /* * Everything below can be run asynchronously without the need to grab diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 4037b52fde31..5d2c3d9fd17a 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -690,9 +690,10 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist) return vc4_state->dlist_count; } -u32 vc4_plane_dlist_size(struct drm_plane_state *state) +u32 vc4_plane_dlist_size(const struct drm_plane_state *state) { - struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); + const struct vc4_plane_state *vc4_state = + container_of(state, typeof(*vc4_state), base); return vc4_state->dlist_count; } diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 341f9be3dde6..1b4cc8b27080 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -235,7 +235,7 @@ static const struct file_operations vgem_driver_fops = { static struct drm_driver vgem_driver = { .driver_features = DRIVER_GEM, - .gem_free_object = vgem_gem_free_object, + .gem_free_object_unlocked = vgem_gem_free_object, .gem_vm_ops = &vgem_gem_vm_ops, .ioctls = vgem_ioctls, .fops = &vgem_driver_fops, diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index d4305da88f44..ac758cdbc1bc 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -29,8 +29,8 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_atomic_helper.h> -#define XRES_MIN 320 -#define YRES_MIN 200 +#define XRES_MIN 32 +#define YRES_MIN 32 #define XRES_DEF 1024 #define YRES_DEF 768 @@ -38,138 +38,11 @@ #define XRES_MAX 8192 #define YRES_MAX 8192 -static void -virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev, - struct virtio_gpu_output *output) -{ - output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR); - output->cursor.resource_id = 0; - virtio_gpu_cursor_ping(vgdev, output); -} - -static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file_priv, - uint32_t handle, - uint32_t width, - uint32_t height, - int32_t hot_x, int32_t hot_y) -{ - struct virtio_gpu_device *vgdev = crtc->dev->dev_private; - struct virtio_gpu_output *output = - container_of(crtc, struct virtio_gpu_output, crtc); - struct drm_gem_object *gobj = NULL; - struct virtio_gpu_object *qobj = NULL; - struct virtio_gpu_fence *fence = NULL; - int ret = 0; - - if (handle == 0) { - virtio_gpu_hide_cursor(vgdev, output); - return 0; - } - - /* lookup the cursor */ - gobj = drm_gem_object_lookup(file_priv, handle); - if (gobj == NULL) - return -ENOENT; - - qobj = gem_to_virtio_gpu_obj(gobj); - - if (!qobj->hw_res_handle) { - ret = -EINVAL; - goto out; - } - - virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0, - cpu_to_le32(64), - cpu_to_le32(64), - 0, 0, &fence); - ret = virtio_gpu_object_reserve(qobj, false); - if (!ret) { - reservation_object_add_excl_fence(qobj->tbo.resv, - &fence->f); - fence_put(&fence->f); - virtio_gpu_object_unreserve(qobj); - virtio_gpu_object_wait(qobj, false); - } - - output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR); - output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle); - output->cursor.hot_x = cpu_to_le32(hot_x); - output->cursor.hot_y = cpu_to_le32(hot_y); - virtio_gpu_cursor_ping(vgdev, output); - ret = 0; - -out: - drm_gem_object_unreference_unlocked(gobj); - 
return ret; -} - -static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc, - int x, int y) -{ - struct virtio_gpu_device *vgdev = crtc->dev->dev_private; - struct virtio_gpu_output *output = - container_of(crtc, struct virtio_gpu_output, crtc); - - output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR); - output->cursor.pos.x = cpu_to_le32(x); - output->cursor.pos.y = cpu_to_le32(y); - virtio_gpu_cursor_ping(vgdev, output); - return 0; -} - -static int virtio_gpu_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t flags) -{ - struct virtio_gpu_device *vgdev = crtc->dev->dev_private; - struct virtio_gpu_output *output = - container_of(crtc, struct virtio_gpu_output, crtc); - struct drm_plane *plane = crtc->primary; - struct virtio_gpu_framebuffer *vgfb; - struct virtio_gpu_object *bo; - unsigned long irqflags; - uint32_t handle; - - plane->fb = fb; - vgfb = to_virtio_gpu_framebuffer(plane->fb); - bo = gem_to_virtio_gpu_obj(vgfb->obj); - handle = bo->hw_res_handle; - - DRM_DEBUG("handle 0x%x%s, crtc %dx%d\n", handle, - bo->dumb ? ", dumb" : "", - crtc->mode.hdisplay, crtc->mode.vdisplay); - if (bo->dumb) { - virtio_gpu_cmd_transfer_to_host_2d - (vgdev, handle, 0, - cpu_to_le32(crtc->mode.hdisplay), - cpu_to_le32(crtc->mode.vdisplay), - 0, 0, NULL); - } - virtio_gpu_cmd_set_scanout(vgdev, output->index, handle, - crtc->mode.hdisplay, - crtc->mode.vdisplay, 0, 0); - virtio_gpu_cmd_resource_flush(vgdev, handle, 0, 0, - crtc->mode.hdisplay, - crtc->mode.vdisplay); - - if (event) { - spin_lock_irqsave(&crtc->dev->event_lock, irqflags); - drm_send_vblank_event(crtc->dev, -1, event); - spin_unlock_irqrestore(&crtc->dev->event_lock, irqflags); - } - - return 0; -} - static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = { - .cursor_set2 = virtio_gpu_crtc_cursor_set, - .cursor_move = virtio_gpu_crtc_cursor_move, .set_config = drm_atomic_helper_set_config, .destroy = drm_crtc_cleanup, - .page_flip = virtio_gpu_page_flip, + .page_flip = drm_atomic_helper_page_flip, .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, @@ -267,6 +140,7 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, spin_lock_irqsave(&crtc->dev->event_lock, flags); if (crtc->state->event) drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); } @@ -341,15 +215,6 @@ static int virtio_gpu_conn_mode_valid(struct drm_connector *connector, return MODE_BAD; } -static struct drm_encoder* -virtio_gpu_best_encoder(struct drm_connector *connector) -{ - struct virtio_gpu_output *virtio_gpu_output = - drm_connector_to_virtio_gpu_output(connector); - - return &virtio_gpu_output->enc; -} - static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = { .mode_set = virtio_gpu_enc_mode_set, .enable = virtio_gpu_enc_enable, @@ -359,7 +224,6 @@ static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = { static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = { .get_modes = virtio_gpu_conn_get_modes, .mode_valid = virtio_gpu_conn_mode_valid, - .best_encoder = virtio_gpu_best_encoder, }; static enum drm_connector_status virtio_gpu_conn_detect( @@ -406,7 +270,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) struct drm_connector *connector = &output->conn; struct drm_encoder *encoder = 
&output->enc; struct drm_crtc *crtc = &output->crtc; - struct drm_plane *plane; + struct drm_plane *primary, *cursor; output->index = index; if (index == 0) { @@ -415,13 +279,17 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) output->info.r.height = cpu_to_le32(YRES_DEF); } - plane = virtio_gpu_plane_init(vgdev, index); - if (IS_ERR(plane)) - return PTR_ERR(plane); - drm_crtc_init_with_planes(dev, crtc, plane, NULL, + primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index); + if (IS_ERR(primary)) + return PTR_ERR(primary); + cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index); + if (IS_ERR(cursor)) + return PTR_ERR(cursor); + drm_crtc_init_with_planes(dev, crtc, primary, cursor, &virtio_gpu_crtc_funcs, NULL); drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs); - plane->crtc = crtc; + primary->crtc = crtc; + cursor->crtc = crtc; drm_connector_init(dev, connector, &virtio_gpu_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); @@ -466,6 +334,24 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev, return &virtio_gpu_fb->base; } +static void vgdev_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + + drm_atomic_helper_commit_modeset_disables(dev, state); + drm_atomic_helper_commit_modeset_enables(dev, state); + drm_atomic_helper_commit_planes(dev, state, true); + + drm_atomic_helper_commit_hw_done(state); + + drm_atomic_helper_wait_for_vblanks(dev, state); + drm_atomic_helper_cleanup_planes(dev, state); +} + +struct drm_mode_config_helper_funcs virtio_mode_config_helpers = { + .atomic_commit_tail = vgdev_atomic_commit_tail, +}; + static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = { .fb_create = virtio_gpu_user_framebuffer_create, .atomic_check = drm_atomic_helper_check, @@ -477,7 +363,8 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) int i; drm_mode_config_init(vgdev->ddev); - vgdev->ddev->mode_config.funcs = (void *)&virtio_gpu_mode_funcs; + vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs; + vgdev->ddev->mode_config.helper_private = &virtio_mode_config_helpers; /* modes will be validated against the framebuffer size */ vgdev->ddev->mode_config.min_width = XRES_MIN; diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 3cc7afa77a35..5820b7020ae5 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -143,7 +143,7 @@ static struct drm_driver driver = { .gem_prime_vunmap = virtgpu_gem_prime_vunmap, .gem_prime_mmap = virtgpu_gem_prime_mmap, - .gem_free_object = virtio_gpu_gem_free_object, + .gem_free_object_unlocked = virtio_gpu_gem_free_object, .gem_open_object = virtio_gpu_gem_object_open, .gem_close_object = virtio_gpu_gem_object_close, .fops = &virtio_gpu_driver_fops, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 0a54f43f846a..acf556a35cb2 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -33,6 +33,7 @@ #include <drm/drmP.h> #include <drm/drm_gem.h> +#include <drm/drm_atomic.h> #include <drm/drm_crtc_helper.h> #include <ttm/ttm_bo_api.h> #include <ttm/ttm_bo_driver.h> @@ -335,6 +336,7 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); /* virtio_gpu_plane.c */ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, + enum drm_plane_type type, int index); /* virtio_gpu_ttm.c */ diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c 
b/drivers/gpu/drm/virtio/virtgpu_plane.c index 70b44a2345ab..925ca25209df 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -38,6 +38,10 @@ static const uint32_t virtio_gpu_formats[] = { DRM_FORMAT_ABGR8888, }; +static const uint32_t virtio_gpu_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + static void virtio_gpu_plane_destroy(struct drm_plane *plane) { kfree(plane); @@ -58,16 +62,22 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane, return 0; } -static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) +static void virtio_gpu_primary_plane_update(struct drm_plane *plane, + struct drm_plane_state *old_state) { struct drm_device *dev = plane->dev; struct virtio_gpu_device *vgdev = dev->dev_private; - struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(plane->crtc); + struct virtio_gpu_output *output = NULL; struct virtio_gpu_framebuffer *vgfb; struct virtio_gpu_object *bo; uint32_t handle; + if (plane->state->crtc) + output = drm_crtc_to_virtio_gpu_output(plane->state->crtc); + if (old_state->crtc) + output = drm_crtc_to_virtio_gpu_output(old_state->crtc); + WARN_ON(!output); + if (plane->state->fb) { vgfb = to_virtio_gpu_framebuffer(plane->state->fb); bo = gem_to_virtio_gpu_obj(vgfb->obj); @@ -75,55 +85,149 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, if (bo->dumb) { virtio_gpu_cmd_transfer_to_host_2d (vgdev, handle, 0, - cpu_to_le32(plane->state->crtc_w), - cpu_to_le32(plane->state->crtc_h), - plane->state->crtc_x, plane->state->crtc_y, NULL); + cpu_to_le32(plane->state->src_w >> 16), + cpu_to_le32(plane->state->src_h >> 16), + plane->state->src_x >> 16, + plane->state->src_y >> 16, NULL); } } else { handle = 0; } - DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d\n", handle, + DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle, plane->state->crtc_w, plane->state->crtc_h, - plane->state->crtc_x, plane->state->crtc_y); + plane->state->crtc_x, plane->state->crtc_y, + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16); virtio_gpu_cmd_set_scanout(vgdev, output->index, handle, - plane->state->crtc_w, - plane->state->crtc_h, - plane->state->crtc_x, - plane->state->crtc_y); + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16); virtio_gpu_cmd_resource_flush(vgdev, handle, - plane->state->crtc_x, - plane->state->crtc_y, - plane->state->crtc_w, - plane->state->crtc_h); + plane->state->src_x >> 16, + plane->state->src_y >> 16, + plane->state->src_w >> 16, + plane->state->src_h >> 16); } +static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_device *dev = plane->dev; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_output *output = NULL; + struct virtio_gpu_framebuffer *vgfb; + struct virtio_gpu_fence *fence = NULL; + struct virtio_gpu_object *bo = NULL; + uint32_t handle; + int ret = 0; -static const struct drm_plane_helper_funcs virtio_gpu_plane_helper_funcs = { + if (plane->state->crtc) + output = drm_crtc_to_virtio_gpu_output(plane->state->crtc); + if (old_state->crtc) + output = drm_crtc_to_virtio_gpu_output(old_state->crtc); + WARN_ON(!output); + + if (plane->state->fb) { + vgfb = to_virtio_gpu_framebuffer(plane->state->fb); + bo = gem_to_virtio_gpu_obj(vgfb->obj); + handle = bo->hw_res_handle; + } else { + handle = 
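/*
 * Hypothetical helper (editor's note, not part of the patch), explaining
 * the ">> 16" shifts introduced above: the src_* fields of struct
 * drm_plane_state are 16.16 fixed-point values, while the crtc_* fields
 * are plain integers. The old code sent crtc coordinates to the host;
 * the fix converts the source rectangle instead:
 */
static inline uint32_t example_fp16_to_int(uint32_t v)
{
	return v >> 16;		/* drop the fractional 16 bits */
}
/* e.g. width = example_fp16_to_int(plane->state->src_w); */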
0; + } + + if (bo && bo->dumb && (plane->state->fb != old_state->fb)) { + /* new cursor -- update & wait */ + virtio_gpu_cmd_transfer_to_host_2d + (vgdev, handle, 0, + cpu_to_le32(plane->state->crtc_w), + cpu_to_le32(plane->state->crtc_h), + 0, 0, &fence); + ret = virtio_gpu_object_reserve(bo, false); + if (!ret) { + reservation_object_add_excl_fence(bo->tbo.resv, + &fence->f); + fence_put(&fence->f); + fence = NULL; + virtio_gpu_object_unreserve(bo); + virtio_gpu_object_wait(bo, false); + } + } + + if (plane->state->fb != old_state->fb) { + DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle, + plane->state->crtc_x, + plane->state->crtc_y, + plane->state->fb ? plane->state->fb->hot_x : 0, + plane->state->fb ? plane->state->fb->hot_y : 0); + output->cursor.hdr.type = + cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR); + output->cursor.resource_id = cpu_to_le32(handle); + if (plane->state->fb) { + output->cursor.hot_x = + cpu_to_le32(plane->state->fb->hot_x); + output->cursor.hot_y = + cpu_to_le32(plane->state->fb->hot_y); + } else { + output->cursor.hot_x = cpu_to_le32(0); + output->cursor.hot_y = cpu_to_le32(0); + } + } else { + DRM_DEBUG("move +%d+%d\n", + plane->state->crtc_x, + plane->state->crtc_y); + output->cursor.hdr.type = + cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR); + } + output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x); + output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y); + virtio_gpu_cursor_ping(vgdev, output); +} + +static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = { + .atomic_check = virtio_gpu_plane_atomic_check, + .atomic_update = virtio_gpu_primary_plane_update, +}; + +static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = { .atomic_check = virtio_gpu_plane_atomic_check, - .atomic_update = virtio_gpu_plane_atomic_update, + .atomic_update = virtio_gpu_cursor_plane_update, }; struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, + enum drm_plane_type type, int index) { struct drm_device *dev = vgdev->ddev; + const struct drm_plane_helper_funcs *funcs; struct drm_plane *plane; - int ret; + const uint32_t *formats; + int ret, nformats; plane = kzalloc(sizeof(*plane), GFP_KERNEL); if (!plane) return ERR_PTR(-ENOMEM); + if (type == DRM_PLANE_TYPE_CURSOR) { + formats = virtio_gpu_cursor_formats; + nformats = ARRAY_SIZE(virtio_gpu_cursor_formats); + funcs = &virtio_gpu_cursor_helper_funcs; + } else { + formats = virtio_gpu_formats; + nformats = ARRAY_SIZE(virtio_gpu_formats); + funcs = &virtio_gpu_primary_helper_funcs; + } ret = drm_universal_plane_init(dev, plane, 1 << index, &virtio_gpu_plane_funcs, - virtio_gpu_formats, - ARRAY_SIZE(virtio_gpu_formats), - DRM_PLANE_TYPE_PRIMARY, NULL); + formats, nformats, + type, NULL); if (ret) goto err_plane_init; - drm_plane_helper_add(plane, &virtio_gpu_plane_helper_funcs); + drm_plane_helper_add(plane, funcs); return plane; err_plane_init: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index e959df6ede83..26ac8e80a478 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -46,7 +46,7 @@ struct vmw_fence_manager { bool goal_irq_on; /* Protected by @goal_irq_mutex */ bool seqno_valid; /* Protected by @lock, and may not be set to true without the @goal_irq_mutex held. 
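/*
 * Hedged sketch (editor's note) of the synchronization above, reusing
 * the same driver calls as the patch: cursor pings travel on their own
 * virtqueue, so they are not ordered against the image transfer on the
 * control queue; the driver therefore fences the transfer and waits
 * before pinging. Stripped to its skeleton:
 */
static void example_transfer_and_wait(struct virtio_gpu_object *bo,
				      struct virtio_gpu_fence *fence)
{
	/* the 2D transfer has already been queued with @fence attached */
	if (!virtio_gpu_object_reserve(bo, false)) {
		reservation_object_add_excl_fence(bo->tbo.resv, &fence->f);
		fence_put(&fence->f);
		virtio_gpu_object_unreserve(bo);
		virtio_gpu_object_wait(bo, false);	/* host is done now */
	}
}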
*/ - unsigned ctx; + u64 ctx; }; struct vmw_user_fence { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 55231cce73a0..8a69d4da40b5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1404,9 +1404,9 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num, return 0; } -void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, - u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size) +int vmw_du_crtc_gamma_set(struct drm_crtc *crtc, + u16 *r, u16 *g, u16 *b, + uint32_t size) { struct vmw_private *dev_priv = vmw_priv(crtc->dev); int i; @@ -1418,6 +1418,8 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8); vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8); } + + return 0; } int vmw_du_connector_dpms(struct drm_connector *connector, int mode) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 57203212c501..ff4803c107bc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -195,9 +195,9 @@ struct vmw_display_unit { void vmw_du_cleanup(struct vmw_display_unit *du); void vmw_du_crtc_save(struct drm_crtc *crtc); void vmw_du_crtc_restore(struct drm_crtc *crtc); -void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, +int vmw_du_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size); + uint32_t size); int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height, int32_t hot_x, int32_t hot_y); diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index cbd7c986d926..2df216b39cc5 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -30,6 +30,7 @@ #define pr_fmt(fmt) "vga_switcheroo: " fmt +#include <linux/apple-gmux.h> #include <linux/console.h> #include <linux/debugfs.h> #include <linux/fb.h> @@ -308,7 +309,8 @@ static int register_client(struct pci_dev *pdev, * * Register vga client (GPU). Enable vga_switcheroo if another GPU and a * handler have already registered. The power state of the client is assumed - * to be ON. + * to be ON. Beforehand, vga_switcheroo_client_probe_defer() shall be called + * to ensure that all prerequisites are met. * * Return: 0 on success, -ENOMEM on memory allocation error. */ @@ -329,7 +331,8 @@ EXPORT_SYMBOL(vga_switcheroo_register_client); * @id: client identifier * * Register audio client (audio device on a GPU). The power state of the - * client is assumed to be ON. + * client is assumed to be ON. Beforehand, vga_switcheroo_client_probe_defer() + * shall be called to ensure that all prerequisites are met. * * Return: 0 on success, -ENOMEM on memory allocation error. */ @@ -376,6 +379,33 @@ find_active_client(struct list_head *head) } /** + * vga_switcheroo_client_probe_defer() - whether to defer probing a given client + * @pdev: client pci device + * + * Determine whether any prerequisites are not fulfilled to probe a given + * client. Drivers shall invoke this early on in their ->probe callback + * and return %-EPROBE_DEFER if it evaluates to %true. Thou shalt not + * register the client ere thou hast called this. + * + * Return: %true if probing should be deferred, otherwise %false. 
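/*
 * Hypothetical stub (editor's note, not part of the patch): the vmwgfx
 * signature change above appears to track a core update of the
 * drm_crtc_funcs.gamma_set hook, where the unused "start" parameter is
 * gone and the hook now reports errors. A minimal conforming shape:
 */
static int example_crtc_gamma_set(struct drm_crtc *crtc,
				  u16 *r, u16 *g, u16 *b, uint32_t size)
{
	uint32_t i;

	for (i = 0; i < size; i++) {
		/* program palette entry i from r[i], g[i], b[i] */
	}
	return 0;
}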
+ */ +bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) +{ + if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { + /* + * apple-gmux is needed on pre-retina MacBook Pro + * to probe the panel if pdev is the inactive GPU. + */ + if (apple_gmux_present() && pdev != vga_default_device() && + !vgasr_priv.handler_flags) + return true; + } + + return false; +} +EXPORT_SYMBOL(vga_switcheroo_client_probe_defer); + +/** * vga_switcheroo_get_client_state() - obtain power state of a given client * @pdev: client pci device * diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 6bd881be24ea..5eb1f9e17a98 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -41,6 +41,7 @@ #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) +#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) @@ -82,6 +83,7 @@ struct its_node { u64 flags; u32 ite_size; u32 device_ids; + int numa_node; }; #define ITS_ITT_ALIGN SZ_256 @@ -613,11 +615,23 @@ static void its_unmask_irq(struct irq_data *d) static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); + unsigned int cpu; + const struct cpumask *cpu_mask = cpu_online_mask; struct its_device *its_dev = irq_data_get_irq_chip_data(d); struct its_collection *target_col; u32 id = its_get_event_id(d); + /* lpi cannot be routed to a redistributor that is on a foreign node */ + if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + if (its_dev->its->numa_node >= 0) { + cpu_mask = cpumask_of_node(its_dev->its->numa_node); + if (!cpumask_intersects(mask_val, cpu_mask)) + return -EINVAL; + } + } + + cpu = cpumask_any_and(mask_val, cpu_mask); + if (cpu >= nr_cpu_ids) return -EINVAL; @@ -1101,6 +1115,16 @@ static void its_cpu_init_collection(void) list_for_each_entry(its, &its_nodes, entry) { u64 target; + /* avoid cross node collections and its mapping */ + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + struct device_node *cpu_node; + + cpu_node = of_get_cpu_node(cpu, NULL); + if (its->numa_node != NUMA_NO_NODE && + its->numa_node != of_node_to_nid(cpu_node)) + continue; + } + /* * We now have to bind each collection to its target * redistributor. 
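/*
 * Hedged usage sketch (editor's note, not part of the patch): a GPU
 * driver is expected to call the new helper at the very top of its PCI
 * ->probe, before allocating state or registering the client:
 */
static int example_gpu_pci_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	if (vga_switcheroo_client_probe_defer(pdev))
		return -EPROBE_DEFER;

	/* ... normal probe path, ending in
	 *     vga_switcheroo_register_client() ... */
	return 0;
}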
@@ -1351,9 +1375,14 @@ static void its_irq_domain_activate(struct irq_domain *domain, { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); + const struct cpumask *cpu_mask = cpu_online_mask; + + /* get the cpu_mask of local node */ + if (its_dev->its->numa_node >= 0) + cpu_mask = cpumask_of_node(its_dev->its->numa_node); /* Bind the LPI to the first possible CPU */ - its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask); + its_dev->event_map.col_map[event] = cpumask_first(cpu_mask); /* Map the GIC IRQ and event to the device */ its_send_mapvi(its_dev, d->hwirq, event); @@ -1443,6 +1472,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data) its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; } +static void __maybe_unused its_enable_quirk_cavium_23144(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; +} + static const struct gic_quirk its_quirks[] = { #ifdef CONFIG_CAVIUM_ERRATUM_22375 { @@ -1452,6 +1488,14 @@ static const struct gic_quirk its_quirks[] = { .init = its_enable_quirk_cavium_22375, }, #endif +#ifdef CONFIG_CAVIUM_ERRATUM_23144 + { + .desc = "ITS: Cavium erratum 23144", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_23144, + }, +#endif { } }; @@ -1514,6 +1558,7 @@ static int __init its_probe(struct device_node *node, its->base = its_base; its->phys_base = res.start; its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; + its->numa_node = of_node_to_nid(node); its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); if (!its->cmd_base) { diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index fb042ba9a3db..2c5ba0e704bf 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -155,7 +155,7 @@ static void gic_enable_redist(bool enable) while (count--) { val = readl_relaxed(rbase + GICR_WAKER); - if (enable ^ (val & GICR_WAKER_ChildrenAsleep)) + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) break; cpu_relax(); udelay(1); diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c index e7155db01d55..73addb4b625b 100644 --- a/drivers/irqchip/irq-pic32-evic.c +++ b/drivers/irqchip/irq-pic32-evic.c @@ -91,7 +91,7 @@ static int pic32_set_type_edge(struct irq_data *data, /* set polarity for external interrupts only */ for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) { if (priv->ext_irqs[i] == data->hwirq) { - ret = pic32_set_ext_polarity(i + 1, flow_type); + ret = pic32_set_ext_polarity(i, flow_type); if (ret) return ret; } diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c index 70c28d19ea04..22cf60991df6 100644 --- a/drivers/media/platform/omap/omap_vout.c +++ b/drivers/media/platform/omap/omap_vout.c @@ -45,7 +45,7 @@ #include <media/v4l2-ioctl.h> #include <video/omapvrfb.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "omap_voutlib.h" #include "omap_voutdef.h" diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h index 9ccfe1f475a4..94b5d65afb19 100644 --- a/drivers/media/platform/omap/omap_voutdef.h +++ b/drivers/media/platform/omap/omap_voutdef.h @@ -11,7 +11,7 @@ #ifndef OMAP_VOUTDEF_H #define OMAP_VOUTDEF_H -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omapvrfb.h> #define YUYV_BPP 2 diff --git a/drivers/media/platform/omap/omap_voutlib.c 
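/*
 * Simplified sketch (editor's note; the function name is illustrative)
 * of the erratum-23144 rule used in the hunks above: on affected
 * ThunderX parts an LPI may only target a redistributor on the ITS's
 * own NUMA node, so the requested affinity mask is first intersected
 * with that node's CPU mask:
 */
static int example_pick_target_cpu(const struct cpumask *requested,
				   int its_node)
{
	const struct cpumask *allowed = cpu_online_mask;

	if (its_node >= 0)
		allowed = cpumask_of_node(its_node);

	if (!cpumask_intersects(requested, allowed))
		return -EINVAL;	/* only foreign-node CPUs were requested */

	return cpumask_any_and(requested, allowed);
}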
b/drivers/media/platform/omap/omap_voutlib.c index 80b0d88f125c..58a25fdf0cce 100644 --- a/drivers/media/platform/omap/omap_voutlib.c +++ b/drivers/media/platform/omap/omap_voutlib.c @@ -26,7 +26,7 @@ #include <linux/dma-mapping.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "omap_voutlib.h" diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index c984321d1881..5d438ad3ee32 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1276,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card) * switch to HS200 mode if bus width is set successfully. */ err = mmc_select_bus_width(card); - if (!err) { + if (err >= 0) { val = EXT_CSD_TIMING_HS200 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, @@ -1583,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, } else if (mmc_card_hs(card)) { /* Select the desired bus width optionally */ err = mmc_select_bus_width(card); - if (!err) { + if (err >= 0) { err = mmc_select_hs_ddr(card); if (err) goto free_card; diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 7fc8b7aa83f0..2ee4c21ec55e 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -970,8 +970,8 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = { [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, - [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 }, - [SDXC_CLK_50M_DDR_8BIT] = { .output = 90, .sample = 120 }, + [SDXC_CLK_50M_DDR] = { .output = 54, .sample = 36 }, + [SDXC_CLK_50M_DDR_8BIT] = { .output = 72, .sample = 72 }, }; static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, @@ -1129,11 +1129,6 @@ static int sunxi_mmc_probe(struct platform_device *pdev) MMC_CAP_1_8V_DDR | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; - /* TODO MMC DDR is not working on A80 */ - if (of_device_is_compatible(pdev->dev.of_node, - "allwinner,sun9i-a80-mmc")) - mmc->caps &= ~MMC_CAP_1_8V_DDR; - ret = mmc_of_parse(mmc); if (ret) goto error_free_dma; diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 16419f550eff..058460bdd5a6 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) priv->bus = bus; bus->priv = priv; bus->parent = priv->dev; - bus->name = "Synopsys MII Bus", + bus->name = "Synopsys MII Bus"; bus->read = &arc_mdio_read; bus->write = &arc_mdio_write; bus->reset = &arc_mdio_reset; diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h index 8fc93c5f6abc..d02c4240b7df 100644 --- a/drivers/net/ethernet/atheros/alx/alx.h +++ b/drivers/net/ethernet/atheros/alx/alx.h @@ -96,6 +96,10 @@ struct alx_priv { unsigned int rx_ringsz; unsigned int rxbuf_size; + struct page *rx_page; + unsigned int rx_page_offset; + unsigned int rx_frag_size; + struct napi_struct napi; struct alx_tx_queue txq; struct alx_rx_queue rxq; diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 9fe8b5e310d1..c98acdc0d14f 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -70,6 +70,35 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry) } } +static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp) +{ + struct sk_buff *skb; + struct page *page; + + if 
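/*
 * Editor's note on the two mmc "err >= 0" fixes above (hedged reading
 * of the call's contract): mmc_select_bus_width() returns the selected
 * width (a non-negative value) on success and a negative errno on
 * failure, so the old "if (!err)" treated a successful wide-bus switch
 * as a failure and silently skipped the HS200/DDR upgrade. Corrected
 * call pattern:
 */
static int example_after_bus_width(struct mmc_card *card)
{
	int err = mmc_select_bus_width(card);

	if (err < 0)
		return err;	/* only negative values are real errors */

	/* non-negative: safe to continue with the faster timing mode */
	return 0;
}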
(alx->rx_frag_size > PAGE_SIZE) + return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); + + page = alx->rx_page; + if (!page) { + alx->rx_page = page = alloc_page(gfp); + if (unlikely(!page)) + return NULL; + alx->rx_page_offset = 0; + } + + skb = build_skb(page_address(page) + alx->rx_page_offset, + alx->rx_frag_size); + if (likely(skb)) { + alx->rx_page_offset += alx->rx_frag_size; + if (alx->rx_page_offset >= PAGE_SIZE) + alx->rx_page = NULL; + else + get_page(page); + } + return skb; +} + + static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) { struct alx_rx_queue *rxq = &alx->rxq; @@ -86,7 +115,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) while (!cur_buf->skb && next != rxq->read_idx) { struct alx_rfd *rfd = &rxq->rfd[cur]; - skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); + skb = alx_alloc_skb(alx, gfp); if (!skb) break; dma = dma_map_single(&alx->hw.pdev->dev, @@ -124,6 +153,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); } + return count; } @@ -592,6 +622,11 @@ static void alx_free_rings(struct alx_priv *alx) kfree(alx->txq.bufs); kfree(alx->rxq.bufs); + if (alx->rx_page) { + put_page(alx->rx_page); + alx->rx_page = NULL; + } + dma_free_coherent(&alx->hw.pdev->dev, alx->descmem.size, alx->descmem.virt, @@ -646,6 +681,7 @@ static int alx_request_irq(struct alx_priv *alx) alx->dev->name, alx); if (!err) goto out; + /* fall back to legacy interrupt */ pci_disable_msi(alx->hw.pdev); } @@ -689,6 +725,7 @@ static int alx_init_sw(struct alx_priv *alx) struct pci_dev *pdev = alx->hw.pdev; struct alx_hw *hw = &alx->hw; int err; + unsigned int head_size; err = alx_identify_hw(alx); if (err) { @@ -704,7 +741,12 @@ static int alx_init_sw(struct alx_priv *alx) hw->smb_timer = 400; hw->mtu = alx->dev->mtu; + alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu); + head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + alx->rx_frag_size = roundup_pow_of_two(head_size); + alx->tx_ringsz = 256; alx->rx_ringsz = 512; hw->imt = 200; @@ -806,6 +848,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) { struct alx_priv *alx = netdev_priv(netdev); int max_frame = ALX_MAX_FRAME_LEN(mtu); + unsigned int head_size; if ((max_frame < ALX_MIN_FRAME_SIZE) || (max_frame > ALX_MAX_FRAME_SIZE)) @@ -817,6 +860,9 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) netdev->mtu = mtu; alx->hw.mtu = mtu; alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); + head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + alx->rx_frag_size = roundup_pow_of_two(head_size); netdev_update_features(netdev); if (netif_running(netdev)) alx_reinit(alx); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 0a5b770cefaa..c5fe915870ad 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -13941,14 +13941,14 @@ static int bnx2x_init_one(struct pci_dev *pdev, bp->doorbells = bnx2x_vf_doorbells(bp); rc = bnx2x_vf_pci_alloc(bp); if (rc) - goto init_one_exit; + goto init_one_freemem; } else { doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); if (doorbell_size > pci_resource_len(pdev, 2)) { dev_err(&bp->pdev->dev, "Cannot map doorbells, bar size too small, aborting\n"); rc = -ENOMEM; - goto init_one_exit; + goto init_one_freemem; } bp->doorbells = 
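/*
 * Editor's note restating the sizing rule the alx hunks above rely on:
 * build_skb() fragments carved out of a shared page must cover the
 * packet buffer plus skb overhead, rounded up to a power of two so
 * fragments tile the page exactly; anything larger than a page falls
 * back to an ordinary skb allocation:
 */
static unsigned int example_rx_frag_size(unsigned int rxbuf_size)
{
	unsigned int head_size;

	head_size = SKB_DATA_ALIGN(rxbuf_size + NET_SKB_PAD) +
		    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return roundup_pow_of_two(head_size);
}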
ioremap_nocache(pci_resource_start(pdev, 2), doorbell_size); @@ -13957,19 +13957,19 @@ static int bnx2x_init_one(struct pci_dev *pdev, dev_err(&bp->pdev->dev, "Cannot map doorbell space, aborting\n"); rc = -ENOMEM; - goto init_one_exit; + goto init_one_freemem; } if (IS_VF(bp)) { rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); if (rc) - goto init_one_exit; + goto init_one_freemem; } /* Enable SRIOV if capability found in configuration space */ rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); if (rc) - goto init_one_exit; + goto init_one_freemem; /* calc qm_cid_count */ bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); @@ -13988,7 +13988,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, rc = bnx2x_set_int_mode(bp); if (rc) { dev_err(&pdev->dev, "Cannot set interrupts\n"); - goto init_one_exit; + goto init_one_freemem; } BNX2X_DEV_INFO("set interrupts successfully\n"); @@ -13996,7 +13996,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, rc = register_netdev(dev); if (rc) { dev_err(&pdev->dev, "Cannot register net device\n"); - goto init_one_exit; + goto init_one_freemem; } BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); @@ -14029,6 +14029,9 @@ static int bnx2x_init_one(struct pci_dev *pdev, return 0; +init_one_freemem: + bnx2x_free_mem_bp(bp); + init_one_exit: bnx2x_disable_pcie_error_reporting(bp); diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 085f9125cf42..06f031715b57 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -205,8 +205,10 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) * re-adding ourselves to the poll list. */ - if (priv->tx_skb && !tx_ctrl_ct) + if (priv->tx_skb && !tx_ctrl_ct) { + nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); napi_reschedule(napi); + } } return work_done; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ca2cccc594fd..3c0255e98535 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1197,10 +1197,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); bdp->cbd_bufaddr = cpu_to_fec32(0); - if (!skb) { - bdp = fec_enet_get_nextdesc(bdp, &txq->bd); - continue; - } + if (!skb) + goto skb_done; /* Check for errors. 
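/*
 * Generic illustration (editor's note; all names hypothetical) of the
 * bnx2x fix above: every failure after bnx2x_init_bp() used to jump
 * past bnx2x_free_mem_bp(), leaking those allocations. The
 * label-per-stage unwind idiom avoids that class of leak:
 */
static int example_init_one(struct example_dev *d)
{
	int rc;

	rc = example_alloc_mem(d);
	if (rc)
		return rc;

	rc = example_register(d);
	if (rc)
		goto err_free_mem;	/* undo exactly what succeeded */

	return 0;

err_free_mem:
	example_free_mem(d);
	return rc;
}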
*/ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | @@ -1239,7 +1237,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) /* Free the sk buffer associated with this last transmit */ dev_kfree_skb_any(skb); - +skb_done: /* Make sure the update to bdp and tx_skbuff are performed * before dirty_tx */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 3d746c887873..67a648c7d3a9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -46,7 +46,6 @@ static u32 hns_nic_get_link(struct net_device *net_dev) u32 link_stat = priv->link; struct hnae_handle *h; - assert(priv && priv->ae_handle); h = priv->ae_handle; if (priv->phy) { @@ -646,8 +645,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev, { struct hns_nic_priv *priv = netdev_priv(net_dev); - assert(priv); - strncpy(drvinfo->version, HNAE_DRIVER_VERSION, sizeof(drvinfo->version)); drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; @@ -720,8 +717,6 @@ static int hns_set_pauseparam(struct net_device *net_dev, struct hnae_handle *h; struct hnae_ae_ops *ops; - assert(priv || priv->ae_handle); - h = priv->ae_handle; ops = h->dev->ops; @@ -780,8 +775,6 @@ static int hns_set_coalesce(struct net_device *net_dev, struct hnae_ae_ops *ops; int ret; - assert(priv || priv->ae_handle); - ops = priv->ae_handle->dev->ops; if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs) @@ -1111,8 +1104,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd, struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_ae_ops *ops; - assert(priv || priv->ae_handle); - ops = priv->ae_handle->dev->ops; cmd->version = HNS_CHIP_VERSION; @@ -1135,8 +1126,6 @@ static int hns_get_regs_len(struct net_device *net_dev) struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_ae_ops *ops; - assert(priv || priv->ae_handle); - ops = priv->ae_handle->dev->ops; if (!ops->get_regs_len) { netdev_err(net_dev, "ops->get_regs_len is null!\n"); diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c index 01fccec632ec..466939f8f0cf 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.c +++ b/drivers/net/ethernet/marvell/mvneta_bm.c @@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); hwbm_pool->construct = mvneta_bm_construct; hwbm_pool->priv = new_pool; + spin_lock_init(&hwbm_pool->lock); /* Create new pool */ err = mvneta_bm_pool_create(priv, new_pool); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index c761194bb323..fc95affaf76b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - data[index++] = ((unsigned long *)&priv->stats)[i]; + data[index++] = ((unsigned long *)&dev->stats)[i]; for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 92e0624f4cf0..19ceced6736c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1296,15 +1296,16 @@ static void 
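/*
 * Editor's note on why the hns asserts above could simply go: several
 * were written as assert(priv || priv->ae_handle), where the || makes
 * the second operand be evaluated exactly when priv is NULL, i.e. it
 * dereferences the very pointer it is meant to check. Had a guard
 * really been needed, the usual kernel shape would be:
 */
static int example_guard(struct hns_nic_priv *priv)
{
	if (WARN_ON(!priv || !priv->ae_handle))
		return -EINVAL;
	return 0;
}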
mlx4_en_tx_timeout(struct net_device *dev) } -static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev) +static struct rtnl_link_stats64 * +mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx4_en_priv *priv = netdev_priv(dev); spin_lock_bh(&priv->stats_lock); - memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats)); + netdev_stats_to_stats64(stats, &dev->stats); spin_unlock_bh(&priv->stats_lock); - return &priv->ret_stats; + return stats; } static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) @@ -1876,7 +1877,6 @@ static void mlx4_en_clear_stats(struct net_device *dev) if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) en_dbg(HW, priv, "Failed dumping statistics\n"); - memset(&priv->stats, 0, sizeof(priv->stats)); memset(&priv->pstats, 0, sizeof(priv->pstats)); memset(&priv->pkstats, 0, sizeof(priv->pkstats)); memset(&priv->port_stats, 0, sizeof(priv->port_stats)); @@ -1892,6 +1892,11 @@ static void mlx4_en_clear_stats(struct net_device *dev) priv->tx_ring[i]->bytes = 0; priv->tx_ring[i]->packets = 0; priv->tx_ring[i]->tx_csum = 0; + priv->tx_ring[i]->tx_dropped = 0; + priv->tx_ring[i]->queue_stopped = 0; + priv->tx_ring[i]->wake_queue = 0; + priv->tx_ring[i]->tso_packets = 0; + priv->tx_ring[i]->xmit_more = 0; } for (i = 0; i < priv->rx_ring_num; i++) { priv->rx_ring[i]->bytes = 0; @@ -2482,7 +2487,7 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_stop = mlx4_en_close, .ndo_start_xmit = mlx4_en_xmit, .ndo_select_queue = mlx4_en_select_queue, - .ndo_get_stats = mlx4_en_get_stats, + .ndo_get_stats64 = mlx4_en_get_stats64, .ndo_set_rx_mode = mlx4_en_set_rx_mode, .ndo_set_mac_address = mlx4_en_set_mac, .ndo_validate_addr = eth_validate_addr, @@ -2514,7 +2519,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_stop = mlx4_en_close, .ndo_start_xmit = mlx4_en_xmit, .ndo_select_queue = mlx4_en_select_queue, - .ndo_get_stats = mlx4_en_get_stats, + .ndo_get_stats64 = mlx4_en_get_stats64, .ndo_set_rx_mode = mlx4_en_set_rx_mode, .ndo_set_mac_address = mlx4_en_set_mac, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 20b6c2e678b8..5aa8b751f417 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) struct mlx4_counter tmp_counter_stats; struct mlx4_en_stat_out_mbox *mlx4_en_stats; struct mlx4_en_stat_out_flow_control_mbox *flowstats; - struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); - struct net_device_stats *stats = &priv->stats; + struct net_device *dev = mdev->pndev[port]; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; struct mlx4_cmd_mailbox *mailbox; u64 in_mod = reset << 8 | port; int err; @@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) } stats->tx_packets = 0; stats->tx_bytes = 0; + stats->tx_dropped = 0; priv->port_stats.tx_chksum_offload = 0; priv->port_stats.queue_stopped = 0; priv->port_stats.wake_queue = 0; @@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) stats->tx_packets += ring->packets; stats->tx_bytes += ring->bytes; + stats->tx_dropped += ring->tx_dropped; priv->port_stats.tx_chksum_offload += ring->tx_csum; priv->port_stats.queue_stopped += ring->queue_stopped; priv->port_stats.wake_queue 
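/*
 * Hedged sketch (editor's note) of the interface switch above; struct
 * example_priv is invented for the example. Moving from ndo_get_stats
 * to ndo_get_stats64 gives userspace full 64-bit counters and lets
 * mlx4 drop its private net_device_stats copies: counters are folded
 * into dev->stats and converted under the driver's own lock.
 */
struct example_priv {
	spinlock_t stats_lock;
};

static struct rtnl_link_stats64 *
example_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct example_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	netdev_stats_to_stats64(stats, &dev->stats);
	spin_unlock_bh(&priv->stats_lock);

	return stats;	/* the hook returns its storage in this era */
}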
+= ring->wake_queue; @@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0, &mlx4_en_stats->MCAST_prio_1, NUM_PRIORITIES); - stats->collisions = 0; stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) + sw_rx_dropped; stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); - stats->rx_over_errors = 0; stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); - stats->rx_frame_errors = 0; stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); - stats->rx_missed_errors = 0; - stats->tx_aborted_errors = 0; - stats->tx_carrier_errors = 0; - stats->tx_fifo_errors = 0; - stats->tx_heartbeat_errors = 0; - stats->tx_window_errors = 0; - stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP); + stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP); /* RX stats */ priv->pkstats.rx_multicast_packets = stats->multicast; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f6e61570cb2c..76aa4d27183c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -726,12 +726,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) bool inline_ok; u32 ring_cons; - if (!priv->port_up) - goto tx_drop; - tx_ind = skb_get_queue_mapping(skb); ring = priv->tx_ring[tx_ind]; + if (!priv->port_up) + goto tx_drop; + /* fetch ring->cons far ahead before needing it to avoid stall */ ring_cons = ACCESS_ONCE(ring->cons); @@ -1030,7 +1030,7 @@ tx_drop_unmap: tx_drop: dev_kfree_skb_any(skb); - priv->stats.tx_dropped++; + ring->tx_dropped++; return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index cc84e09f324a..467d47ed2c39 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -270,6 +270,7 @@ struct mlx4_en_tx_ring { unsigned long tx_csum; unsigned long tso_packets; unsigned long xmit_more; + unsigned int tx_dropped; struct mlx4_bf bf; unsigned long queue_stopped; @@ -482,8 +483,6 @@ struct mlx4_en_priv { struct mlx4_en_port_profile *prof; struct net_device *dev; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - struct net_device_stats stats; - struct net_device_stats ret_stats; struct mlx4_en_port_state port_state; spinlock_t stats_lock; struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES]; diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index b3cc3ab63799..6fc156a3918d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node) goto free_uar; } - uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); + uar->bf_map = io_mapping_map_wc(priv->bf_mapping, + uar->index << PAGE_SHIFT, + PAGE_SIZE); if (!uar->bf_map) { err = -ENOMEM; goto unamp_uar; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index cbf58e1f9333..21ec1c2df2c7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -192,9 +192,10 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl, int count, bool dcbx_enabled) { - u8 tc, priority, priority_map; + u8 tc, priority_map; enum dcbx_protocol_type type; u16 protocol_id; + int 
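/*
 * Hedged note (editor's) on the mlx4 pd.c hunk above: the new third
 * argument reflects an io_mapping API change, with io_mapping_map_wc()
 * taking an explicit length rather than assuming one page. Call
 * pattern, wrapped for illustration:
 */
static void __iomem *example_map_bf_page(struct io_mapping *bf_mapping,
					 unsigned int index)
{
	return io_mapping_map_wc(bf_mapping, index << PAGE_SHIFT, PAGE_SIZE);
}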
priority; bool enable; int i; @@ -221,7 +222,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, * indication, but we only got here if there was an * app tlv for the protocol, so dcbx must be enabled. */ - enable = !!(type == DCBX_PROTOCOL_ETH); + enable = !(type == DCBX_PROTOCOL_ETH); qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, priority, tc, type); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 089016f46f26..2d89e8c16b32 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -155,12 +155,14 @@ void qed_resc_free(struct qed_dev *cdev) } } -static int qed_init_qm_info(struct qed_hwfn *p_hwfn) +static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable) { u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct init_qm_port_params *p_qm_port; u16 num_pqs, multi_cos_tcs = 1; + u8 pf_wfq = qm_info->pf_wfq; + u32 pf_rl = qm_info->pf_rl; u16 num_vfs = 0; #ifdef CONFIG_QED_SRIOV @@ -182,23 +184,28 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) /* PQs will be arranged as follows: First per-TC PQ then pure-LB quete. */ - qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) * - num_pqs, GFP_KERNEL); + qm_info->qm_pq_params = kcalloc(num_pqs, + sizeof(struct init_qm_pq_params), + b_sleepable ? GFP_KERNEL : GFP_ATOMIC); if (!qm_info->qm_pq_params) goto alloc_err; - qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) * - num_vports, GFP_KERNEL); + qm_info->qm_vport_params = kcalloc(num_vports, + sizeof(struct init_qm_vport_params), + b_sleepable ? GFP_KERNEL + : GFP_ATOMIC); if (!qm_info->qm_vport_params) goto alloc_err; - qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * - MAX_NUM_PORTS, GFP_KERNEL); + qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS, + sizeof(struct init_qm_port_params), + b_sleepable ? GFP_KERNEL + : GFP_ATOMIC); if (!qm_info->qm_port_params) goto alloc_err; - qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data), - GFP_KERNEL); + qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data), + b_sleepable ? GFP_KERNEL : GFP_ATOMIC); if (!qm_info->wfq_data) goto alloc_err; @@ -264,10 +271,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) for (i = 0; i < qm_info->num_vports; i++) qm_info->qm_vport_params[i].vport_wfq = 1; - qm_info->pf_wfq = 0; - qm_info->pf_rl = 0; qm_info->vport_rl_en = 1; qm_info->vport_wfq_en = 1; + qm_info->pf_rl = pf_rl; + qm_info->pf_wfq = pf_wfq; return 0; @@ -299,7 +306,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_qm_info_free(p_hwfn); /* initialize qed's qm data structure */ - rc = qed_init_qm_info(p_hwfn); + rc = qed_init_qm_info(p_hwfn, false); if (rc) return rc; @@ -388,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev) goto alloc_err; /* Prepare and process QM requirements */ - rc = qed_init_qm_info(p_hwfn); + rc = qed_init_qm_info(p_hwfn, true); if (rc) goto alloc_err; @@ -581,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) hw_mode |= 1 << MODE_ASIC; + if (p_hwfn->cdev->num_hwfns > 1) + hw_mode |= 1 << MODE_100G; + p_hwfn->hw_info.hw_mode = hw_mode; + + DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP), + "Configuring function for hw_mode: 0x%08x\n", + p_hwfn->hw_info.hw_mode); } /* Init run time data for all PFs on an engine. 
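/*
 * Editor's note on the b_sleepable flag threaded through above:
 * qed_init_qm_info() is now reached both from the probe path (which may
 * sleep; qed_resc_alloc() passes true) and from qed_qm_reconf() (which
 * may not; it passes false), so every allocation derives its GFP class
 * from the caller. The idiom in isolation:
 */
static void *example_alloc_array(size_t n, size_t size, bool can_sleep)
{
	return kcalloc(n, size, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
}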
*/ @@ -821,6 +835,11 @@ int qed_hw_init(struct qed_dev *cdev, u32 load_code, param; int rc, mfw_rc, i; + if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { + DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); + return -EINVAL; + } + if (IS_PF(cdev)) { rc = qed_init_fw_data(cdev, bin_fw_data); if (rc != 0) @@ -2086,6 +2105,13 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) { int i; + if (cdev->num_hwfns > 1) { + DP_VERBOSE(cdev, + NETIF_MSG_LINK, + "WFQ configuration is not supported for this device\n"); + return; + } + for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 8b22f87033ce..753064679bde 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -413,15 +413,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) /* Fallthrough */ case QED_INT_MODE_MSI: - rc = pci_enable_msi(cdev->pdev); - if (!rc) { - int_params->out.int_mode = QED_INT_MODE_MSI; - goto out; - } + if (cdev->num_hwfns == 1) { + rc = pci_enable_msi(cdev->pdev); + if (!rc) { + int_params->out.int_mode = QED_INT_MODE_MSI; + goto out; + } - DP_NOTICE(cdev, "Failed to enable MSI\n"); - if (force_mode) - goto out; + DP_NOTICE(cdev, "Failed to enable MSI\n"); + if (force_mode) + goto out; + } /* Fallthrough */ case QED_INT_MODE_INTA: diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 1bc75358cbc4..ad3cae3b7243 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -230,7 +230,10 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) case ETH_SS_PRIV_FLAGS: return QEDE_PRI_FLAG_LEN; case ETH_SS_TEST: - return QEDE_ETHTOOL_TEST_MAX; + if (!IS_VF(edev)) + return QEDE_ETHTOOL_TEST_MAX; + else + return 0; default: DP_VERBOSE(edev, QED_MSG_DEBUG, "Unsupported stringset 0x%08x\n", stringset); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 337e839ca586..5d00d1404bfc 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1824,7 +1824,7 @@ static int qede_set_vf_rate(struct net_device *dev, int vfidx, { struct qede_dev *edev = netdev_priv(dev); - return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate, + return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate, max_tx_rate); } @@ -2091,6 +2091,29 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) edev->accept_any_vlan = false; } +int qede_set_features(struct net_device *dev, netdev_features_t features) +{ + struct qede_dev *edev = netdev_priv(dev); + netdev_features_t changes = features ^ dev->features; + bool need_reload = false; + + /* No action needed if hardware GRO is disabled during driver load */ + if (changes & NETIF_F_GRO) { + if (dev->features & NETIF_F_GRO) + need_reload = !edev->gro_disable; + else + need_reload = edev->gro_disable; + } + + if (need_reload && netif_running(edev->ndev)) { + dev->features = features; + qede_reload(edev, NULL, NULL); + return 1; + } + + return 0; +} + #ifdef CONFIG_QEDE_VXLAN static void qede_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, __be16 port) @@ -2175,6 +2198,7 @@ static const struct net_device_ops qede_netdev_ops = { #endif .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, 
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, + .ndo_set_features = qede_set_features, .ndo_get_stats64 = qede_get_stats64, #ifdef CONFIG_QED_SRIOV .ndo_set_vf_link_state = qede_set_vf_link_state, diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 83d72106471c..fd5d1c93b55b 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -4846,7 +4846,6 @@ static void ql_eeh_close(struct net_device *ndev) } /* Disabling the timer */ - del_timer_sync(&qdev->timer); ql_cancel_all_work_sync(qdev); for (i = 0; i < qdev->rss_ring_count; i++) @@ -4873,6 +4872,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, return PCI_ERS_RESULT_CAN_RECOVER; case pci_channel_io_frozen: netif_device_detach(ndev); + del_timer_sync(&qdev->timer); if (netif_running(ndev)) ql_eeh_close(ndev); pci_disable_device(pdev); @@ -4880,6 +4880,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, case pci_channel_io_perm_failure: dev_err(&pdev->dev, "%s: pci_channel_io_perm_failure.\n", __func__); + del_timer_sync(&qdev->timer); ql_eeh_close(ndev); set_bit(QL_EEH_FATAL, &qdev->flags); return PCI_ERS_RESULT_DISCONNECT; diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 1681084cc96f..1f309127457d 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -619,6 +619,17 @@ fail: return rc; } +static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) +{ + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + + /* All our existing PIO buffers went away */ + efx_for_each_channel(channel, efx) + efx_for_each_channel_tx_queue(tx_queue, channel) + tx_queue->piobuf = NULL; +} + #else /* !EFX_USE_PIO */ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) @@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx) { } +static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) +{ +} + #endif /* EFX_USE_PIO */ static void efx_ef10_remove(struct efx_nic *efx) @@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) nic_data->must_realloc_vis = true; nic_data->must_restore_filters = true; nic_data->must_restore_piobufs = true; + efx_ef10_forget_old_piobufs(efx); nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; /* Driver-created vswitches and vports must be re-created */ diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 0705ec869487..097f363f1630 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx) #ifdef CONFIG_RFS_ACCEL if (efx->type->offload_features & NETIF_F_NTUPLE) { - efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters, - sizeof(*efx->rps_flow_id), - GFP_KERNEL); - if (!efx->rps_flow_id) { + struct efx_channel *channel; + int i, success = 1; + + efx_for_each_channel(channel, efx) { + channel->rps_flow_id = + kcalloc(efx->type->max_rx_ip_filters, + sizeof(*channel->rps_flow_id), + GFP_KERNEL); + if (!channel->rps_flow_id) + success = 0; + else + for (i = 0; + i < efx->type->max_rx_ip_filters; + ++i) + channel->rps_flow_id[i] = + RPS_FLOW_ID_INVALID; + } + + if (!success) { + efx_for_each_channel(channel, efx) + kfree(channel->rps_flow_id); efx->type->filter_table_remove(efx); rc = -ENOMEM; goto out_unlock; } + + efx->rps_expire_index = efx->rps_expire_channel = 0; } #endif out_unlock: @@ -1744,7 +1763,10 
@@ out_unlock: static void efx_remove_filters(struct efx_nic *efx) { #ifdef CONFIG_RFS_ACCEL - kfree(efx->rps_flow_id); + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + kfree(channel->rps_flow_id); #endif down_write(&efx->filter_sem); efx->type->filter_table_remove(efx); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 38c422321cda..d13ddf9703ff 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -403,6 +403,8 @@ enum efx_sync_events_state { * @event_test_cpu: Last CPU to handle interrupt or test event for this channel * @irq_count: Number of IRQs since last adaptive moderation decision * @irq_mod_score: IRQ moderation score + * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, + * indexed by filter ID * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors @@ -446,6 +448,8 @@ struct efx_channel { unsigned int irq_mod_score; #ifdef CONFIG_RFS_ACCEL unsigned int rfs_filters_added; +#define RPS_FLOW_ID_INVALID 0xFFFFFFFF + u32 *rps_flow_id; #endif unsigned n_rx_tobe_disc; @@ -889,9 +893,9 @@ struct vfdi_status; * @filter_sem: Filter table rw_semaphore, for freeing the table * @filter_lock: Filter table lock, for mere content changes * @filter_state: Architecture-dependent filter table state - * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, - * indexed by filter ID - * @rps_expire_index: Next index to check for expiry in @rps_flow_id + * @rps_expire_channel: Next channel to check for expiry + * @rps_expire_index: Next index to check for expiry in + * @rps_expire_channel's @rps_flow_id * @active_queues: Count of RX and TX queues that haven't been flushed and drained. * @rxq_flush_pending: Count of number of receive queues that need to be flushed. * Decremented when the efx_flush_rx_queue() is called. @@ -1035,7 +1039,7 @@ struct efx_nic { spinlock_t filter_lock; void *filter_state; #ifdef CONFIG_RFS_ACCEL - u32 *rps_flow_id; + unsigned int rps_expire_channel; unsigned int rps_expire_index; #endif diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 8956995b2fe7..02b0b5272c14 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; struct efx_filter_spec spec; - const __be16 *ports; - __be16 ether_type; - int nhoff; + struct flow_keys fk; int rc; - /* The core RPS/RFS code has already parsed and validated - * VLAN, IP and transport headers. We assume they are in the - * header area. - */ - - if (skb->protocol == htons(ETH_P_8021Q)) { - const struct vlan_hdr *vh = - (const struct vlan_hdr *)skb->data; + if (flow_id == RPS_FLOW_ID_INVALID) + return -EINVAL; - /* We can't filter on the IP 5-tuple and the vlan - * together, so just strip the vlan header and filter - * on the IP part. 
- */ - EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh)); - ether_type = vh->h_vlan_encapsulated_proto; - nhoff = sizeof(struct vlan_hdr); - } else { - ether_type = skb->protocol; - nhoff = 0; - } + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) + return -EPROTONOSUPPORT; - if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6)) + if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; + if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) return -EPROTONOSUPPORT; efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, @@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; - spec.ether_type = ether_type; - - if (ether_type == htons(ETH_P_IP)) { - const struct iphdr *ip = - (const struct iphdr *)(skb->data + nhoff); - - EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip)); - if (ip_is_fragment(ip)) - return -EPROTONOSUPPORT; - spec.ip_proto = ip->protocol; - spec.rem_host[0] = ip->saddr; - spec.loc_host[0] = ip->daddr; - EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4); - ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); + spec.ether_type = fk.basic.n_proto; + spec.ip_proto = fk.basic.ip_proto; + + if (fk.basic.n_proto == htons(ETH_P_IP)) { + spec.rem_host[0] = fk.addrs.v4addrs.src; + spec.loc_host[0] = fk.addrs.v4addrs.dst; } else { - const struct ipv6hdr *ip6 = - (const struct ipv6hdr *)(skb->data + nhoff); - - EFX_BUG_ON_PARANOID(skb_headlen(skb) < - nhoff + sizeof(*ip6) + 4); - spec.ip_proto = ip6->nexthdr; - memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr)); - memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr)); - ports = (const __be16 *)(ip6 + 1); + memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr)); + memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr)); } - spec.rem_port = ports[0]; - spec.loc_port = ports[1]; + spec.rem_port = fk.ports.src; + spec.loc_port = fk.ports.dst; rc = efx->type->filter_rfs_insert(efx, &spec); if (rc < 0) return rc; /* Remember this so we can check whether to expire the filter later */ - efx->rps_flow_id[rc] = flow_id; - channel = efx_get_channel(efx, skb_get_rx_queue(skb)); + channel = efx_get_channel(efx, rxq_index); + channel->rps_flow_id[rc] = flow_id; ++channel->rfs_filters_added; - if (ether_type == htons(ETH_P_IP)) + if (spec.ether_type == htons(ETH_P_IP)) netif_info(efx, rx_status, efx->net_dev, "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", - spec.rem_host, ntohs(ports[0]), spec.loc_host, - ntohs(ports[1]), rxq_index, flow_id, rc); + spec.rem_host, ntohs(spec.rem_port), spec.loc_host, + ntohs(spec.loc_port), rxq_index, flow_id, rc); else netif_info(efx, rx_status, efx->net_dev, "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", (spec.ip_proto == IPPROTO_TCP) ? 
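/*
 * Standalone sketch (editor's note, not part of the patch): the
 * rewritten efx_filter_rfs() above hands header parsing to the core
 * flow dissector instead of walking VLAN/IPv4/IPv6 headers by hand.
 * Minimal use of that API:
 */
static int example_extract_tuple(const struct sk_buff *skb)
{
	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
		return -EPROTONOSUPPORT;	/* no ports to steer on */

	pr_debug("proto %#06x ip_proto %u port %u -> %u\n",
		 ntohs(fk.basic.n_proto), fk.basic.ip_proto,
		 ntohs(fk.ports.src), ntohs(fk.ports.dst));
	return 0;
}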
"TCP" : "UDP", - spec.rem_host, ntohs(ports[0]), spec.loc_host, - ntohs(ports[1]), rxq_index, flow_id, rc); + spec.rem_host, ntohs(spec.rem_port), spec.loc_host, + ntohs(spec.loc_port), rxq_index, flow_id, rc); return rc; } @@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) { bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); - unsigned int index, size; + unsigned int channel_idx, index, size; u32 flow_id; if (!spin_trylock_bh(&efx->filter_lock)) return false; expire_one = efx->type->filter_rfs_expire_one; + channel_idx = efx->rps_expire_channel; index = efx->rps_expire_index; size = efx->type->max_rx_ip_filters; while (quota--) { - flow_id = efx->rps_flow_id[index]; - if (expire_one(efx, flow_id, index)) + struct efx_channel *channel = efx_get_channel(efx, channel_idx); + flow_id = channel->rps_flow_id[index]; + + if (flow_id != RPS_FLOW_ID_INVALID && + expire_one(efx, flow_id, index)) { netif_info(efx, rx_status, efx->net_dev, - "expired filter %d [flow %u]\n", - index, flow_id); - if (++index == size) + "expired filter %d [queue %u flow %u]\n", + index, channel_idx, flow_id); + channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; + } + if (++index == size) { + if (++channel_idx == efx->n_channels) + channel_idx = 0; index = 0; + } } + efx->rps_expire_channel = channel_idx; efx->rps_expire_index = index; spin_unlock_bh(&efx->filter_lock); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 3f83c369f56c..ec295851812b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -297,7 +297,7 @@ int stmmac_mdio_register(struct net_device *ndev) return -ENOMEM; if (mdio_bus_data->irqs) - memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq)); + memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq)); #ifdef CONFIG_OF if (priv->device->of_node) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a0f64cba86ba..2ace126533cd 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -990,7 +990,7 @@ static void team_port_disable(struct team *team, #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_RXCSUM | NETIF_F_ALL_TSO) -static void __team_compute_features(struct team *team) +static void ___team_compute_features(struct team *team) { struct team_port *port; u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; @@ -1021,15 +1021,20 @@ static void __team_compute_features(struct team *team) team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; +} +static void __team_compute_features(struct team *team) +{ + ___team_compute_features(team); netdev_change_features(team->dev); } static void team_compute_features(struct team *team) { mutex_lock(&team->lock); - __team_compute_features(team); + ___team_compute_features(team); mutex_unlock(&team->lock); + netdev_change_features(team->dev); } static int team_port_enter(struct team *team, struct team_port *port) diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 36cd7f016a8d..9bbe0161a2f4 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -473,7 +473,7 @@ static void read_bulk_callback(struct urb *urb) goto goon; } - if (!count || count < 4) + if (count < 4) goto goon; 
rx_status = buf[count - 2]; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index d9d2806a47b1..dc989a8b5afb 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -61,6 +61,8 @@ #define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) +#define CARRIER_CHECK_DELAY (2 * HZ) + struct smsc95xx_priv { u32 mac_cr; u32 hash_hi; @@ -69,6 +71,9 @@ struct smsc95xx_priv { spinlock_t mac_cr_lock; u8 features; u8 suspend_flags; + bool link_ok; + struct delayed_work carrier_check; + struct usbnet *dev; }; static bool turbo_mode = true; @@ -624,6 +629,44 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb) intdata); } +static void set_carrier(struct usbnet *dev, bool link) +{ + struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); + + if (pdata->link_ok == link) + return; + + pdata->link_ok = link; + + if (link) + usbnet_link_change(dev, 1, 0); + else + usbnet_link_change(dev, 0, 0); +} + +static void check_carrier(struct work_struct *work) +{ + struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv, + carrier_check.work); + struct usbnet *dev = pdata->dev; + int ret; + + if (pdata->suspend_flags != 0) + return; + + ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR); + if (ret < 0) { + netdev_warn(dev->net, "Failed to read MII_BMSR\n"); + return; + } + if (ret & BMSR_LSTATUS) + set_carrier(dev, 1); + else + set_carrier(dev, 0); + + schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); +} + /* Enable or disable Tx & Rx checksum offload engines */ static int smsc95xx_set_features(struct net_device *netdev, netdev_features_t features) @@ -1165,13 +1208,20 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->flags |= IFF_MULTICAST; dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + + pdata->dev = dev; + INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier); + schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); + return 0; } static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); + if (pdata) { + cancel_delayed_work(&pdata->carrier_check); netif_dbg(dev, ifdown, dev->net, "free pdata\n"); kfree(pdata); pdata = NULL; @@ -1695,6 +1745,7 @@ static int smsc95xx_resume(struct usb_interface *intf) /* do this first to ensure it's cleared even in error case */ pdata->suspend_flags = 0; + schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); if (suspend_flags & SUSPEND_ALLMODES) { /* clear wake-up sources */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 49d84e540343..e0638e556fe7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1925,24 +1925,11 @@ static int virtnet_probe(struct virtio_device *vdev) virtio_device_ready(vdev); - /* Last of all, set up some receive buffers. */ - for (i = 0; i < vi->curr_queue_pairs; i++) { - try_fill_recv(vi, &vi->rq[i], GFP_KERNEL); - - /* If we didn't even get one input buffer, we're useless. 
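/*
 * Hedged skeleton (editor's note) of the polling scheme above:
 * link-change interrupts are evidently not delivered in every
 * configuration of this part, so the driver now samples BMSR every two
 * seconds from self-rearming delayed work (armed in bind, cancelled in
 * unbind, re-armed on resume, as the hunks show):
 */
static void example_check_carrier(struct work_struct *work)
{
	struct smsc95xx_priv *pdata =
		container_of(work, struct smsc95xx_priv, carrier_check.work);

	/* ... read MII_BMSR and feed usbnet_link_change() ... */

	schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
}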
*/ - if (vi->rq[i].vq->num_free == - virtqueue_get_vring_size(vi->rq[i].vq)) { - free_unused_bufs(vi); - err = -ENOMEM; - goto free_recv_bufs; - } - } - vi->nb.notifier_call = &virtnet_cpu_callback; err = register_hotcpu_notifier(&vi->nb); if (err) { pr_debug("virtio_net: registering cpu notifier failed\n"); - goto free_recv_bufs; + goto free_unregister_netdev; } /* Assume link up if device can't report link status, @@ -1960,10 +1947,9 @@ static int virtnet_probe(struct virtio_device *vdev) return 0; -free_recv_bufs: +free_unregister_netdev: vi->vdev->config->reset(vdev); - free_receive_bufs(vi); unregister_netdev(dev); free_vqs: cancel_delayed_work_sync(&vi->refill); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 8ff30c3bdfce..f999db2f97b4 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -3086,6 +3086,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; + if (tb[IFLA_MTU]) + conf.mtu = nla_get_u32(tb[IFLA_MTU]); + err = vxlan_dev_configure(src_net, dev, &conf); switch (err) { case -ENODEV: diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index f2d01d4d9364..1b8304e1efaa 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -950,17 +950,14 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) /* For SPIs, we need to track the affinity per IRQ */ if (using_spi) { - if (i >= pdev->num_resources) { - of_node_put(dn); + if (i >= pdev->num_resources) break; - } irqs[i] = cpu; } /* Keep track of the CPUs containing this PMU type */ cpumask_set_cpu(cpu, &pmu->supported_cpus); - of_node_put(dn); i++; } while (1); @@ -995,9 +992,6 @@ int arm_pmu_device_probe(struct platform_device *pdev, armpmu_init(pmu); - if (!__oprofile_cpu_pmu) - __oprofile_cpu_pmu = pmu; - pmu->plat_device = pdev; if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { @@ -1033,6 +1027,9 @@ int arm_pmu_device_probe(struct platform_device *pdev, if (ret) goto out_destroy; + if (!__oprofile_cpu_pmu) + __oprofile_cpu_pmu = pmu; + pr_info("enabled with %s PMU driver, %d counters available\n", pmu->name, pmu->num_events); @@ -1043,6 +1040,7 @@ out_destroy: out_free: pr_info("%s: failed to register PMU devices!\n", of_node_full_name(node)); + kfree(pmu->irq_affinity); kfree(pmu); return ret; } diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 207b13b618cf..a607655d7830 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -1256,9 +1256,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) const struct mtk_desc_pin *pin; chained_irq_enter(chip, desc); - for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) { + for (eint_num = 0; + eint_num < pctl->devdata->ap_num; + eint_num += 32, reg += 4) { status = readl(reg); - reg += 4; while (status) { offset = __ffs(status); index = eint_num + offset; diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index ccbfc325c778..38faceff2f08 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -854,7 +854,7 @@ static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset) clk_enable(nmk_chip->clk); - dir = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset)); + dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset)); clk_disable(nmk_chip->clk); diff --git a/drivers/ptp/ptp_chardev.c 
b/drivers/ptp/ptp_chardev.c index 579fd65299a0..d637c933c8a9 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -208,14 +208,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_SYS_OFFSET: - sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL); - if (!sysoff) { - err = -ENOMEM; - break; - } - if (copy_from_user(sysoff, (void __user *)arg, - sizeof(*sysoff))) { - err = -EFAULT; + sysoff = memdup_user((void __user *)arg, sizeof(*sysoff)); + if (IS_ERR(sysoff)) { + err = PTR_ERR(sysoff); + sysoff = NULL; break; } if (sysoff->n_samples > PTP_MAX_SAMPLES) { diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 8f90d9e77104..969c312de1be 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -621,6 +621,11 @@ struct aac_driver_ident #define AAC_QUIRK_SCSI_32 0x0020 /* + * SRC based adapters support the AifReqEvent functions + */ +#define AAC_QUIRK_SRC 0x0040 + +/* * The adapter interface specs all queues to be located in the same * physically contiguous block. The host structure that defines the * commuication queues will assume they are each a separate physically diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index a943bd230bc2..79871f3519ff 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -236,10 +236,10 @@ static struct aac_driver_ident aac_drivers[] = { { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ - { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */ - { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */ - { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */ - { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */ + { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */ + { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */ + { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */ + { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */ }; /** @@ -1299,7 +1299,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) else shost->this_id = shost->max_id; - aac_intr_normal(aac, 0, 2, 0, NULL); + if (aac_drivers[index].quirks & AAC_QUIRK_SRC) + aac_intr_normal(aac, 0, 2, 0, NULL); /* * dmb - we may need to move the setting of these parms somewhere else once diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 6a4df5a315e9..6bff13e7afc7 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -7975,13 +7975,14 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, ActiveCableEventData = (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; if (ActiveCableEventData->ReasonCode == - MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) + MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) { pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", ioc->name, ActiveCableEventData->ReceptacleID); pr_info("cannot be powered and devices connected to this active cable"); pr_info("will not be seen. 
This active cable"); pr_info("requires %d mW of power", ActiveCableEventData->ActiveCablePowerRequirement); + } break; default: /* ignore the rest */ diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b2e332af0f51..c71344aebdbb 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -821,9 +821,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) } /* - * If we finished all bytes in the request we are done now. + * special case: failed zero length commands always need to + * drop down into the retry code. Otherwise, if we finished + * all bytes in the request we are done now. */ - if (!scsi_end_request(req, error, good_bytes, 0)) + if (!(blk_rq_bytes(req) == 0 && error) && + !scsi_end_request(req, error, good_bytes, 0)) return; /* diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 428c03ef02b2..f459dff30512 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1398,11 +1398,15 @@ static int media_not_present(struct scsi_disk *sdkp, **/ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) { - struct scsi_disk *sdkp = scsi_disk(disk); - struct scsi_device *sdp = sdkp->device; + struct scsi_disk *sdkp = scsi_disk_get(disk); + struct scsi_device *sdp; struct scsi_sense_hdr *sshdr = NULL; int retval; + if (!sdkp) + return 0; + + sdp = sdkp->device; SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); /* @@ -1459,6 +1463,7 @@ out: kfree(sshdr); retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; sdp->changed = 0; + scsi_disk_put(sdkp); return retval; } diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h index b56885c14839..ebb34dca60df 100644 --- a/drivers/staging/android/sync.h +++ b/drivers/staging/android/sync.h @@ -68,7 +68,8 @@ struct sync_timeline { /* protected by child_list_lock */ bool destroyed; - int context, value; + u64 context; + int value; struct list_head child_list_head; spinlock_t child_list_lock; diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c index 13d431cbd29e..a578cd257db4 100644 --- a/drivers/thermal/int340x_thermal/int3406_thermal.c +++ b/drivers/thermal/int340x_thermal/int3406_thermal.c @@ -177,7 +177,7 @@ static int int3406_thermal_probe(struct platform_device *pdev) return -ENODEV; d->raw_bd = bd; - ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br); + ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL); if (ret) return ret; diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index 82c4d2e45319..95103054c0e4 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig @@ -120,17 +120,6 @@ config UNIX98_PTYS All modern Linux systems use the Unix98 ptys. Say Y unless you're on an embedded system and want to conserve memory. -config DEVPTS_MULTIPLE_INSTANCES - bool "Support multiple instances of devpts" - depends on UNIX98_PTYS - default n - ---help--- - Enable support for multiple instances of devpts filesystem. - If you want to have isolated PTY namespaces (eg: in containers), - say Y here. Otherwise, say N. If enabled, each mount of devpts - filesystem with the '-o newinstance' option will create an - independent PTY namespace. 
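With DEVPTS_MULTIPLE_INSTANCES removed, every devpts mount now gets its own instance unconditionally; the pty.c hunk below completes the conversion by replacing the NULL-returning devpts_get_ref() with devpts_acquire(), which reports failure through the ERR_PTR convention. A minimal sketch of that calling pattern, using the names from the diff:

    /* Sketch, not the real ptmx_open(): ERR_PTR() encodes the errno in
     * the returned pointer, so the caller can propagate the actual
     * cause of failure instead of mapping a bare NULL to one fixed
     * error code. */
    static int acquire_fsi(struct file *filp, struct pts_fs_info **out)
    {
            struct pts_fs_info *fsi = devpts_acquire(filp);

            if (IS_ERR(fsi))
                    return PTR_ERR(fsi);
            *out = fsi;
            return 0;
    }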
- config LEGACY_PTYS bool "Legacy (BSD) PTY support" default y diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index dd4b8417e7f4..f856c4544eea 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -668,7 +668,7 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) else fsi = tty->link->driver_data; devpts_kill_index(fsi, tty->index); - devpts_put_ref(fsi); + devpts_release(fsi); } static const struct tty_operations ptm_unix98_ops = { @@ -733,10 +733,11 @@ static int ptmx_open(struct inode *inode, struct file *filp) if (retval) return retval; - fsi = devpts_get_ref(inode, filp); - retval = -ENODEV; - if (!fsi) + fsi = devpts_acquire(filp); + if (IS_ERR(fsi)) { + retval = PTR_ERR(fsi); goto out_free_file; + } /* find a device that is not in use. */ mutex_lock(&devpts_mutex); @@ -745,7 +746,7 @@ static int ptmx_open(struct inode *inode, struct file *filp) retval = index; if (index < 0) - goto out_put_ref; + goto out_put_fsi; mutex_lock(&tty_mutex); @@ -789,8 +790,8 @@ err_release: return retval; out: devpts_kill_index(fsi, index); -out_put_ref: - devpts_put_ref(fsi); +out_put_fsi: + devpts_release(fsi); out_free_file: tty_free_file(filp); return retval; diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 93601407dab8..688691d9058d 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -749,7 +749,8 @@ static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos, if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4) return count; } else { - if (pci_read_vpd(pdev, addr, 4, &data) != 4) + data = 0; + if (pci_read_vpd(pdev, addr, 4, &data) < 0) return count; *pdata = cpu_to_le32(data); } diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index e9ea3fef144a..15ecfc9c5f6c 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -228,9 +228,9 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd) static void vfio_intx_disable(struct vfio_pci_device *vdev) { - vfio_intx_set_signal(vdev, -1); vfio_virqfd_disable(&vdev->ctx[0].unmask); vfio_virqfd_disable(&vdev->ctx[0].mask); + vfio_intx_set_signal(vdev, -1); vdev->irq_type = VFIO_PCI_NUM_IRQS; vdev->num_ctx = 0; kfree(vdev->ctx); @@ -401,13 +401,13 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) struct pci_dev *pdev = vdev->pdev; int i; - vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); - for (i = 0; i < vdev->num_ctx; i++) { vfio_virqfd_disable(&vdev->ctx[i].unmask); vfio_virqfd_disable(&vdev->ctx[i].mask); } + vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); + if (msix) { pci_disable_msix(vdev->pdev); kfree(vdev->msix); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 15a65823aad9..2ba19424e4a1 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -515,7 +515,7 @@ static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova, unsigned long pfn, long npage, int prot) { long i; - int ret; + int ret = 0; for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) { ret = iommu_map(domain->domain, iova, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c index 8511c648a15c..9d78411a3bf7 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c @@ -14,7 
+14,7 @@ #include <linux/platform_device.h> #include <linux/of.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omap-panel-data.h> struct panel_drv_data { @@ -25,7 +25,6 @@ struct panel_drv_data { struct omap_video_timings timings; - enum omap_dss_venc_type connector_type; bool invert_polarity; }; @@ -45,10 +44,6 @@ static const struct omap_video_timings tvc_pal_timings = { static const struct of_device_id tvc_of_match[]; -struct tvc_of_data { - enum omap_dss_venc_type connector_type; -}; - #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) static int tvc_connect(struct omap_dss_device *dssdev) @@ -99,7 +94,7 @@ static int tvc_enable(struct omap_dss_device *dssdev) in->ops.atv->set_timings(in, &ddata->timings); if (!ddata->dev->of_node) { - in->ops.atv->set_type(in, ddata->connector_type); + in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE); in->ops.atv->invert_vid_out_polarity(in, ddata->invert_polarity); @@ -207,7 +202,6 @@ static int tvc_probe_pdata(struct platform_device *pdev) ddata->in = in; - ddata->connector_type = pdata->connector_type; ddata->invert_polarity = pdata->invert_polarity; dssdev = &ddata->dssdev; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c index d811e6dcaef7..06e1db34541e 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c @@ -16,8 +16,7 @@ #include <drm/drm_edid.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> static const struct omap_video_timings dvic_default_timings = { .x_res = 640, @@ -236,46 +235,6 @@ static struct omap_dss_driver dvic_driver = { .detect = dvic_detect, }; -static int dvic_probe_pdata(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct connector_dvi_platform_data *pdata; - struct omap_dss_device *in, *dssdev; - int i2c_bus_num; - - pdata = dev_get_platdata(&pdev->dev); - i2c_bus_num = pdata->i2c_bus_num; - - if (i2c_bus_num != -1) { - struct i2c_adapter *adapter; - - adapter = i2c_get_adapter(i2c_bus_num); - if (!adapter) { - dev_err(&pdev->dev, - "Failed to get I2C adapter, bus %d\n", - i2c_bus_num); - return -EPROBE_DEFER; - } - - ddata->i2c_adapter = adapter; - } - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - i2c_put_adapter(ddata->i2c_adapter); - - dev_err(&pdev->dev, "Failed to find video source\n"); - return -EPROBE_DEFER; - } - - ddata->in = in; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int dvic_probe_of(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); @@ -313,23 +272,18 @@ static int dvic_probe(struct platform_device *pdev) struct omap_dss_device *dssdev; int r; + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; platform_set_drvdata(pdev, ddata); - if (dev_get_platdata(&pdev->dev)) { - r = dvic_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = dvic_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = dvic_probe_of(pdev); + if (r) + return r; ddata->timings = dvic_default_timings; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c index 6ee4129bc0c0..58d5803ede67 100644 --- 
a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c @@ -17,8 +17,7 @@ #include <drm/drm_edid.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> static const struct omap_video_timings hdmic_default_timings = { .x_res = 640, @@ -206,30 +205,6 @@ static struct omap_dss_driver hdmic_driver = { .set_hdmi_infoframe = hdmic_set_infoframe, }; -static int hdmic_probe_pdata(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct connector_hdmi_platform_data *pdata; - struct omap_dss_device *in, *dssdev; - - pdata = dev_get_platdata(&pdev->dev); - - ddata->hpd_gpio = -ENODEV; - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&pdev->dev, "Failed to find video source\n"); - return -EPROBE_DEFER; - } - - ddata->in = in; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int hdmic_probe_of(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); @@ -261,6 +236,9 @@ static int hdmic_probe(struct platform_device *pdev) struct omap_dss_device *dssdev; int r; + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; @@ -268,17 +246,9 @@ static int hdmic_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ddata); ddata->dev = &pdev->dev; - if (dev_get_platdata(&pdev->dev)) { - r = hdmic_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = hdmic_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = hdmic_probe_of(pdev); + if (r) + return r; if (gpio_is_valid(ddata->hpd_gpio)) { r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c index 8c246c213e06..a9a67167cc3d 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c @@ -20,7 +20,7 @@ #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c index d9048b3df495..8c0953d069b7 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c @@ -15,8 +15,7 @@ #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; @@ -166,32 +165,6 @@ static const struct omapdss_dvi_ops tfp410_dvi_ops = { .get_timings = tfp410_get_timings, }; -static int tfp410_probe_pdata(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct encoder_tfp410_platform_data *pdata; - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&pdev->dev); - - ddata->pd_gpio = pdata->power_down_gpio; - - ddata->data_lines = pdata->data_lines; - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&pdev->dev, "Failed to find video source\n"); - return -ENODEV; - } - - ddata->in = in; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int 
tfp410_probe_of(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); @@ -225,23 +198,18 @@ static int tfp410_probe(struct platform_device *pdev) struct omap_dss_device *dssdev; int r; + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; platform_set_drvdata(pdev, ddata); - if (dev_get_platdata(&pdev->dev)) { - r = tfp410_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = tfp410_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = tfp410_probe_of(pdev); + if (r) + return r; if (gpio_is_valid(ddata->pd_gpio)) { r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c index 677e2545fcbe..80dc47347e21 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c @@ -16,8 +16,7 @@ #include <linux/platform_device.h> #include <linux/gpio/consumer.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c index e780fd4f8b46..ace3d818afe5 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c @@ -16,7 +16,7 @@ #include <linux/of.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omap-panel-data.h> #include <video/of_display_timing.h> diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index 3414c2609320..b58012b82b6f 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c @@ -25,8 +25,7 @@ #include <linux/of_device.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> #include <video/mipi_display.h> /* DSI Virtual channel. Hardcoded for now. 
*/ @@ -1127,40 +1126,6 @@ static struct omap_dss_driver dsicm_ops = { .memory_read = dsicm_memory_read, }; -static int dsicm_probe_pdata(struct platform_device *pdev) -{ - const struct panel_dsicm_platform_data *pdata; - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&pdev->dev); - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&pdev->dev, "failed to find video source\n"); - return -EPROBE_DEFER; - } - ddata->in = in; - - ddata->reset_gpio = pdata->reset_gpio; - - if (pdata->use_ext_te) - ddata->ext_te_gpio = pdata->ext_te_gpio; - else - ddata->ext_te_gpio = -1; - - ddata->ulps_timeout = pdata->ulps_timeout; - - ddata->use_dsi_backlight = pdata->use_dsi_backlight; - - ddata->pin_config = pdata->pin_config; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int dsicm_probe_of(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; @@ -1207,6 +1172,9 @@ static int dsicm_probe(struct platform_device *pdev) dev_dbg(dev, "probe\n"); + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); if (!ddata) return -ENOMEM; @@ -1214,17 +1182,9 @@ static int dsicm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ddata); ddata->pdev = pdev; - if (dev_get_platdata(dev)) { - r = dsicm_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = dsicm_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = dsicm_probe_of(pdev); + if (r) + return r; ddata->timings.x_res = 864; ddata->timings.y_res = 480; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c index 18eb60e9c9ec..f14691ce8d02 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c @@ -16,8 +16,7 @@ #include <linux/mutex.h> #include <linux/gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> static struct omap_video_timings lb035q02_timings = { .x_res = 320, @@ -240,44 +239,6 @@ static struct omap_dss_driver lb035q02_ops = { .get_resolution = omapdss_default_get_resolution, }; -static int lb035q02_probe_pdata(struct spi_device *spi) -{ - const struct panel_lb035q02_platform_data *pdata; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - struct omap_dss_device *dssdev, *in; - int r; - - pdata = dev_get_platdata(&spi->dev); - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&spi->dev, "failed to find video source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - r = devm_gpio_request_one(&spi->dev, pdata->enable_gpio, - GPIOF_OUT_INIT_LOW, "panel enable"); - if (r) - goto err_gpio; - - ddata->enable_gpio = gpio_to_desc(pdata->enable_gpio); - - ddata->backlight_gpio = pdata->backlight_gpio; - - return 0; -err_gpio: - omap_dss_put_device(ddata->in); - return r; -} - static int lb035q02_probe_of(struct spi_device *spi) { struct device_node *node = spi->dev.of_node; @@ -312,6 +273,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) struct omap_dss_device *dssdev; int r; + if (!spi->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); 
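Every omapfb panel and encoder conversion in this run has the same shape: the legacy *_probe_pdata() path and its omap-panel-data.h dependency are deleted, probe fails fast when no DT node is present, and only the *_probe_of() path survives. A condensed sketch of the resulting probe skeleton (panel_probe_of() stands in for each driver's actual DT parser):

    /* Condensed sketch of the DT-only probe pattern used by these
     * drivers; panel_probe_of() is a stand-in, not a real symbol. */
    static int panel_probe(struct spi_device *spi)
    {
            struct panel_drv_data *ddata;

            if (!spi->dev.of_node)
                    return -ENODEV; /* platform data is no longer accepted */

            ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
            if (!ddata)
                    return -ENOMEM;
            dev_set_drvdata(&spi->dev, ddata);

            return panel_probe_of(spi); /* DT is the only probe source */
    }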
if (ddata == NULL) return -ENOMEM; @@ -320,17 +284,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) ddata->spi = spi; - if (dev_get_platdata(&spi->dev)) { - r = lb035q02_probe_pdata(spi); - if (r) - return r; - } else if (spi->dev.of_node) { - r = lb035q02_probe_of(spi); - if (r) - return r; - } else { - return -ENODEV; - } + r = lb035q02_probe_of(spi); + if (r) + return r; if (gpio_is_valid(ddata->backlight_gpio)) { r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c index 8a928c9a2fc9..a2cbadd3eca3 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c @@ -18,8 +18,7 @@ #include <linux/gpio.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; @@ -233,33 +232,6 @@ static struct omap_dss_driver nec_8048_ops = { }; -static int nec_8048_probe_pdata(struct spi_device *spi) -{ - const struct panel_nec_nl8048hl11_platform_data *pdata; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&spi->dev); - - ddata->qvga_gpio = pdata->qvga_gpio; - ddata->res_gpio = pdata->res_gpio; - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&spi->dev, "failed to find video source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int nec_8048_probe_of(struct spi_device *spi) { struct device_node *node = spi->dev.of_node; @@ -296,6 +268,9 @@ static int nec_8048_probe(struct spi_device *spi) dev_dbg(&spi->dev, "%s\n", __func__); + if (!spi->dev.of_node) + return -ENODEV; + spi->mode = SPI_MODE_0; spi->bits_per_word = 32; @@ -315,17 +290,9 @@ static int nec_8048_probe(struct spi_device *spi) ddata->spi = spi; - if (dev_get_platdata(&spi->dev)) { - r = nec_8048_probe_pdata(spi); - if (r) - return r; - } else if (spi->dev.of_node) { - r = nec_8048_probe_of(spi); - if (r) - return r; - } else { - return -ENODEV; - } + r = nec_8048_probe_of(spi); + if (r) + return r; if (gpio_is_valid(ddata->qvga_gpio)) { r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio, diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c index 1954ec913ce5..a8be18a87fa0 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c @@ -17,8 +17,7 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/regulator/consumer.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; @@ -197,69 +196,6 @@ static struct omap_dss_driver sharp_ls_ops = { .get_resolution = omapdss_default_get_resolution, }; -static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags, - char *desc, struct gpio_desc **gpiod) -{ - int r; - - r = devm_gpio_request_one(dev, gpio, flags, desc); - if (r) { - *gpiod = NULL; - return r == -ENOENT ? 
0 : r; - } - - *gpiod = gpio_to_desc(gpio); - - return 0; -} - -static int sharp_ls_probe_pdata(struct platform_device *pdev) -{ - const struct panel_sharp_ls037v7dw01_platform_data *pdata; - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev, *in; - int r; - - pdata = dev_get_platdata(&pdev->dev); - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&pdev->dev, "failed to find video source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - r = sharp_ls_get_gpio(&pdev->dev, pdata->mo_gpio, GPIOF_OUT_INIT_LOW, - "lcd MO", &ddata->mo_gpio); - if (r) - return r; - r = sharp_ls_get_gpio(&pdev->dev, pdata->lr_gpio, GPIOF_OUT_INIT_HIGH, - "lcd LR", &ddata->lr_gpio); - if (r) - return r; - r = sharp_ls_get_gpio(&pdev->dev, pdata->ud_gpio, GPIOF_OUT_INIT_HIGH, - "lcd UD", &ddata->ud_gpio); - if (r) - return r; - r = sharp_ls_get_gpio(&pdev->dev, pdata->resb_gpio, GPIOF_OUT_INIT_LOW, - "lcd RESB", &ddata->resb_gpio); - if (r) - return r; - r = sharp_ls_get_gpio(&pdev->dev, pdata->ini_gpio, GPIOF_OUT_INIT_LOW, - "lcd INI", &ddata->ini_gpio); - if (r) - return r; - - return 0; -} - static int sharp_ls_get_gpio_of(struct device *dev, int index, int val, const char *desc, struct gpio_desc **gpiod) { @@ -330,23 +266,18 @@ static int sharp_ls_probe(struct platform_device *pdev) struct omap_dss_device *dssdev; int r; + if (!pdev->dev.of_node) + return -ENODEV; + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); if (ddata == NULL) return -ENOMEM; platform_set_drvdata(pdev, ddata); - if (dev_get_platdata(&pdev->dev)) { - r = sharp_ls_probe_pdata(pdev); - if (r) - return r; - } else if (pdev->dev.of_node) { - r = sharp_ls_probe_of(pdev); - if (r) - return r; - } else { - return -ENODEV; - } + r = sharp_ls_probe_of(pdev); + if (r) + return r; ddata->videomode = sharp_ls_timings; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c index 31efcca801bd..468560a6daae 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c @@ -33,7 +33,7 @@ #include <linux/of.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omap-panel-data.h> #define MIPID_CMD_READ_DISP_ID 0x04 diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c index 4d657f3ab679..b529a8c2b652 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c @@ -28,8 +28,7 @@ #include <linux/delay.h> #include <linux/spi/spi.h> #include <linux/gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> struct panel_drv_data { struct omap_dss_device dssdev; @@ -365,31 +364,6 @@ static struct omap_dss_driver td028ttec1_ops = { .check_timings = td028ttec1_panel_check_timings, }; -static int td028ttec1_panel_probe_pdata(struct spi_device *spi) -{ - const struct panel_tpo_td028ttec1_platform_data *pdata; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&spi->dev); - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - 
dev_err(&spi->dev, "failed to find video source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int td028ttec1_probe_of(struct spi_device *spi) { struct device_node *node = spi->dev.of_node; @@ -415,6 +389,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi) dev_dbg(&spi->dev, "%s\n", __func__); + if (!spi->dev.of_node) + return -ENODEV; + spi->bits_per_word = 9; spi->mode = SPI_MODE_3; @@ -432,17 +409,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi) ddata->spi_dev = spi; - if (dev_get_platdata(&spi->dev)) { - r = td028ttec1_panel_probe_pdata(spi); - if (r) - return r; - } else if (spi->dev.of_node) { - r = td028ttec1_probe_of(spi); - if (r) - return r; - } else { - return -ENODEV; - } + r = td028ttec1_probe_of(spi); + if (r) + return r; ddata->videomode = td028ttec1_panel_timings; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c index 68e3b68a2920..51e628b85f4a 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c @@ -19,8 +19,7 @@ #include <linux/slab.h> #include <linux/of_gpio.h> -#include <video/omapdss.h> -#include <video/omap-panel-data.h> +#include <video/omapfb_dss.h> #define TPO_R02_MODE(x) ((x) & 7) #define TPO_R02_MODE_800x480 7 @@ -465,32 +464,6 @@ static struct omap_dss_driver tpo_td043_ops = { }; -static int tpo_td043_probe_pdata(struct spi_device *spi) -{ - const struct panel_tpo_td043mtea1_platform_data *pdata; - struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev); - struct omap_dss_device *dssdev, *in; - - pdata = dev_get_platdata(&spi->dev); - - ddata->nreset_gpio = pdata->nreset_gpio; - - in = omap_dss_find_output(pdata->source); - if (in == NULL) { - dev_err(&spi->dev, "failed to find video source '%s'\n", - pdata->source); - return -EPROBE_DEFER; - } - ddata->in = in; - - ddata->data_lines = pdata->data_lines; - - dssdev = &ddata->dssdev; - dssdev->name = pdata->name; - - return 0; -} - static int tpo_td043_probe_of(struct spi_device *spi) { struct device_node *node = spi->dev.of_node; @@ -524,6 +497,9 @@ static int tpo_td043_probe(struct spi_device *spi) dev_dbg(&spi->dev, "%s\n", __func__); + if (!spi->dev.of_node) + return -ENODEV; + spi->bits_per_word = 16; spi->mode = SPI_MODE_0; @@ -541,17 +517,9 @@ static int tpo_td043_probe(struct spi_device *spi) ddata->spi = spi; - if (dev_get_platdata(&spi->dev)) { - r = tpo_td043_probe_pdata(spi); - if (r) - return r; - } else if (spi->dev.of_node) { - r = tpo_td043_probe_of(spi); - if (r) - return r; - } else { - return -ENODEV; - } + r = tpo_td043_probe_of(spi); + if (r) + return r; ddata->mode = TPO_R02_MODE_800x480; memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma)); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/apply.c b/drivers/video/fbdev/omap2/omapfb/dss/apply.c index 663ccc3bf4e5..2481f4871f66 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/apply.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/apply.c @@ -23,7 +23,7 @@ #include <linux/spinlock.h> #include <linux/jiffies.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c index 5a87179b7312..29de4827589d 100644 --- 
a/drivers/video/fbdev/omap2/omapfb/dss/core.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c @@ -35,7 +35,7 @@ #include <linux/suspend.h> #include <linux/slab.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" @@ -208,8 +208,6 @@ static int __init omap_dss_probe(struct platform_device *pdev) core.default_display_name = def_disp_name; else if (pdata->default_display_name) core.default_display_name = pdata->default_display_name; - else if (pdata->default_device) - core.default_display_name = pdata->default_device->name; register_pm_notifier(&omap_dss_pm_notif_block); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c index 6607db37a5e4..3691bde4ce0a 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c @@ -26,7 +26,7 @@ #include <linux/interrupt.h> #include <linux/seq_file.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c index 5491e304f4fe..7a75dfda9845 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c @@ -41,7 +41,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c index 038c15b04215..59c9a5c47ca9 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c @@ -18,7 +18,7 @@ */ #include <linux/kernel.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dispc.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c index 75b5286029ee..b3fdbfd0b82d 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c @@ -25,7 +25,7 @@ #include <linux/platform_device.h> #include <linux/sysfs.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf) diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display.c b/drivers/video/fbdev/omap2/omapfb/dss/display.c index ef5b9027985d..dd5468695c43 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/display.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/display.c @@ -28,7 +28,7 @@ #include <linux/platform_device.h> #include <linux/of.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c index 7953e6a52346..da09806b940c 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c @@ -34,7 +34,7 @@ #include <linux/clk.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c index d63e59807707..9e4800a4e3d1 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c @@ -42,7 +42,7 @@ #include <linux/of_platform.h> #include 
<linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/mipi_display.h> #include "dss.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c index bf407b6ba15c..d356a252ab4a 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c @@ -18,7 +18,7 @@ #include <linux/of.h> #include <linux/seq_file.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c index 0078c4d1fc31..47d7f69ad9ad 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c @@ -41,7 +41,7 @@ #include <linux/suspend.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h index 0184a8461df1..a3cc0ca8f9d2 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h @@ -73,6 +73,17 @@ #define FLD_MOD(orig, val, start, end) \ (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end)) +enum omap_dss_clk_source { + OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK + * OMAP4: DSS_FCLK */ + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK + * OMAP4: PLL1_CLK1 */ + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK + * OMAP4: PLL1_CLK2 */ + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */ + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */ +}; + enum dss_io_pad_mode { DSS_IO_PAD_MODE_RESET, DSS_IO_PAD_MODE_RFBI, diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c index c886a2927f73..8fc843b56b26 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c @@ -23,7 +23,7 @@ #include <linux/err.h> #include <linux/slab.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h index 53616b02b613..f6de87e078b0 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h @@ -23,7 +23,8 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/hdmi.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> +#include <sound/omap-hdmi-audio.h> #include "dss.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c index 2e71aec838b1..926a6f20dbb2 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c @@ -33,7 +33,7 @@ #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <sound/omap-hdmi-audio.h> #include "hdmi4_core.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c index aade6d99662a..0ee829a165c3 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c @@ -38,7 +38,7 @@ #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/component.h> -#include <video/omapdss.h> +#include 
<video/omapfb_dss.h> #include <sound/omap-hdmi-audio.h> #include "hdmi5_core.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c index 8ea531d2652c..bbfe7e2d4332 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c @@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core) { void __iomem *base = core->base; const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ - const unsigned ss_scl_high = 4000; /* ns */ - const unsigned ss_scl_low = 4700; /* ns */ + const unsigned ss_scl_high = 4600; /* ns */ + const unsigned ss_scl_low = 5400; /* ns */ const unsigned fs_scl_high = 600; /* ns */ const unsigned fs_scl_low = 1300; /* ns */ const unsigned sda_hold = 1000; /* ns */ @@ -442,7 +442,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, c = (ptr[1] >> 6) & 0x3; m = (ptr[1] >> 4) & 0x3; - r = (ptr[1] >> 0) & 0x3; + r = (ptr[1] >> 0) & 0xf; itc = (ptr[2] >> 7) & 0x1; ec = (ptr[2] >> 4) & 0x7; diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c index 1b8fcc6c4ba1..189a5ad125a3 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c @@ -4,7 +4,7 @@ #include <linux/kernel.h> #include <linux/err.h> #include <linux/of.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "hdmi.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c index 1f5d19c119ce..9a13c35fd6d8 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c @@ -13,7 +13,7 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "hdmi.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c index 06e23a7c432c..eac3665aba6c 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c @@ -17,7 +17,7 @@ #include <linux/platform_device.h> #include <linux/clk.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "hdmi.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c index 7c544bc56fb5..705373e4cf38 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c @@ -14,7 +14,7 @@ #include <linux/err.h> #include <linux/io.h> #include <linux/platform_device.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "hdmi.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c index a7414fb12830..9e2a67fdf4d2 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c @@ -26,7 +26,7 @@ #include <linux/platform_device.h> #include <linux/jiffies.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager.c b/drivers/video/fbdev/omap2/omapfb/dss/manager.c index 08a67f4f6a20..69f86d2cc274 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/manager.c +++ 
b/drivers/video/fbdev/omap2/omapfb/dss/manager.c @@ -28,7 +28,7 @@ #include <linux/platform_device.h> #include <linux/jiffies.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" @@ -69,7 +69,6 @@ int dss_init_overlay_managers(void) break; } - mgr->caps = 0; mgr->supported_displays = dss_feat_get_supported_displays(mgr->id); mgr->supported_outputs = diff --git a/drivers/video/fbdev/omap2/omapfb/dss/output.c b/drivers/video/fbdev/omap2/omapfb/dss/output.c index 16072159bd24..bed9a978269d 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/output.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/output.c @@ -21,7 +21,7 @@ #include <linux/slab.h> #include <linux/of.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c index 4cc5ddebfb34..f1f6c0aea752 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c @@ -26,7 +26,7 @@ #include <linux/kobject.h> #include <linux/platform_device.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c index 2f7cee985cdd..d6c5d75d2ef8 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c @@ -30,7 +30,7 @@ #include <linux/delay.h> #include <linux/slab.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/pll.c b/drivers/video/fbdev/omap2/omapfb/dss/pll.c index f974ddcd3b6e..0564c5606cd0 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/pll.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/pll.c @@ -22,7 +22,7 @@ #include <linux/regulator/consumer.h> #include <linux/sched.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c index aea6a1d0fb20..562b0c4ae0c6 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c @@ -38,7 +38,7 @@ #include <linux/pm_runtime.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" struct rfbi_reg { u16 idx; }; diff --git a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c index d747cc6b59e1..c4be732a4714 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c @@ -29,7 +29,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" static struct { diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c index 26e0ee30adf8..392464da12e4 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c @@ -37,7 +37,7 @@ #include <linux/of.h> #include <linux/component.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c index b1ec59e42940..a890540f2037 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c +++ 
b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c @@ -17,7 +17,7 @@ #include <linux/platform_device.h> #include <linux/sched.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include "dss.h" #include "dss_features.h" diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c index 9ddfdd63b84c..ef69273074ba 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c @@ -30,7 +30,7 @@ #include <linux/export.h> #include <linux/sizes.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omapvrfb.h> #include "omapfb.h" diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c index d3af01c94a58..2fb90cb6803f 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c @@ -30,7 +30,7 @@ #include <linux/platform_device.h> #include <linux/omapfb.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omapvrfb.h> #include "omapfb.h" diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c index 18fa9e1d0033..8087a009c54f 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c @@ -29,7 +29,7 @@ #include <linux/mm.h> #include <linux/omapfb.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #include <video/omapvrfb.h> #include "omapfb.h" diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb.h b/drivers/video/fbdev/omap2/omapfb/omapfb.h index 623cd872a367..bcb9ff4a607d 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb.h +++ b/drivers/video/fbdev/omap2/omapfb/omapfb.h @@ -31,7 +31,7 @@ #include <linux/dma-attrs.h> #include <linux/dma-mapping.h> -#include <video/omapdss.h> +#include <video/omapfb_dss.h> #ifdef DEBUG extern bool omapfb_debug; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a400951e8678..689d25ac6a68 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2042,6 +2042,11 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, struct btrfs_bio *bbio = NULL; + /* + * Avoid races with device replace and make sure our bbio has devices + * associated to its stripes that don't go away while we are discarding. + */ + btrfs_bio_counter_inc_blocked(root->fs_info); /* Tell the block device(s) that the sectors can be discarded */ ret = btrfs_map_block(root->fs_info, REQ_DISCARD, bytenr, &num_bytes, &bbio, 0); @@ -2074,6 +2079,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, } btrfs_put_bbio(bbio); } + btrfs_bio_counter_dec(root->fs_info); if (actual_bytes) *actual_bytes = discarded_bytes; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3cd57825c75f..6e953de83f08 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2025,9 +2025,16 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, bio->bi_iter.bi_size = 0; map_length = length; + /* + * Avoid races with device replace and make sure our bbio has devices + * associated to its stripes that don't go away while we are doing the + * read repair operation. 
+ */ + btrfs_bio_counter_inc_blocked(fs_info); ret = btrfs_map_block(fs_info, WRITE, logical, &map_length, &bbio, mirror_num); if (ret) { + btrfs_bio_counter_dec(fs_info); bio_put(bio); return -EIO; } @@ -2037,6 +2044,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, dev = bbio->stripes[mirror_num-1].dev; btrfs_put_bbio(bbio); if (!dev || !dev->bdev || !dev->writeable) { + btrfs_bio_counter_dec(fs_info); bio_put(bio); return -EIO; } @@ -2045,6 +2053,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) { /* try to remap that extent elsewhere? */ + btrfs_bio_counter_dec(fs_info); bio_put(bio); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); return -EIO; @@ -2054,6 +2063,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, "read error corrected: ino %llu off %llu (dev %s sector %llu)", btrfs_ino(inode), start, rcu_str_deref(dev->name), sector); + btrfs_bio_counter_dec(fs_info); bio_put(bio); return 0; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 270499598ed4..8b1212e8f7a8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6979,7 +6979,18 @@ insert: * existing will always be non-NULL, since there must be * extent causing the -EEXIST. */ - if (start >= extent_map_end(existing) || + if (existing->start == em->start && + extent_map_end(existing) == extent_map_end(em) && + em->block_start == existing->block_start) { + /* + * these two extents are the same, it happens + * with inlines especially + */ + free_extent_map(em); + em = existing; + err = 0; + + } else if (start >= extent_map_end(existing) || start <= existing->start) { /* * The existing extent map is the one nearest to diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 559170464d7c..e96634a725c3 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -718,12 +718,13 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, return count; } -void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, +int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, const u64 range_start, const u64 range_len) { struct btrfs_root *root; struct list_head splice; int done; + int total_done = 0; INIT_LIST_HEAD(&splice); @@ -742,6 +743,7 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, done = btrfs_wait_ordered_extents(root, nr, range_start, range_len); btrfs_put_fs_root(root); + total_done += done; spin_lock(&fs_info->ordered_root_lock); if (nr != -1) { @@ -752,6 +754,8 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, list_splice_tail(&splice, &fs_info->ordered_roots); spin_unlock(&fs_info->ordered_root_lock); mutex_unlock(&fs_info->ordered_operations_mutex); + + return total_done; } /* diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 2049c9be85ee..451507776ff5 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -199,7 +199,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum, int len); int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, const u64 range_start, const u64 range_len); -void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, +int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, const u64 range_start, const u64 range_len); void btrfs_get_logged_extents(struct inode *inode, struct list_head *logged_list, diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c 
index 298631eaee78..8428db7cd88f 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -761,12 +761,14 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info) do { enqueued = 0; + mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry(device, &fs_devices->devices, dev_list) { if (atomic_read(&device->reada_in_flight) < MAX_IN_FLIGHT) enqueued += reada_start_machine_dev(fs_info, device); } + mutex_unlock(&fs_devices->device_list_mutex); total += enqueued; } while (enqueued && total < 10000); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 46d847f66e4b..70427ef66b04 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -3582,6 +3582,46 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, */ scrub_pause_on(fs_info); ret = btrfs_inc_block_group_ro(root, cache); + if (!ret && is_dev_replace) { + /* + * If we are doing a device replace, wait for any tasks + * that started delalloc right before we set the block + * group to RO mode, as they might have just allocated + * an extent from it or decided they could do a nocow + * write. And if any such tasks did that, wait for their + * ordered extents to complete and then commit the + * current transaction, so that we can later see the new + * extent items in the extent tree - the ordered extents + * create delayed data references (for cow writes) when + * they complete, which will be run and insert the + * corresponding extent items into the extent tree when + * we commit the transaction they used when running + * inode.c:btrfs_finish_ordered_io(). We later use + * the commit root of the extent tree to find extents + * to copy from the srcdev into the tgtdev, and we don't + * want to miss any new extents. + */ + btrfs_wait_block_group_reservations(cache); + btrfs_wait_nocow_writers(cache); + ret = btrfs_wait_ordered_roots(fs_info, -1, + cache->key.objectid, + cache->key.offset); + if (ret > 0) { + struct btrfs_trans_handle *trans; + + trans = btrfs_join_transaction(root); + if (IS_ERR(trans)) + ret = PTR_ERR(trans); + else + ret = btrfs_commit_transaction(trans, + root); + if (ret) { + scrub_pause_off(fs_info); + btrfs_put_block_group(cache); + break; + } + } + } scrub_pause_off(fs_info); if (ret == 0) { @@ -3602,9 +3642,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, break; } + btrfs_dev_replace_lock(&fs_info->dev_replace, 1); dev_replace->cursor_right = found_key.offset + length; dev_replace->cursor_left = found_key.offset; dev_replace->item_needs_writeback = 1; + btrfs_dev_replace_unlock(&fs_info->dev_replace, 1); ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, found_key.offset, cache, is_dev_replace); @@ -3640,6 +3682,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, scrub_pause_off(fs_info); + btrfs_dev_replace_lock(&fs_info->dev_replace, 1); + dev_replace->cursor_left = dev_replace->cursor_right; + dev_replace->item_needs_writeback = 1; + btrfs_dev_replace_unlock(&fs_info->dev_replace, 1); + if (ro_set) btrfs_dec_block_group_ro(root, cache); @@ -3677,9 +3724,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, ret = -ENOMEM; break; } - - dev_replace->cursor_left = dev_replace->cursor_right; - dev_replace->item_needs_writeback = 1; skip: key.offset = found_key.offset + length; btrfs_release_path(path); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index bdc62561ede8..da9e0036a864 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2761,6 +2761,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 dev_extent_len = 0; u64 chunk_objectid =
BTRFS_FIRST_CHUNK_TREE_OBJECTID; int i, ret = 0; + struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; /* Just in case */ root = root->fs_info->chunk_root; @@ -2787,12 +2788,19 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, check_system_chunk(trans, extent_root, map->type); unlock_chunks(root->fs_info->chunk_root); + /* + * Take the device list mutex to prevent races with the final phase of + * a device replace operation that replaces the device object associated + * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()). + */ + mutex_lock(&fs_devices->device_list_mutex); for (i = 0; i < map->num_stripes; i++) { struct btrfs_device *device = map->stripes[i].dev; ret = btrfs_free_dev_extent(trans, device, map->stripes[i].physical, &dev_extent_len); if (ret) { + mutex_unlock(&fs_devices->device_list_mutex); btrfs_abort_transaction(trans, root, ret); goto out; } @@ -2811,11 +2819,14 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, if (map->stripes[i].dev) { ret = btrfs_update_device(trans, map->stripes[i].dev); if (ret) { + mutex_unlock(&fs_devices->device_list_mutex); btrfs_abort_transaction(trans, root, ret); goto out; } } } + mutex_unlock(&fs_devices->device_list_mutex); + ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset); if (ret) { btrfs_abort_transaction(trans, root, ret); @@ -5762,20 +5773,17 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, } } if (found) { - if (physical_of_found + map->stripe_len <= - dev_replace->cursor_left) { - struct btrfs_bio_stripe *tgtdev_stripe = - bbio->stripes + num_stripes; + struct btrfs_bio_stripe *tgtdev_stripe = + bbio->stripes + num_stripes; - tgtdev_stripe->physical = physical_of_found; - tgtdev_stripe->length = - bbio->stripes[index_srcdev].length; - tgtdev_stripe->dev = dev_replace->tgtdev; - bbio->tgtdev_map[index_srcdev] = num_stripes; + tgtdev_stripe->physical = physical_of_found; + tgtdev_stripe->length = + bbio->stripes[index_srcdev].length; + tgtdev_stripe->dev = dev_replace->tgtdev; + bbio->tgtdev_map[index_srcdev] = num_stripes; - tgtdev_indexes++; - num_stripes++; - } + tgtdev_indexes++; + num_stripes++; } } diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 861d611b8c05..ce5f345d70f5 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -380,7 +380,7 @@ static void cachefiles_sync_cache(struct fscache_cache *_cache) * check if the backing cache is updated to FS-Cache * - called by FS-Cache when it evaluates whether it needs to invalidate the cache */ -static bool cachefiles_check_consistency(struct fscache_operation *op) +static int cachefiles_check_consistency(struct fscache_operation *op) { struct cachefiles_object *object; struct cachefiles_cache *cache; diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index eeb71e5de27a..26a9d10d75e9 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -276,8 +276,10 @@ static void finish_read(struct ceph_osd_request *req) for (i = 0; i < num_pages; i++) { struct page *page = osd_data->pages[i]; - if (rc < 0 && rc != -ENOENT) + if (rc < 0 && rc != -ENOENT) { + ceph_fscache_readpage_cancel(inode, page); goto unlock; + } if (bytes < (int)PAGE_SIZE) { /* zero (remainder of) page */ int s = bytes < 0 ?
0 : bytes; @@ -535,8 +537,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); - ceph_readpage_to_fscache(inode, page); - set_page_writeback(page); err = ceph_osdc_writepages(osdc, ceph_vino(inode), &ci->i_layout, snapc, diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c index c052b5bf219b..238c55b01723 100644 --- a/fs/ceph/cache.c +++ b/fs/ceph/cache.c @@ -25,6 +25,7 @@ #include "cache.h" struct ceph_aux_inode { + u64 version; struct timespec mtime; loff_t size; }; @@ -69,15 +70,8 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc) fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index, &ceph_fscache_fsid_object_def, fsc, true); - - if (fsc->fscache == NULL) { + if (!fsc->fscache) pr_err("Unable to register fsid: %p fscache cookie", fsc); - return 0; - } - - fsc->revalidate_wq = alloc_workqueue("ceph-revalidate", 0, 1); - if (fsc->revalidate_wq == NULL) - return -ENOMEM; return 0; } @@ -105,6 +99,7 @@ static uint16_t ceph_fscache_inode_get_aux(const void *cookie_netfs_data, const struct inode* inode = &ci->vfs_inode; memset(&aux, 0, sizeof(aux)); + aux.version = ci->i_version; aux.mtime = inode->i_mtime; aux.size = i_size_read(inode); @@ -131,6 +126,7 @@ static enum fscache_checkaux ceph_fscache_inode_check_aux( return FSCACHE_CHECKAUX_OBSOLETE; memset(&aux, 0, sizeof(aux)); + aux.version = ci->i_version; aux.mtime = inode->i_mtime; aux.size = i_size_read(inode); @@ -181,32 +177,26 @@ static const struct fscache_cookie_def ceph_fscache_inode_object_def = { .now_uncached = ceph_fscache_inode_now_uncached, }; -void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc, - struct ceph_inode_info* ci) +void ceph_fscache_register_inode_cookie(struct inode *inode) { - struct inode* inode = &ci->vfs_inode; + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_fs_client *fsc = ceph_inode_to_client(inode); /* No caching for filesystem */ if (fsc->fscache == NULL) return; /* Only cache for regular files that are read only */ - if ((ci->vfs_inode.i_mode & S_IFREG) == 0) + if (!S_ISREG(inode->i_mode)) return; - /* Avoid multiple racing open requests */ - inode_lock(inode); - - if (ci->fscache) - goto done; - - ci->fscache = fscache_acquire_cookie(fsc->fscache, - &ceph_fscache_inode_object_def, - ci, true); - fscache_check_consistency(ci->fscache); -done: + inode_lock_nested(inode, I_MUTEX_CHILD); + if (!ci->fscache) { + ci->fscache = fscache_acquire_cookie(fsc->fscache, + &ceph_fscache_inode_object_def, + ci, false); + } inode_unlock(inode); - } void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) @@ -222,6 +212,34 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) fscache_relinquish_cookie(cookie, 0); } +static bool ceph_fscache_can_enable(void *data) +{ + struct inode *inode = data; + return !inode_is_open_for_write(inode); +} + +void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + + if (!fscache_cookie_valid(ci->fscache)) + return; + + if (inode_is_open_for_write(inode)) { + dout("fscache_file_set_cookie %p %p disabling cache\n", + inode, filp); + fscache_disable_cookie(ci->fscache, false); + fscache_uncache_all_inode_pages(ci->fscache, inode); + } else { + fscache_enable_cookie(ci->fscache, ceph_fscache_can_enable, + inode); + if (fscache_cookie_enabled(ci->fscache)) { + dout("fscache_file_set_cookie %p
%p enabling cache\n", + inode, filp); + } + } +} + static void ceph_vfs_readpage_complete(struct page *page, void *data, int error) { if (!error) @@ -238,8 +256,7 @@ static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int static inline bool cache_valid(struct ceph_inode_info *ci) { - return ((ceph_caps_issued(ci) & CEPH_CAP_FILE_CACHE) && - (ci->i_fscache_gen == ci->i_rdcache_gen)); + return ci->i_fscache_gen == ci->i_rdcache_gen; } @@ -332,69 +349,27 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page) void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc) { - if (fsc->revalidate_wq) - destroy_workqueue(fsc->revalidate_wq); - fscache_relinquish_cookie(fsc->fscache, 0); fsc->fscache = NULL; } -static void ceph_revalidate_work(struct work_struct *work) -{ - int issued; - u32 orig_gen; - struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, - i_revalidate_work); - struct inode *inode = &ci->vfs_inode; - - spin_lock(&ci->i_ceph_lock); - issued = __ceph_caps_issued(ci, NULL); - orig_gen = ci->i_rdcache_gen; - spin_unlock(&ci->i_ceph_lock); - - if (!(issued & CEPH_CAP_FILE_CACHE)) { - dout("revalidate_work lost cache before validation %p\n", - inode); - goto out; - } - - if (!fscache_check_consistency(ci->fscache)) - fscache_invalidate(ci->fscache); - - spin_lock(&ci->i_ceph_lock); - /* Update the new valid generation (backwards sanity check too) */ - if (orig_gen > ci->i_fscache_gen) { - ci->i_fscache_gen = orig_gen; - } - spin_unlock(&ci->i_ceph_lock); - -out: - iput(&ci->vfs_inode); -} - -void ceph_queue_revalidate(struct inode *inode) +/* + * caller should hold CEPH_CAP_FILE_{RD,CACHE} + */ +void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci) { - struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); - struct ceph_inode_info *ci = ceph_inode(inode); - - if (fsc->revalidate_wq == NULL || ci->fscache == NULL) + if (cache_valid(ci)) return; - ihold(inode); - - if (queue_work(ceph_sb_to_client(inode->i_sb)->revalidate_wq, - &ci->i_revalidate_work)) { - dout("ceph_queue_revalidate %p\n", inode); - } else { - dout("ceph_queue_revalidate %p failed\n)", inode); - iput(inode); + /* reuse i_truncate_mutex.
There should be no pending + * truncate while the caller holds CEPH_CAP_FILE_RD */ + mutex_lock(&ci->i_truncate_mutex); + if (!cache_valid(ci)) { + if (fscache_check_consistency(ci->fscache)) + fscache_invalidate(ci->fscache); + spin_lock(&ci->i_ceph_lock); + ci->i_fscache_gen = ci->i_rdcache_gen; + spin_unlock(&ci->i_ceph_lock); } -} - -void ceph_fscache_inode_init(struct ceph_inode_info *ci) -{ - ci->fscache = NULL; - /* The first load is verifed cookie open time */ - ci->i_fscache_gen = 1; - INIT_WORK(&ci->i_revalidate_work, ceph_revalidate_work); + mutex_unlock(&ci->i_truncate_mutex); } diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h index 5ac591bd012b..7e72c7594f0c 100644 --- a/fs/ceph/cache.h +++ b/fs/ceph/cache.h @@ -34,10 +34,10 @@ void ceph_fscache_unregister(void); int ceph_fscache_register_fs(struct ceph_fs_client* fsc); void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc); -void ceph_fscache_inode_init(struct ceph_inode_info *ci); -void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc, - struct ceph_inode_info* ci); +void ceph_fscache_register_inode_cookie(struct inode *inode); void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci); +void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp); +void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci); int ceph_readpage_from_fscache(struct inode *inode, struct page *page); int ceph_readpages_from_fscache(struct inode *inode, @@ -46,12 +46,11 @@ int ceph_readpages_from_fscache(struct inode *inode, unsigned *nr_pages); void ceph_readpage_to_fscache(struct inode *inode, struct page *page); void ceph_invalidate_fscache_page(struct inode* inode, struct page *page); -void ceph_queue_revalidate(struct inode *inode); -static inline void ceph_fscache_update_objectsize(struct inode *inode) +static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci) { - struct ceph_inode_info *ci = ceph_inode(inode); - fscache_attr_changed(ci->fscache); + ci->fscache = NULL; + ci->i_fscache_gen = 0; } static inline void ceph_fscache_invalidate(struct inode *inode) @@ -88,6 +87,11 @@ static inline void ceph_fscache_readpages_cancel(struct inode *inode, return fscache_readpages_cancel(ci->fscache, pages); } +static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci) +{ + ci->i_fscache_gen = ci->i_rdcache_gen - 1; +} + #else static inline int ceph_fscache_register(void) @@ -112,8 +116,20 @@ static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci) { } -static inline void ceph_fscache_register_inode_cookie(struct ceph_fs_client* parent_fsc, - struct ceph_inode_info* ci) +static inline void ceph_fscache_register_inode_cookie(struct inode *inode) +{ +} + +static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) +{ +} + +static inline void ceph_fscache_file_set_cookie(struct inode *inode, + struct file *filp) +{ +} + +static inline void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci) { } @@ -141,10 +157,6 @@ static inline void ceph_readpage_to_fscache(struct inode *inode, { } -static inline void ceph_fscache_update_objectsize(struct inode *inode) -{ -} - static inline void ceph_fscache_invalidate(struct inode *inode) { } @@ -154,10 +166,6 @@ static inline void ceph_invalidate_fscache_page(struct inode *inode, { } -static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) -{ -} - static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp) { return 1; @@ -173,7 +181,7 @@ static inline void 
ceph_fscache_readpages_cancel(struct inode *inode, { } -static inline void ceph_queue_revalidate(struct inode *inode) +static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci) { } diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index c17b5d76d75e..6f60d0a3d0f9 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2393,6 +2393,9 @@ again: snap_rwsem_locked = true; } *got = need | (have & want); + if ((need & CEPH_CAP_FILE_RD) && + !(*got & CEPH_CAP_FILE_CACHE)) + ceph_disable_fscache_readpage(ci); __take_cap_refs(ci, *got, true); ret = 1; } @@ -2554,6 +2557,9 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, break; } + if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE)) + ceph_fscache_revalidate_cookie(ci); + *got = _got; return 0; } @@ -2795,7 +2801,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, bool writeback = false; bool queue_trunc = false; bool queue_invalidate = false; - bool queue_revalidate = false; bool deleted_inode = false; bool fill_inline = false; @@ -2837,8 +2842,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, ci->i_rdcache_revoking = ci->i_rdcache_gen; } } - - ceph_fscache_invalidate(inode); } /* side effects now are allowed */ @@ -2880,11 +2883,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, } } - /* Do we need to revalidate our fscache cookie. Don't bother on the - * first cache cap as we already validate at cookie creation time. */ - if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1) - queue_revalidate = true; - if (newcaps & CEPH_CAP_ANY_RD) { /* ctime/mtime/atime? */ ceph_decode_timespec(&mtime, &grant->mtime); @@ -2993,11 +2991,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, if (fill_inline) ceph_fill_inline_data(inode, NULL, inline_data, inline_len); - if (queue_trunc) { + if (queue_trunc) ceph_queue_vmtruncate(inode); - ceph_queue_revalidate(inode); - } else if (queue_revalidate) - ceph_queue_revalidate(inode); if (writeback) /* @@ -3199,10 +3194,8 @@ static void handle_cap_trunc(struct inode *inode, truncate_seq, truncate_size, size); spin_unlock(&ci->i_ceph_lock); - if (queue_trunc) { + if (queue_trunc) ceph_queue_vmtruncate(inode); - ceph_fscache_invalidate(inode); - } } /* diff --git a/fs/ceph/file.c b/fs/ceph/file.c index a888df6f2d71..ce2f5795e44b 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -137,23 +137,11 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) { struct ceph_file_info *cf; int ret = 0; - struct ceph_inode_info *ci = ceph_inode(inode); - struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); - struct ceph_mds_client *mdsc = fsc->mdsc; switch (inode->i_mode & S_IFMT) { case S_IFREG: - /* First file open request creates the cookie, we want to keep - * this cookie around for the filetime of the inode as not to - * have to worry about fscache register / revoke / operation - * races. - * - * Also, if we know the operation is going to invalidate data - * (non readonly) just nuke the cache right away. 
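The caps.c changes above move fscache cookie revalidation out of a workqueue and into the cap-acquisition path: readpage caching is disabled whenever Fr is granted without Fc, and the cookie is revalidated once both are held. A minimal sketch of a read path relying on that guarantee (not part of the patch; my_ceph_read() is invented, while ceph_get_caps(), ceph_put_cap_refs() and the CEPH_CAP_FILE_* flags are the real interfaces touched above):

    static ssize_t my_ceph_read(struct ceph_inode_info *ci, loff_t endoff)
    {
        int got = 0;
        int ret;

        /* ceph_get_caps() now revalidates the fscache cookie itself when
         * both Fr (FILE_RD) and Fc (FILE_CACHE) are granted, and disables
         * fscache readpage when Fc is missing. */
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
                            endoff, &got, NULL);
        if (ret < 0)
            return ret;

        /* ... issue reads; the fscache readpage path can now trust
         * ci->i_fscache_gen without a separate revalidate work item ... */

        ceph_put_cap_refs(ci, got);
        return 0;
    }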
- */ - ceph_fscache_register_inode_cookie(mdsc->fsc, ci); - if ((fmode & CEPH_FILE_MODE_WR)) - ceph_fscache_invalidate(inode); + ceph_fscache_register_inode_cookie(inode); + ceph_fscache_file_set_cookie(inode, file); case S_IFDIR: dout("init_file %p %p 0%o (regular)\n", inode, file, inode->i_mode); @@ -1349,7 +1337,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) } retry_snap: - if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) { + if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) { err = -ENOSPC; goto out; } @@ -1407,7 +1395,6 @@ retry_snap: iov_iter_advance(from, written); ceph_put_snap_context(snapc); } else { - loff_t old_size = i_size_read(inode); /* * No need to acquire the i_truncate_mutex. Because * the MDS revokes Fwb caps before sending truncate @@ -1418,8 +1405,6 @@ retry_snap: written = generic_perform_write(file, from, pos); if (likely(written >= 0)) iocb->ki_pos = pos + written; - if (i_size_read(inode) > old_size) - ceph_fscache_update_objectsize(inode); inode_unlock(inode); } @@ -1440,7 +1425,7 @@ retry_snap: ceph_put_cap_refs(ci, got); if (written >= 0) { - if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL)) + if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL)) iocb->ki_flags |= IOCB_DSYNC; written = generic_write_sync(iocb, written); @@ -1672,8 +1657,8 @@ static long ceph_fallocate(struct file *file, int mode, goto unlock; } - if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) && - !(mode & FALLOC_FL_PUNCH_HOLE)) { + if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && + !(mode & FALLOC_FL_PUNCH_HOLE)) { ret = -ENOSPC; goto unlock; } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 0130a8592191..0168b49fb6ad 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -103,7 +103,6 @@ struct ceph_fs_client { #ifdef CONFIG_CEPH_FSCACHE struct fscache_cookie *fscache; - struct workqueue_struct *revalidate_wq; #endif }; @@ -360,8 +359,7 @@ struct ceph_inode_info { #ifdef CONFIG_CEPH_FSCACHE struct fscache_cookie *fscache; - u32 i_fscache_gen; /* sequence, for delayed fscache validate */ - struct work_struct i_revalidate_work; + u32 i_fscache_gen; #endif struct inode vfs_inode; /* at end */ }; diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 0b2954d7172d..37c134a132c7 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -95,8 +95,6 @@ static struct ctl_table pty_root_table[] = { static DEFINE_MUTEX(allocated_ptys_lock); -static struct vfsmount *devpts_mnt; - struct pts_mount_opts { int setuid; int setgid; @@ -104,7 +102,7 @@ struct pts_mount_opts { kgid_t gid; umode_t mode; umode_t ptmxmode; - int newinstance; + int reserve; int max; }; @@ -117,11 +115,9 @@ static const match_table_t tokens = { {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_mode, "mode=%o"}, -#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES {Opt_ptmxmode, "ptmxmode=%o"}, {Opt_newinstance, "newinstance"}, {Opt_max, "max=%d"}, -#endif {Opt_err, NULL} }; @@ -137,15 +133,48 @@ static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb) return sb->s_fs_info; } -static inline struct super_block *pts_sb_from_inode(struct inode *inode) +struct pts_fs_info *devpts_acquire(struct file *filp) { -#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES - if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) - return inode->i_sb; -#endif - if (!devpts_mnt) - return NULL; - return devpts_mnt->mnt_sb; + struct pts_fs_info *result; + struct path path; + struct super_block *sb; + int err; + + path = filp->f_path; + path_get(&path); + + /* Has the devpts filesystem already been found? 
*/ + sb = path.mnt->mnt_sb; + if (sb->s_magic != DEVPTS_SUPER_MAGIC) { + /* Is a devpts filesystem at "pts" in the same directory? */ + err = path_pts(&path); + if (err) { + result = ERR_PTR(err); + goto out; + } + + /* Is the path the root of a devpts filesystem? */ + result = ERR_PTR(-ENODEV); + sb = path.mnt->mnt_sb; + if ((sb->s_magic != DEVPTS_SUPER_MAGIC) || + (path.mnt->mnt_root != sb->s_root)) + goto out; + } + + /* + * pty code needs to hold extra references in case of last /dev/tty close + */ + atomic_inc(&sb->s_active); + result = DEVPTS_SB(sb); + +out: + path_put(&path); + return result; +} + +void devpts_release(struct pts_fs_info *fsi) +{ + deactivate_super(fsi->sb); } #define PARSE_MOUNT 0 @@ -154,9 +183,7 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode) /* * parse_mount_options(): * Set @opts to mount options specified in @data. If an option is not - * specified in @data, set it to its default value. The exception is - * 'newinstance' option which can only be set/cleared on a mount (i.e. - * cannot be changed during remount). + * specified in @data, set it to its default value. * * Note: @data may be NULL (in which case all options are set to default). */ @@ -174,9 +201,12 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; opts->max = NR_UNIX98_PTY_MAX; - /* newinstance makes sense only on initial mount */ + /* Only allow instances mounted from the initial mount + * namespace to tap the reserve pool of ptys. + */ if (op == PARSE_MOUNT) - opts->newinstance = 0; + opts->reserve = + (current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns); while ((p = strsep(&data, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; @@ -211,16 +241,12 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) return -EINVAL; opts->mode = option & S_IALLUGO; break; -#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES case Opt_ptmxmode: if (match_octal(&args[0], &option)) return -EINVAL; opts->ptmxmode = option & S_IALLUGO; break; case Opt_newinstance: - /* newinstance makes sense only on initial mount */ - if (op == PARSE_MOUNT) - opts->newinstance = 1; break; case Opt_max: if (match_int(&args[0], &option) || @@ -228,7 +254,6 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) return -EINVAL; opts->max = option; break; -#endif default: pr_err("called with bogus options\n"); return -EINVAL; @@ -238,7 +263,6 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) return 0; } -#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES static int mknod_ptmx(struct super_block *sb) { int mode; @@ -305,12 +329,6 @@ static void update_ptmx_mode(struct pts_fs_info *fsi) inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode; } } -#else -static inline void update_ptmx_mode(struct pts_fs_info *fsi) -{ - return; -} -#endif static int devpts_remount(struct super_block *sb, int *flags, char *data) { @@ -344,11 +362,9 @@ static int devpts_show_options(struct seq_file *seq, struct dentry *root) seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, opts->gid)); seq_printf(seq, ",mode=%03o", opts->mode); -#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode); if (opts->max < NR_UNIX98_PTY_MAX) seq_printf(seq, ",max=%d", opts->max); -#endif return 0; } @@ -410,40 +426,11 @@ fail: return -ENOMEM; } -#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES -static int compare_init_pts_sb(struct super_block *s, void *p) -{ - if (devpts_mnt) - return 
devpts_mnt->mnt_sb == s; - return 0; -} - /* * devpts_mount() * - * If the '-o newinstance' mount option was specified, mount a new - * (private) instance of devpts. PTYs created in this instance are - * independent of the PTYs in other devpts instances. - * - * If the '-o newinstance' option was not specified, mount/remount the - * initial kernel mount of devpts. This type of mount gives the - * legacy, single-instance semantics. - * - * The 'newinstance' option is needed to support multiple namespace - * semantics in devpts while preserving backward compatibility of the - * current 'single-namespace' semantics. i.e all mounts of devpts - * without the 'newinstance' mount option should bind to the initial - * kernel mount, like mount_single(). - * - * Mounts with 'newinstance' option create a new, private namespace. - * - * NOTE: - * - * For single-mount semantics, devpts cannot use mount_single(), - * because mount_single()/sget() find and use the super-block from - * the most recent mount of devpts. But that recent mount may be a - * 'newinstance' mount and mount_single() would pick the newinstance - * super-block instead of the initial super-block. + * Mount a new (private) instance of devpts. PTYs created in this + * instance are independent of the PTYs in other devpts instances. */ static struct dentry *devpts_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) @@ -456,18 +443,7 @@ static struct dentry *devpts_mount(struct file_system_type *fs_type, if (error) return ERR_PTR(error); - /* Require newinstance for all user namespace mounts to ensure - * the mount options are not changed. - */ - if ((current_user_ns() != &init_user_ns) && !opts.newinstance) - return ERR_PTR(-EINVAL); - - if (opts.newinstance) - s = sget(fs_type, NULL, set_anon_super, flags, NULL); - else - s = sget(fs_type, compare_init_pts_sb, set_anon_super, flags, - NULL); - + s = sget(fs_type, NULL, set_anon_super, flags, NULL); if (IS_ERR(s)) return ERR_CAST(s); @@ -491,18 +467,6 @@ out_undo_sget: return ERR_PTR(error); } -#else -/* - * This supports only the legacy single-instance semantics (no - * multiple-instance semantics) - */ -static struct dentry *devpts_mount(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data) -{ - return mount_single(fs_type, flags, data, devpts_fill_super); -} -#endif - static void devpts_kill_sb(struct super_block *sb) { struct pts_fs_info *fsi = DEVPTS_SB(sb); @@ -516,9 +480,7 @@ static struct file_system_type devpts_fs_type = { .name = "devpts", .mount = devpts_mount, .kill_sb = devpts_kill_sb, -#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES .fs_flags = FS_USERNS_MOUNT | FS_USERNS_DEV_MOUNT, -#endif }; /* @@ -531,16 +493,13 @@ int devpts_new_index(struct pts_fs_info *fsi) int index; int ida_ret; - if (!fsi) - return -ENODEV; - retry: if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) return -ENOMEM; mutex_lock(&allocated_ptys_lock); - if (pty_count >= pty_limit - - (fsi->mount_opts.newinstance ? pty_reserve : 0)) { + if (pty_count >= (pty_limit - + (fsi->mount_opts.reserve ? 
0 : pty_reserve))) { mutex_unlock(&allocated_ptys_lock); return -ENOSPC; } @@ -571,30 +530,6 @@ void devpts_kill_index(struct pts_fs_info *fsi, int idx) mutex_unlock(&allocated_ptys_lock); } -/* - * pty code needs to hold extra references in case of last /dev/tty close - */ -struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file) -{ - struct super_block *sb; - struct pts_fs_info *fsi; - - sb = pts_sb_from_inode(ptmx_inode); - if (!sb) - return NULL; - fsi = DEVPTS_SB(sb); - if (!fsi) - return NULL; - - atomic_inc(&sb->s_active); - return fsi; -} - -void devpts_put_ref(struct pts_fs_info *fsi) -{ - deactivate_super(fsi->sb); -} - /** * devpts_pty_new -- create a new inode in /dev/pts/ * @ptmx_inode: inode of the master @@ -607,16 +542,12 @@ void devpts_put_ref(struct pts_fs_info *fsi) struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv) { struct dentry *dentry; - struct super_block *sb; + struct super_block *sb = fsi->sb; struct inode *inode; struct dentry *root; struct pts_mount_opts *opts; char s[12]; - if (!fsi) - return ERR_PTR(-ENODEV); - - sb = fsi->sb; root = sb->s_root; opts = &fsi->mount_opts; @@ -676,20 +607,8 @@ void devpts_pty_kill(struct dentry *dentry) static int __init init_devpts_fs(void) { int err = register_filesystem(&devpts_fs_type); - struct ctl_table_header *table; - if (!err) { - struct vfsmount *mnt; - - table = register_sysctl_table(pty_root_table); - mnt = kern_mount(&devpts_fs_type); - if (IS_ERR(mnt)) { - err = PTR_ERR(mnt); - unregister_filesystem(&devpts_fs_type); - unregister_sysctl_table(table); - } else { - devpts_mnt = mnt; - } + register_sysctl_table(pty_root_table); } return err; } diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 3078b679fcd1..c8c4f79c7ce1 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -887,6 +887,8 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) put_page(results[i]); } + wake_up_bit(&cookie->flags, 0); + _leave(""); } diff --git a/fs/namei.c b/fs/namei.c index 4c4f95ac8aa5..6a82fb7e2127 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1416,21 +1416,28 @@ static void follow_mount(struct path *path) } } +static int path_parent_directory(struct path *path) +{ + struct dentry *old = path->dentry; + /* rare case of legitimate dget_parent()... */ + path->dentry = dget_parent(path->dentry); + dput(old); + if (unlikely(!path_connected(path))) + return -ENOENT; + return 0; +} + static int follow_dotdot(struct nameidata *nd) { while(1) { - struct dentry *old = nd->path.dentry; - if (nd->path.dentry == nd->root.dentry && nd->path.mnt == nd->root.mnt) { break; } if (nd->path.dentry != nd->path.mnt->mnt_root) { - /* rare case of legitimate dget_parent()... */ - nd->path.dentry = dget_parent(nd->path.dentry); - dput(old); - if (unlikely(!path_connected(&nd->path))) - return -ENOENT; + int ret = path_parent_directory(&nd->path); + if (ret) + return ret; break; } if (!follow_up(&nd->path)) @@ -2514,6 +2521,34 @@ struct dentry *lookup_one_len_unlocked(const char *name, } EXPORT_SYMBOL(lookup_one_len_unlocked); +#ifdef CONFIG_UNIX98_PTYS +int path_pts(struct path *path) +{ + /* Find something mounted on "pts" in the same directory as + * the input path. 
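Taken together, the reworked devpts interface above (devpts_acquire()/devpts_release() replacing devpts_get_ref()/devpts_put_ref(), plus the index allocator) is meant to be driven roughly like this - a sketch only, with my_ptmx_open() invented for illustration:

    static int my_ptmx_open(struct inode *inode, struct file *filp)
    {
        struct pts_fs_info *fsi;
        int index;

        fsi = devpts_acquire(filp);    /* resolves the adjacent "pts" mount */
        if (IS_ERR(fsi))
            return PTR_ERR(fsi);

        index = devpts_new_index(fsi); /* -ENOSPC once the pty limit, minus
                                        * the reserve pool for non-reserve
                                        * mounts, is reached */
        if (index < 0) {
            devpts_release(fsi);
            return index;
        }

        /* ... devpts_pty_new(fsi, index, priv) then instantiates the
         * /dev/pts/<index> inode for the slave side ... */
        return 0;
    }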
+ */ + struct dentry *child, *parent; + struct qstr this; + int ret; + + ret = path_parent_directory(path); + if (ret) + return ret; + + parent = path->dentry; + this.name = "pts"; + this.len = 3; + child = d_hash_and_lookup(parent, &this); + if (!child) + return -ENOENT; + + path->dentry = child; + dput(parent); + follow_mount(path); + return 0; +} +#endif + int user_path_at_empty(int dfd, const char __user *name, unsigned flags, struct path *path, int *empty) { diff --git a/include/acpi/video.h b/include/acpi/video.h index 70a41f742037..5731ccb42585 100644 --- a/include/acpi/video.h +++ b/include/acpi/video.h @@ -51,7 +51,8 @@ extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type); */ extern bool acpi_video_handles_brightness_key_presses(void); extern int acpi_video_get_levels(struct acpi_device *device, - struct acpi_video_device_brightness **dev_br); + struct acpi_video_device_brightness **dev_br, + int *pmax_level); #else static inline int acpi_video_register(void) { return 0; } static inline void acpi_video_unregister(void) { return; } @@ -72,7 +73,8 @@ static inline bool acpi_video_handles_brightness_key_presses(void) return false; } static inline int acpi_video_get_levels(struct acpi_device *device, - struct acpi_video_device_brightness **dev_br) + struct acpi_video_device_brightness **dev_br, + int *pmax_level) { return -ENODEV; } diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 84f1a8eefbdb..04310cb08111 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -52,10 +52,12 @@ #include <linux/poll.h> #include <linux/ratelimit.h> #include <linux/sched.h> +#include <linux/seqlock.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> +#include <linux/fence.h> #include <asm/mman.h> #include <asm/pgalloc.h> @@ -66,6 +68,7 @@ #include <drm/drm_agpsupport.h> #include <drm/drm_crtc.h> +#include <drm/drm_fourcc.h> #include <drm/drm_global.h> #include <drm/drm_hashtab.h> #include <drm/drm_mem_util.h> @@ -281,13 +284,14 @@ struct drm_ioctl_desc { /* Event queued up for userspace to read */ struct drm_pending_event { + struct completion *completion; struct drm_event *event; + struct fence *fence; struct list_head link; struct list_head pending_link; struct drm_file *file_priv; pid_t pid; /* pid of requester, no guarantee it's valid by the time we deliver the event, for tracing only */ - void (*destroy)(struct drm_pending_event *event); }; /* initial implementation using a linked list - todo hashtab */ @@ -392,11 +396,6 @@ struct drm_master { void *driver_priv; }; -/* Size of ringbuffer for vblank timestamps. Just double-buffer - * in initial implementation. - */ -#define DRM_VBLANKTIME_RBSIZE 2 - /* Flags and return codes for get_vblank_timestamp() driver function. */ #define DRM_CALLED_FROM_VBLIRQ 1 #define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) @@ -420,8 +419,6 @@ struct drm_driver { void (*postclose) (struct drm_device *, struct drm_file *); void (*lastclose) (struct drm_device *); int (*unload) (struct drm_device *); - int (*suspend) (struct drm_device *, pm_message_t state); - int (*resume) (struct drm_device *); int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); int (*dma_quiescent) (struct drm_device *); int (*context_dtor) (struct drm_device *dev, int context); @@ -434,7 +431,7 @@ struct drm_driver { * * Driver callback for fetching a raw hardware vblank counter for @crtc.
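The kerneldoc being rewritten here points drivers without a usable hardware counter at drm_vblank_no_hw_counter(). The wiring is a one-liner; a sketch (my_driver is invented, the helper and the hook are the real ones declared further down):

    static struct drm_driver my_driver = {
        /* no hardware counter: let the DRM core keep a virtual one */
        .get_vblank_counter = drm_vblank_no_hw_counter,
        /* .enable_vblank/.disable_vblank must then genuinely unmask and
         * mask the interrupt, as the updated documentation below notes */
    };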
* If a device doesn't have a hardware counter, the driver can simply - return the value of drm_vblank_count. The DRM core will account for + use the drm_vblank_no_hw_counter() function. The DRM core will account for missed vblank events while interrupts were disabled based on system * timestamps. * @@ -452,8 +449,8 @@ * @pipe: which irq to enable * * Enable vblank interrupts for @crtc. If the device doesn't have - * a hardware vblank counter, this routine should be a no-op, since - * interrupts will have to stay on to keep the count accurate. + * a hardware vblank counter, the driver should use the + * drm_vblank_no_hw_counter() function that keeps a virtual counter. * * RETURNS * Zero on success, appropriate errno if the given @crtc's vblank @@ -467,8 +464,8 @@ * @pipe: which irq to disable * * Disable vblank interrupts for @crtc. If the device doesn't have - * a hardware vblank counter, this routine should be a no-op, since - * interrupts will have to stay on to keep the count accurate. + * a hardware vblank counter, the driver should use the + * drm_vblank_no_hw_counter() function that keeps a virtual counter. */ void (*disable_vblank) (struct drm_device *dev, unsigned int pipe); @@ -725,10 +722,10 @@ struct drm_vblank_crtc { wait_queue_head_t queue; /**< VBLANK wait queue */ struct timer_list disable_timer; /* delayed disable timer */ - /* vblank counter, protected by dev->vblank_time_lock for writes */ - u32 count; - /* vblank timestamps, protected by dev->vblank_time_lock for writes */ - struct timeval time[DRM_VBLANKTIME_RBSIZE]; + seqlock_t seqlock; /* protects vblank count and time */ + + u32 count; /* vblank counter */ + struct timeval time; /* vblank timestamp */ atomic_t refcount; /* number of users of vblank interrupts per crtc */ u32 last; /* protected by dev->vbl_lock, used */ @@ -972,18 +969,12 @@ extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, struct timeval *vblanktime); extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, struct timeval *vblanktime); -extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, - struct drm_pending_vblank_event *e); extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e); -extern void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe, - struct drm_pending_vblank_event *e); extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, struct drm_pending_vblank_event *e); extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc); -extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe); -extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe); extern int drm_crtc_vblank_get(struct drm_crtc *crtc); extern void drm_crtc_vblank_put(struct drm_crtc *crtc); extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe); @@ -994,6 +985,7 @@ extern void drm_crtc_vblank_off(struct drm_crtc *crtc); extern void drm_crtc_vblank_reset(struct drm_crtc *crtc); extern void drm_crtc_vblank_on(struct drm_crtc *crtc); extern void drm_vblank_cleanup(struct drm_device *dev); +extern u32 drm_accurate_vblank_count(struct drm_crtc *crtc); extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe); extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 92c84e9ab09a..856a9c85a838 ---
a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -30,6 +30,12 @@ #include <drm/drm_crtc.h> +void drm_crtc_commit_put(struct drm_crtc_commit *commit); +static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit) +{ + kref_get(&commit->ref); +} + struct drm_atomic_state * __must_check drm_atomic_state_alloc(struct drm_device *dev); void drm_atomic_state_clear(struct drm_atomic_state *state); @@ -71,7 +77,7 @@ static inline struct drm_crtc_state * drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state, struct drm_crtc *crtc) { - return state->crtc_states[drm_crtc_index(crtc)]; + return state->crtcs[drm_crtc_index(crtc)].state; } /** @@ -86,7 +92,7 @@ static inline struct drm_plane_state * drm_atomic_get_existing_plane_state(struct drm_atomic_state *state, struct drm_plane *plane) { - return state->plane_states[drm_plane_index(plane)]; + return state->planes[drm_plane_index(plane)].state; } /** @@ -106,7 +112,43 @@ drm_atomic_get_existing_connector_state(struct drm_atomic_state *state, if (index >= state->num_connector) return NULL; - return state->connector_states[index]; + return state->connectors[index].state; +} + +/** + * __drm_atomic_get_current_plane_state - get current plane state + * @state: global atomic state object + * @plane: plane to grab + * + * This function returns the plane state for the given plane, either from + * @state, or if the plane isn't part of the atomic state update, from @plane. + * This is useful in atomic check callbacks, when drivers need to peek at, but + * not change, the state of other planes, since it avoids threading an error code + * back up the call chain. + * + * WARNING: + * + * Note that this function is in general unsafe since it doesn't check for the + * required locking to access state structures. Drivers must ensure that it is + * safe to access the returned state structure through other means. One common + * example is when planes are fixed to a single CRTC, and the driver knows that + * the CRTC lock is held already. In that case holding the CRTC lock gives a + * read-lock on all planes connected to that CRTC. But if planes can be + * reassigned things get more tricky. In that case it's better to use + * drm_atomic_get_plane_state and wire up full error handling. + * + * Returns: + * + * Read-only pointer to the current plane state.
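A sketch of the peek-only usage this WARNING has in mind, for the helper defined just below (hypothetical driver code; my_other_plane() is invented, the helper and the &drm_plane_state fields are real):

    static int my_plane_atomic_check(struct drm_plane *plane,
                                     struct drm_plane_state *new_state)
    {
        /* Safe only because, on this hypothetical hardware, both planes
         * are fixed to a single CRTC whose lock is already held. */
        const struct drm_plane_state *other =
            __drm_atomic_get_current_plane_state(new_state->state,
                                                 my_other_plane(plane));

        /* read-only peek, e.g. to reject overlapping scanout windows */
        if (other->fb && other->crtc_x == new_state->crtc_x)
            return -EINVAL;

        return 0;
    }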
+ */ +static inline const struct drm_plane_state * +__drm_atomic_get_current_plane_state(struct drm_atomic_state *state, + struct drm_plane *plane) +{ + if (state->planes[drm_plane_index(plane)].state) + return state->planes[drm_plane_index(plane)].state; + + return plane->state; } int __must_check @@ -139,29 +181,39 @@ int __must_check drm_atomic_check_only(struct drm_atomic_state *state); int __must_check drm_atomic_commit(struct drm_atomic_state *state); int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); -#define for_each_connector_in_state(state, connector, connector_state, __i) \ +#define for_each_connector_in_state(__state, connector, connector_state, __i) \ for ((__i) = 0; \ - (__i) < (state)->num_connector && \ - ((connector) = (state)->connectors[__i], \ - (connector_state) = (state)->connector_states[__i], 1); \ + (__i) < (__state)->num_connector && \ + ((connector) = (__state)->connectors[__i].ptr, \ + (connector_state) = (__state)->connectors[__i].state, 1); \ (__i)++) \ for_each_if (connector) -#define for_each_crtc_in_state(state, crtc, crtc_state, __i) \ +#define for_each_crtc_in_state(__state, crtc, crtc_state, __i) \ for ((__i) = 0; \ - (__i) < (state)->dev->mode_config.num_crtc && \ - ((crtc) = (state)->crtcs[__i], \ - (crtc_state) = (state)->crtc_states[__i], 1); \ + (__i) < (__state)->dev->mode_config.num_crtc && \ + ((crtc) = (__state)->crtcs[__i].ptr, \ + (crtc_state) = (__state)->crtcs[__i].state, 1); \ (__i)++) \ for_each_if (crtc_state) -#define for_each_plane_in_state(state, plane, plane_state, __i) \ +#define for_each_plane_in_state(__state, plane, plane_state, __i) \ for ((__i) = 0; \ - (__i) < (state)->dev->mode_config.num_total_plane && \ - ((plane) = (state)->planes[__i], \ - (plane_state) = (state)->plane_states[__i], 1); \ + (__i) < (__state)->dev->mode_config.num_total_plane && \ + ((plane) = (__state)->planes[__i].ptr, \ + (plane_state) = (__state)->planes[__i].state, 1); \ (__i)++) \ for_each_if (plane_state) + +/** + * drm_atomic_crtc_needs_modeset - compute combined modeset need + * @state: &drm_crtc_state for the CRTC + * + * To give drivers flexibility, struct &drm_crtc_state has 3 booleans to track + * whether the CRTC state changed enough to need a full modeset cycle: + * connectors_changed, mode_changed and active_changed. This helper simply + * combines these three to compute the overall need for a modeset for @state.
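The hunk below truncates the helper's body at the opening brace; for reference, in the tree at this point it simply ORs the three flags just named:

    /* body elided by the hunk that follows; in the tree it reads: */
    return state->mode_changed || state->active_changed ||
           state->connectors_changed;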
+ */ static inline bool drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state) { diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index d473dcc91f54..d86ae5dcd7b4 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -38,6 +38,7 @@ int drm_atomic_helper_check_planes(struct drm_device *dev, struct drm_atomic_state *state); int drm_atomic_helper_check(struct drm_device *dev, struct drm_atomic_state *state); +void drm_atomic_helper_commit_tail(struct drm_atomic_state *state); int drm_atomic_helper_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock); @@ -71,8 +72,15 @@ void drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_sta void drm_atomic_helper_disable_planes_on_crtc(struct drm_crtc *crtc, bool atomic); -void drm_atomic_helper_swap_state(struct drm_device *dev, - struct drm_atomic_state *state); +void drm_atomic_helper_swap_state(struct drm_atomic_state *state, + bool stall); + +/* nonblocking commit helpers */ +int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, + bool nonblock); +void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state); +void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state); +void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state); /* implementations for legacy interfaces */ int drm_atomic_helper_update_plane(struct drm_plane *plane, @@ -147,9 +155,9 @@ void __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state); void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, struct drm_connector_state *state); -void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, - u16 *red, u16 *green, u16 *blue, - uint32_t start, uint32_t size); +int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, + u16 *red, u16 *green, u16 *blue, + uint32_t size); /** * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC @@ -159,7 +167,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, * This iterates over the current state, useful (for example) when applying * atomic state after it has been checked and swapped. To iterate over the * planes which *will* be attached (for ->atomic_check()) see - * drm_crtc_for_each_pending_plane() + * drm_atomic_crtc_state_for_each_plane(). */ #define drm_atomic_crtc_for_each_plane(plane, crtc) \ drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask) @@ -171,11 +179,31 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, * * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be * attached if the specified state is applied. Useful during (for example) - * ->atomic_check() operations, to validate the incoming state + * ->atomic_check() operations, to validate the incoming state. */ #define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \ drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) +/** + * drm_atomic_crtc_state_for_each_plane_state - iterate over attached planes in new state + * @plane: the loop cursor + * @plane_state: loop cursor for the plane's state, must be const + * @crtc_state: the incoming crtc-state + * + * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be + * attached if the specified state is applied. Useful during (for example) + * ->atomic_check() operations, to validate the incoming state.
+ * + * Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a + * const plane_state. This is useful when a driver just wants to peek at other + * active planes on this crtc, but does not need to change it. + */ +#define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \ + drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \ + for_each_if ((plane_state = \ + __drm_atomic_get_current_plane_state((crtc_state)->state, \ + plane))) + /* * drm_atomic_plane_disabling - check whether a plane is being disabled * @plane: plane object diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index d1559cd04e3d..914baa8c161d 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -253,6 +253,8 @@ struct drm_framebuffer { int bits_per_pixel; int flags; uint32_t pixel_format; /* fourcc format */ + int hot_x; + int hot_y; struct list_head filp_head; }; @@ -314,6 +316,7 @@ struct drm_plane_helper_funcs; * update to ensure framebuffer cleanup isn't done too early * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings * @mode: current mode timings + * @mode_blob: &drm_property_blob for @mode * @degamma_lut: Lookup table for converting framebuffer pixel data * before apply the conversion matrix * @ctm: Transformation matrix @@ -478,8 +481,8 @@ struct drm_crtc_funcs { * going on, which should eventually be unified to just one set of * hooks. */ - void (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, - uint32_t start, uint32_t size); + int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, + uint32_t size); /** * @destroy: @@ -708,6 +711,7 @@ struct drm_crtc_funcs { * @dev: parent DRM device * @port: OF node used by drm_of_find_possible_crtcs() * @head: list management + * @name: human readable name, can be overwritten by the driver * @mutex: per-CRTC locking * @base: base KMS object for ID tracking etc. * @primary: primary plane for this CRTC @@ -724,9 +728,6 @@ struct drm_crtc_funcs { * @gamma_store: gamma ramp values * @helper_private: mid-layer private data * @properties: property tracking for this CRTC - * @state: current atomic state for this CRTC - * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for - * legacy IOCTLs * * Each CRTC may have one or more connectors associated with it. This structure * allows the CRTC to be controlled. @@ -738,12 +739,13 @@ struct drm_crtc { char *name; - /* - * crtc mutex + /** + * @mutex: * * This provides a read lock for the overall crtc state (mode, dpms * state, ...) and a write lock for everything which can be updated - without a full modeset (fb, cursor data, ...) + without a full modeset (fb, cursor data, crtc properties ...). Full + modeset also needs to grab dev->mode_config.connection_mutex. */ struct drm_modeset_lock mutex; @@ -753,6 +755,9 @@ struct drm_crtc { struct drm_plane *primary; struct drm_plane *cursor; + /* position inside the mode_config.list, can be used as a [] idx */ + unsigned index; + /* position of cursor plane on crtc */ int cursor_x; int cursor_y; @@ -779,11 +784,37 @@ struct drm_crtc { struct drm_object_properties properties; + /** + * @state: + * + * Current atomic state for this CRTC. + */ struct drm_crtc_state *state; - /* - * For legacy crtc IOCTLs so that atomic drivers can get at the locking - * acquire context. + /** + * @commit_list: + * + * List of &drm_crtc_commit structures tracking pending commits. + * Protected by @commit_lock.
This list doesn't hold its own full + reference, but borrows it from the ongoing commit. Commit entries + must be removed from this list once the commit is fully completed, + but before its corresponding &drm_atomic_state gets destroyed. + */ + struct list_head commit_list; + + /** + * @commit_lock: + * + * Spinlock to protect @commit_list. + */ + spinlock_t commit_lock; + + /** + * @acquire_ctx: + * + * Per-CRTC implicit acquire context used by atomic drivers for legacy + * IOCTLs, so that atomic drivers can get at the locking acquire + * context. */ struct drm_modeset_acquire_ctx *acquire_ctx; }; @@ -1078,7 +1109,7 @@ struct drm_encoder_funcs { * @dev: parent DRM device * @head: list management * @base: base KMS object - * @name: encoder name + * @name: human readable name, can be overwritten by the driver * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h * @possible_crtcs: bitmask of potential CRTC bindings * @possible_clones: bitmask of potential sibling encoders for cloning @@ -1097,6 +1128,10 @@ struct drm_encoder { struct drm_mode_object base; char *name; int encoder_type; + + /* position inside the mode_config.list, can be used as a [] idx */ + unsigned index; + uint32_t possible_crtcs; uint32_t possible_clones; @@ -1124,7 +1159,8 @@ struct drm_encoder { * @attr: sysfs attributes * @head: list management * @base: base KMS object - * @name: connector name + * @name: human readable name, can be overwritten by the driver + * @connector_id: compacted connector id, useful for indexing arrays * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h * @connector_type_id: index into connector type enum * @interlace_allowed: can this connector handle interlaced modes? @@ -1137,7 +1173,6 @@ struct drm_encoder { * @funcs: connector control functions * @edid_blob_ptr: DRM property containing EDID if present * @properties: property tracking for this connector - * @path_blob_ptr: DRM blob property data for the DP MST path property * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling * @dpms: current dpms state * @helper_private: mid-layer private data @@ -1200,8 +1235,23 @@ struct drm_connector { struct drm_property_blob *edid_blob_ptr; struct drm_object_properties properties; + /** + * @path_blob_ptr: + * + * DRM blob property data for the DP MST path property. + */ struct drm_property_blob *path_blob_ptr; + /** + * @tile_blob_ptr: + * + * DRM blob property data for the tile property (used mostly by DP MST). + * This is meant for screens which are driven through separate display + * pipelines represented by &drm_crtc, which might not be running with + * genlocked clocks. For tiled panels which are genlocked, like + * dual-link LVDS or dual-link DSI, the driver should try to not expose + * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
+ */ struct drm_property_blob *tile_blob_ptr; uint8_t polled; /* DRM_CONNECTOR_POLL_* */ @@ -1263,6 +1313,7 @@ struct drm_connector { * plane (in 16.16) * @src_w: width of visible portion of plane (in 16.16) * @src_h: height of visible portion of plane (in 16.16) + * @rotation: rotation of the plane * @state: backpointer to global drm_atomic_state */ struct drm_plane_state { @@ -1503,6 +1554,7 @@ enum drm_plane_type { * struct drm_plane - central DRM plane control structure * @dev: DRM device this plane belongs to * @head: for list management + * @name: human readable name, can be overwritten by the driver * @base: base mode object * @possible_crtcs: pipes this plane can be bound to * @format_types: array of formats supported by this plane @@ -1516,6 +1568,7 @@ enum drm_plane_type { * @properties: property tracking for this plane * @type: type of plane (overlay, primary, cursor) * @state: current atomic state for this plane + * @helper_private: mid-layer private data */ struct drm_plane { struct drm_device *dev; @@ -1523,6 +1576,13 @@ struct drm_plane { char *name; + /** + * @mutex: + * + * Protects modeset plane state, together with the mutex of &drm_crtc + * this plane is linked to (when active, getting activated or getting + * disabled). + */ struct drm_modeset_lock mutex; struct drm_mode_object base; @@ -1543,6 +1603,9 @@ struct drm_plane { enum drm_plane_type type; + /* position inside the mode_config.list, can be used as a [] idx */ + unsigned index; + const struct drm_plane_helper_funcs *helper_private; struct drm_plane_state *state; @@ -1694,18 +1757,136 @@ struct drm_bridge { }; /** + * struct drm_crtc_commit - track modeset commits on a CRTC + * + * This structure is used to track pending modeset changes and atomic commit on + * a per-CRTC basis. Since updating the list should never block, this structure + * is reference counted to allow waiters to safely wait on an event to complete, + * without holding any locks. + * + * It has 3 different events in total to allow a fine-grained synchronization + * between outstanding updates:: + * + * atomic commit thread hardware + * + * write new state into hardware ----> ... + * signal hw_done + * switch to new state on next + * ... v/hblank + * + * wait for buffers to show up ... + * + * ... send completion irq + * irq handler signals flip_done + * cleanup old buffers + * + * signal cleanup_done + * + * wait for flip_done <---- + * clean up atomic state + * + * The important bit to know is that cleanup_done is the terminal event, but the + * ordering between flip_done and hw_done is entirely up to the specific driver + * and modeset state change. + * + * For an implementation of how to use this look at + * drm_atomic_helper_setup_commit() from the atomic helper library. + */ +struct drm_crtc_commit { + /** + * @crtc: + * + * DRM CRTC for this commit. + */ + struct drm_crtc *crtc; + + /** + * @ref: + * + * Reference count for this structure. Needed to allow blocking on + * completions without the risk of the completion disappearing + * meanwhile. + */ + struct kref ref; + + /** + * @flip_done: + * + * Will be signaled when the hardware has flipped to the new set of + * buffers. Signals at the same time as when the drm event for this + * commit is sent to userspace, or when an out-fence is signalled. Note + * that for most hardware, in most cases this happens after @hw_done is + * signalled. + */ + struct completion flip_done; + + /** + * @hw_done: + * + * Will be signalled when all hw register changes for this commit have + * been written out.
Especially when disabling a pipe this can be much + * later than @flip_done, since that can signal already when the + * screen goes black, whereas to fully shut down a pipe more register + * I/O is required. + * + * Note that this does not need to include separately reference-counted + * resources like backing storage buffer pinning, or runtime pm + * management. + */ + struct completion hw_done; + + /** + * @cleanup_done: + * + * Will be signalled after old buffers have been cleaned up by calling + * drm_atomic_helper_cleanup_planes(). Since this can only happen after + * a vblank wait has completed, it might be a bit later. This completion + * is useful to throttle updates and avoid hardware updates getting + * ahead of the buffer cleanup too much. + */ + struct completion cleanup_done; + + /** + * @commit_entry: + * + * Entry on the per-CRTC commit_list. Protected by crtc->commit_lock. + */ + struct list_head commit_entry; + + /** + * @event: + * + * &drm_pending_vblank_event pointer to clean up private events. + */ + struct drm_pending_vblank_event *event; +}; + +struct __drm_planes_state { + struct drm_plane *ptr; + struct drm_plane_state *state; +}; + +struct __drm_crtcs_state { + struct drm_crtc *ptr; + struct drm_crtc_state *state; + struct drm_crtc_commit *commit; +}; + +struct __drm_connnectors_state { + struct drm_connector *ptr; + struct drm_connector_state *state; +}; + +/** * struct drm_atomic_state - the global state object for atomic updates * @dev: parent DRM device * @allow_modeset: allow full modeset * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL. - * @planes: pointer to array of plane pointers - * @plane_states: pointer to array of plane states pointers + * @planes: pointer to array of structures with per-plane data * @crtcs: pointer to array of structures with per-CRTC data - * @crtc_states: pointer to array of CRTC states pointers * @num_connector: size of the @connectors array - * @connectors: pointer to array of connector pointers - * @connector_states: pointer to array of connector states pointers + * @connectors: pointer to array of structures with per-connector data * @acquire_ctx: acquire context for this atomic modeset state update */ struct drm_atomic_state { @@ -1713,15 +1894,20 @@ struct drm_atomic_state { bool allow_modeset : 1; bool legacy_cursor_update : 1; bool legacy_set_config : 1; - struct drm_plane **planes; - struct drm_plane_state **plane_states; - struct drm_crtc **crtcs; - struct drm_crtc_state **crtc_states; + struct __drm_planes_state *planes; + struct __drm_crtcs_state *crtcs; int num_connector; - struct drm_connector **connectors; - struct drm_connector_state **connector_states; + struct __drm_connnectors_state *connectors; struct drm_modeset_acquire_ctx *acquire_ctx; + + /** + * @commit_work: + * + * Work item which can be used by the driver or helpers to execute the + * commit without blocking.
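+ *
+ * A minimal sketch of the intended pattern (commit_work_fn is a
+ * hypothetical function used purely for illustration; this is an
+ * assumption about typical usage, not code added by this patch)::
+ *
+ *     static void commit_work_fn(struct work_struct *work)
+ *     {
+ *             struct drm_atomic_state *state =
+ *                     container_of(work, struct drm_atomic_state,
+ *                                  commit_work);
+ *
+ *             /* flush the update to the hardware out of line */
+ *             drm_atomic_helper_commit_tail(state);
+ *     }
+ *
+ *     /* in the driver's nonblocking commit path */
+ *     INIT_WORK(&state->commit_work, commit_work_fn);
+ *     queue_work(system_unbound_wq, &state->commit_work);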
+ */ + struct work_struct commit_work; }; @@ -2022,8 +2208,6 @@ struct drm_mode_config_funcs { * @connection_mutex: ww mutex protecting connector state and routing * @acquire_ctx: global implicit acquire context used by atomic drivers for * legacy IOCTLs - * @idr_mutex: mutex for KMS ID allocation and management - * @crtc_idr: main KMS ID tracking object * @fb_lock: mutex to protect fb state and lists * @num_fb: number of fbs available * @fb_list: list of framebuffers available @@ -2045,6 +2229,7 @@ struct drm_mode_config_funcs { * @fb_base: base address of the framebuffer * @poll_enabled: track polling support for this device * @poll_running: track polling status for this device + * @delayed_event: track delayed poll uevent delivery for this device * @output_poll_work: delayed work for polling in process context * @property_blob_list: list of all the blob property objects * @blob_lock: mutex for blob property allocation and management @@ -2063,6 +2248,7 @@ struct drm_mode_config_funcs { * @async_page_flip: does this device support async flips on the primary plane? * @cursor_width: hint to userspace for max cursor width * @cursor_height: hint to userspace for max cursor height + * @helper_private: mid-layer private data * * Core mode resource tracking structure. All CRTCs, encoders, and connectors * enumerated by the driver are added here, as are global properties. Some @@ -2072,10 +2258,30 @@ struct drm_mode_config { struct mutex mutex; /* protects configuration (mode lists etc.) */ struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */ struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */ - struct mutex idr_mutex; /* for IDR management */ - struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ - struct idr tile_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ - /* this is limited to one for now */ + + /** + * @idr_mutex: + * + * Mutex for KMS ID allocation and management. Protects both @crtc_idr + * and @tile_idr. + */ + struct mutex idr_mutex; + + /** + * @crtc_idr: + * + * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc, + * connector, modes - just makes life easier to have only one. + */ + struct idr crtc_idr; + + /** + * @tile_idr: + * + * Use this idr for allocating new IDs for tiled sinks, like those used + * in some high-res DP MST screens. + */ + struct idr tile_idr; struct mutex fb_lock; /* protects global and per-file fb lists */ int num_fb; @@ -2177,11 +2383,17 @@ struct drm_mode_config { /* whether async page flip is supported or not */ bool async_page_flip; - /* whether the driver supports fb modifiers */ + /** + * @allow_fb_modifiers: + * + * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call. + */ bool allow_fb_modifiers; /* cursor size */ uint32_t cursor_width, cursor_height; + + struct drm_mode_config_helper_funcs *helper_private; }; /** @@ -2230,7 +2442,18 @@ int drm_crtc_init_with_planes(struct drm_device *dev, const struct drm_crtc_funcs *funcs, const char *name, ...); extern void drm_crtc_cleanup(struct drm_crtc *crtc); -extern unsigned int drm_crtc_index(struct drm_crtc *crtc); + +/** + * drm_crtc_index - find the index of a registered CRTC + * @crtc: CRTC to find index for + * + * Given a registered CRTC, return the index of that CRTC within a DRM + * device's list of CRTCs.
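+ *
+ * Since the index is stable for the lifetime of the CRTC, it can be
+ * used to address per-CRTC arrays. A hypothetical usage sketch
+ * (illustration only, not part of this patch)::
+ *
+ *     /* look up the new state of a CRTC in an atomic update */
+ *     struct drm_crtc_state *crtc_state =
+ *             state->crtcs[drm_crtc_index(crtc)].state;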
+ */ +static inline unsigned int drm_crtc_index(struct drm_crtc *crtc) +{ + return crtc->index; +} /** * drm_crtc_mask - find the mask of a registered CRTC @@ -2284,7 +2507,18 @@ int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, int encoder_type, const char *name, ...); -extern unsigned int drm_encoder_index(struct drm_encoder *encoder); + +/** + * drm_encoder_index - find the index of a registered encoder + * @encoder: encoder to find index for + * + * Given a registered encoder, return the index of that encoder within a DRM + * device's list of encoders. + */ +static inline unsigned int drm_encoder_index(struct drm_encoder *encoder) +{ + return encoder->index; +} /** * drm_encoder_crtc_ok - can a given crtc drive a given encoder? @@ -2315,7 +2549,18 @@ extern int drm_plane_init(struct drm_device *dev, const uint32_t *formats, unsigned int format_count, bool is_primary); extern void drm_plane_cleanup(struct drm_plane *plane); -extern unsigned int drm_plane_index(struct drm_plane *plane); + +/** + * drm_plane_index - find the index of a registered plane + * @plane: plane to find index for + * + * Given a registered plane, return the index of that plane within a DRM + * device's list of planes. + */ +static inline unsigned int drm_plane_index(struct drm_plane *plane) +{ + return plane->index; +} extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx); extern void drm_plane_force_disable(struct drm_plane *plane); extern int drm_plane_check_pixel_format(const struct drm_plane *plane, @@ -2540,20 +2785,14 @@ extern int drm_mode_plane_set_obj_prop(struct drm_plane *plane, extern int drm_mode_atomic_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, - int *bpp); -extern int drm_format_num_planes(uint32_t format); -extern int drm_format_plane_cpp(uint32_t format, int plane); -extern int drm_format_horz_chroma_subsampling(uint32_t format); -extern int drm_format_vert_chroma_subsampling(uint32_t format); -extern int drm_format_plane_width(int width, uint32_t format, int plane); -extern int drm_format_plane_height(int height, uint32_t format, int plane); -extern const char *drm_get_format_name(uint32_t format); extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, unsigned int supported_rotations); extern unsigned int drm_rotation_simplify(unsigned int rotation, unsigned int supported_rotations); - +extern void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc, + uint degamma_lut_size, + bool has_ctm, + uint gamma_lut_size); /* Helpers */ static inline struct drm_plane *drm_plane_find(struct drm_device *dev, diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 97fa894d4ee2..4b37afa2b73b 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -48,9 +48,6 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, int x, int y, struct drm_framebuffer *old_fb); -extern void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc, - int degamma_lut_size, - int gamma_lut_size); extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder); diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 9d03f167007b..5a848e734422 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -622,6 +622,7 @@ u8 
drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI #define DP_BRANCH_OUI_HEADER_SIZE 0xc #define DP_RECEIVER_CAP_SIZE 0xf #define EDP_PSR_RECEIVER_CAP_SIZE 2 +#define EDP_DISPLAY_CTL_CAP_SIZE 3 void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]); diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 5b4aa35026a3..db8d4780eaa2 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -212,17 +212,6 @@ struct drm_fb_helper { * needs to be reprobed when fbdev is in control again. */ bool delayed_hotplug; - - /** - * @atomic: - * - * Use atomic updates for restore_fbdev_mode(), etc. This defaults to - * true if driver has DRIVER_ATOMIC feature flag, but drivers can - * override it to true after drm_fb_helper_init() if they support atomic - * modeset but do not yet advertise DRIVER_ATOMIC (note that fb-helper - * does not require ASYNC commits). - */ - bool atomic; }; #ifdef CONFIG_DRM_FBDEV_EMULATION diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h new file mode 100644 index 000000000000..7f90a396cf2b --- /dev/null +++ b/include/drm/drm_fourcc.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. + * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE.
+ */ +#ifndef __DRM_FOURCC_H__ +#define __DRM_FOURCC_H__ + +#include <linux/types.h> +#include <uapi/drm/drm_fourcc.h> + +void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp); +int drm_format_num_planes(uint32_t format); +int drm_format_plane_cpp(uint32_t format, int plane); +int drm_format_horz_chroma_subsampling(uint32_t format); +int drm_format_vert_chroma_subsampling(uint32_t format); +int drm_format_plane_width(int width, uint32_t format, int plane); +int drm_format_plane_height(int height, uint32_t format, int plane); +const char *drm_get_format_name(uint32_t format); + +#endif /* __DRM_FOURCC_H__ */ diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 7a9840f8b38e..72f5b15e0738 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -180,6 +180,8 @@ struct mipi_dsi_device { unsigned long mode_flags; }; +#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:" + static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev) { return container_of(dev, struct mipi_dsi_device, dev); @@ -263,6 +265,7 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start, u16 end); int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start, u16 end); +int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param); int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi); int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, enum mipi_dsi_dcs_tear_mode mode); diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 625966a906f2..ff481770d76b 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -169,6 +169,8 @@ enum drm_mode_status { * * The horizontal and vertical timings are defined per the following diagram. * + * :: + * * * Active Front Sync Back * Region Porch Porch diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index d4619dc2eecb..b55f21857a98 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -736,6 +736,11 @@ struct drm_connector_helper_funcs { * inspect dynamic configuration state should instead use * @atomic_best_encoder. * + * You can leave this function as NULL if the connector is only + * attached to a single encoder and you are using the atomic helpers. + * In this case, the core will call drm_atomic_helper_best_encoder() + * for you. + * * RETURNS: * * Encoder that should be used for the given connector and connector @@ -752,8 +757,9 @@ struct drm_connector_helper_funcs { * need to select the best encoder depending upon the desired * configuration and can't select it statically. * - * This function is used by drm_atomic_helper_check_modeset() and either - * this or @best_encoder is required. + * This function is used by drm_atomic_helper_check_modeset(). + * If it is not implemented, the core will fall back to @best_encoder + * (or drm_atomic_helper_best_encoder() if @best_encoder is NULL). * * NOTE: * @@ -925,4 +931,43 @@ static inline void drm_plane_helper_add(struct drm_plane *plane, plane->helper_private = funcs; } +/** + * struct drm_mode_config_helper_funcs - global modeset helper operations + * + * These helper functions are used by the atomic helpers.
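+ *
+ * As a rough sketch (an illustrative assumption, not code from this
+ * patch; my_atomic_commit_tail is a hypothetical driver function), an
+ * @atomic_commit_tail implementation would follow the sequence spelled
+ * out in the hook documentation below::
+ *
+ *     static void my_atomic_commit_tail(struct drm_atomic_state *state)
+ *     {
+ *             struct drm_device *dev = state->dev;
+ *
+ *             drm_atomic_helper_commit_modeset_disables(dev, state);
+ *             drm_atomic_helper_commit_planes(dev, state, false);
+ *             drm_atomic_helper_commit_modeset_enables(dev, state);
+ *
+ *             drm_atomic_helper_commit_hw_done(state);
+ *
+ *             drm_atomic_helper_wait_for_vblanks(dev, state);
+ *             drm_atomic_helper_cleanup_planes(dev, state);
+ *     }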
+ */ +struct drm_mode_config_helper_funcs { + /** + * @atomic_commit_tail: + * + * This hook is used by the default atomic_commit() hook implemented in + * drm_atomic_helper_commit() together with the nonblocking commit + * helpers (see drm_atomic_helper_setup_commit() for a starting point) + * to implement blocking and nonblocking commits easily. It is not used + * by the atomic helpers. + * + * This hook should first commit the given atomic state to the hardware. + * But drivers can add more waiting calls at the start of their + * implementation, e.g. to wait for driver-internal requests for implicit + * syncing, before starting to commit the update to the hardware. + * + * After the atomic update is committed to the hardware, this hook needs + * to call drm_atomic_helper_commit_hw_done(). Then wait for the update + * to be executed by the hardware, for example using + * drm_atomic_helper_wait_for_vblanks(), and then clean up the old + * framebuffers using drm_atomic_helper_cleanup_planes(). + * + * When disabling a CRTC, this hook _must_ stall for the commit to + * complete. Vblank waits don't work on disabled CRTCs, hence the core + * can't take care of this. And it also can't rely on the vblank event, + * since that can be signalled already when the screen shows black, + * which can happen much earlier than the last hardware access needed to + * shut off the display pipeline completely. + * + * This hook is optional; the default implementation is + * drm_atomic_helper_commit_tail(). + */ + void (*atomic_commit_tail)(struct drm_atomic_state *state); +}; + #endif diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h new file mode 100644 index 000000000000..269039722f91 --- /dev/null +++ b/include/drm/drm_simple_kms_helper.h @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2016 Noralf Trønnes + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __LINUX_DRM_SIMPLE_KMS_HELPER_H +#define __LINUX_DRM_SIMPLE_KMS_HELPER_H + +struct drm_simple_display_pipe; + +/** + * struct drm_simple_display_pipe_funcs - helper operations for a simple + * display pipeline + */ +struct drm_simple_display_pipe_funcs { + /** + * @enable: + * + * This function should be used to enable the pipeline. + * It is called when the underlying crtc is enabled. + * This hook is optional. + */ + void (*enable)(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *crtc_state); + /** + * @disable: + * + * This function should be used to disable the pipeline. + * It is called when the underlying crtc is disabled. + * This hook is optional. + */ + void (*disable)(struct drm_simple_display_pipe *pipe); + + /** + * @check: + * + * This function is called in the check phase of an atomic update, + * specifically when the underlying plane is checked. + * The simple display pipeline helpers already check that the plane is + * not scaled, fills the entire visible area and is always enabled + * when the crtc is also enabled. + * This hook is optional. + * + * RETURNS: + * + * 0 on success, -EINVAL if the state or the transition can't be + * supported, -ENOMEM on memory allocation failure and -EDEADLK if an + * attempt to obtain another state object ran into a &drm_modeset_lock + * deadlock.
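+ *
+ * A minimal sketch of such a hook (MY_MAX_PIXEL_CLOCK_KHZ and
+ * my_pipe_check are hypothetical names used for illustration only,
+ * not part of this patch)::
+ *
+ *     static int my_pipe_check(struct drm_simple_display_pipe *pipe,
+ *                              struct drm_plane_state *plane_state,
+ *                              struct drm_crtc_state *crtc_state)
+ *     {
+ *             /* e.g. reject modes the hardware cannot clock */
+ *             if (crtc_state->mode.clock > MY_MAX_PIXEL_CLOCK_KHZ)
+ *                     return -EINVAL;
+ *
+ *             return 0;
+ *     }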
+ */ + int (*check)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state, + struct drm_crtc_state *crtc_state); + /** + * @update: + * + * This function is called when the underlying plane state is updated. + * This hook is optional. + */ + void (*update)(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state); +}; + +/** + * struct drm_simple_display_pipe - simple display pipeline + * @crtc: CRTC control structure + * @plane: Plane control structure + * @encoder: Encoder control structure + * @connector: Connector control structure + * @funcs: Pipeline control functions (optional) + * + * Simple display pipeline with plane, crtc and encoder collapsed into one + * entity. It should be initialized by calling drm_simple_display_pipe_init(). + */ +struct drm_simple_display_pipe { + struct drm_crtc crtc; + struct drm_plane plane; + struct drm_encoder encoder; + struct drm_connector *connector; + + const struct drm_simple_display_pipe_funcs *funcs; +}; + +int drm_simple_display_pipe_init(struct drm_device *dev, + struct drm_simple_display_pipe *pipe, + const struct drm_simple_display_pipe_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + struct drm_connector *connector); + +#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */ diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 595f85c392ac..b1755f8db36b 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -92,4 +92,7 @@ extern bool i915_gpu_turbo_disable(void); #define I845_TSEG_SIZE_512K (2 << 1) #define I845_TSEG_SIZE_1M (3 << 1) +#define INTEL_BSM 0x5c +#define INTEL_BSM_MASK (0xFFFF << 20) + #endif /* _I915_DRM_H_ */ diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 19b14862d3e0..1b3b6e155392 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -279,6 +279,11 @@ struct ceph_osd_client { struct workqueue_struct *notify_wq; }; +static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag) +{ + return osdc->osdmap->flags & flag; +} + extern int ceph_osdc_setup(void); extern void ceph_osdc_cleanup(void); diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index ddc426b22d81..9ccf4dbe55f8 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -189,11 +189,6 @@ static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd) return !ceph_osd_is_up(map, osd); } -static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag) -{ - return map && (map->flags & flag); -} - extern char *ceph_osdmap_state_str(char *str, int len, int state); extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd); diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h index 5871f292b596..277ab9af9ac2 100644 --- a/include/linux/devpts_fs.h +++ b/include/linux/devpts_fs.h @@ -15,13 +15,12 @@ #include <linux/errno.h> -struct pts_fs_info; - #ifdef CONFIG_UNIX98_PTYS -/* Look up a pts fs info and get a ref to it */ -struct pts_fs_info *devpts_get_ref(struct inode *, struct file *); -void devpts_put_ref(struct pts_fs_info *); +struct pts_fs_info; + +struct pts_fs_info *devpts_acquire(struct file *); +void devpts_release(struct pts_fs_info *); int devpts_new_index(struct pts_fs_info *); void devpts_kill_index(struct pts_fs_info *, int); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 3fe90d494edb..4551c6f2a6c4 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -112,19 +112,24 @@ struct 
dma_buf_ops { * @file: file pointer used for sharing buffers across processes, and for refcounting. * @attachments: list of dma_buf_attachment that denotes all devices attached. * @ops: dma_buf_ops associated with this buffer object. + * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap + * @vmapping_counter: used internally to refcnt the vmaps + * @vmap_ptr: the current vmap ptr if vmapping_counter > 0 * @exp_name: name of the exporter; useful for debugging. * @owner: pointer to exporter module; used for refcounting when exporter is a * kernel module. * @list_node: node for dma_buf accounting and debugging. * @priv: exporter specific private data for this buffer object. * @resv: reservation object linked to this dma-buf + * @poll: for userspace poll support + * @cb_excl: for userspace poll support + * @cb_shared: for userspace poll support */ struct dma_buf { size_t size; struct file *file; struct list_head attachments; const struct dma_buf_ops *ops; - /* mutex to serialize list manipulation, attach/detach and vmap/unmap */ struct mutex lock; unsigned vmapping_counter; void *vmap_ptr; @@ -188,9 +193,11 @@ struct dma_buf_export_info { /** * helper macro for exporters; zeros and fills in most common values + * + * @name: export-info name */ -#define DEFINE_DMA_BUF_EXPORT_INFO(a) \ - struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \ +#define DEFINE_DMA_BUF_EXPORT_INFO(name) \ + struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \ .owner = THIS_MODULE } /** diff --git a/include/linux/fence-array.h b/include/linux/fence-array.h new file mode 100644 index 000000000000..86baaa45567c --- /dev/null +++ b/include/linux/fence-array.h @@ -0,0 +1,73 @@ +/* + * fence-array: aggregates fences to be waited on together + * + * Copyright (C) 2016 Collabora Ltd + * Copyright (C) 2016 Advanced Micro Devices, Inc. + * Authors: + * Gustavo Padovan <gustavo@padovan.org> + * Christian König <christian.koenig@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_FENCE_ARRAY_H +#define __LINUX_FENCE_ARRAY_H + +#include <linux/fence.h> + +/** + * struct fence_array_cb - callback helper for fence array + * @cb: fence callback structure for signaling + * @array: reference to the parent fence array object + */ +struct fence_array_cb { + struct fence_cb cb; + struct fence_array *array; +}; + +/** + * struct fence_array - fence to represent an array of fences + * @base: fence base class + * @lock: spinlock for fence handling + * @num_fences: number of fences in the array + * @num_pending: fences in the array still pending + * @fences: array of the fences + */ +struct fence_array { + struct fence base; + + spinlock_t lock; + unsigned num_fences; + atomic_t num_pending; + struct fence **fences; +}; + +extern const struct fence_ops fence_array_ops; + +/** + * to_fence_array - cast a fence to a fence_array + * @fence: fence to cast to a fence_array + * + * Returns NULL if the fence is not a fence_array, + * or the fence_array otherwise.
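+ *
+ * A hypothetical usage sketch for the array fence (illustration only,
+ * not part of this patch; num and fences stand in for caller-provided
+ * values): collapse several fences into one and wait on the aggregate::
+ *
+ *     struct fence_array *array;
+ *
+ *     /* fences is a filled-in struct fence *[num]; ownership moves
+ *      * to the array on success */
+ *     array = fence_array_create(num, fences,
+ *                                fence_context_alloc(1), 1, false);
+ *     if (!array)
+ *             return -ENOMEM;
+ *
+ *     fence_wait(&array->base, false);
+ *     fence_put(&array->base);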
+ */ +static inline struct fence_array *to_fence_array(struct fence *fence) +{ + if (fence->ops != &fence_array_ops) + return NULL; + + return container_of(fence, struct fence_array, base); +} + +struct fence_array *fence_array_create(int num_fences, struct fence **fences, + u64 context, unsigned seqno, + bool signal_on_any); + +#endif /* __LINUX_FENCE_ARRAY_H */ diff --git a/include/linux/fence.h b/include/linux/fence.h index 2b17698b60b8..44d945e96473 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h @@ -49,6 +49,8 @@ struct fence_cb; * @timestamp: Timestamp when the fence was signaled. * @status: Optional, only valid if < 0, must be set before calling * fence_signal, indicates that the fence has completed with an error. + * @child_list: list of child fences + * @active_list: list of active fences * * the flags member must be manipulated and read using the appropriate * atomic ops (bit_*), so taking the spinlock will not be needed most @@ -75,7 +77,8 @@ struct fence { struct rcu_head rcu; struct list_head cb_list; spinlock_t *lock; - unsigned context, seqno; + u64 context; + unsigned seqno; unsigned long flags; ktime_t timestamp; int status; @@ -178,7 +181,7 @@ struct fence_ops { }; void fence_init(struct fence *fence, const struct fence_ops *ops, - spinlock_t *lock, unsigned context, unsigned seqno); + spinlock_t *lock, u64 context, unsigned seqno); void fence_release(struct kref *kref); void fence_free(struct fence *fence); @@ -352,27 +355,27 @@ static inline signed long fence_wait(struct fence *fence, bool intr) return ret < 0 ? ret : 0; } -unsigned fence_context_alloc(unsigned num); +u64 fence_context_alloc(unsigned num); #define FENCE_TRACE(f, fmt, args...) \ do { \ struct fence *__ff = (f); \ if (config_enabled(CONFIG_FENCE_TRACE)) \ - pr_info("f %u#%u: " fmt, \ + pr_info("f %llu#%u: " fmt, \ __ff->context, __ff->seqno, ##args); \ } while (0) #define FENCE_WARN(f, fmt, args...) \ do { \ struct fence *__ff = (f); \ - pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \ + pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ ##args); \ } while (0) #define FENCE_ERR(f, fmt, args...)
\ do { \ struct fence *__ff = (f); \ - pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \ + pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ ##args); \ } while (0) diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 604e1526cd00..13ba552e6c09 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -241,7 +241,7 @@ struct fscache_cache_ops { /* check the consistency between the backing cache and the FS-Cache * cookie */ - bool (*check_consistency)(struct fscache_operation *op); + int (*check_consistency)(struct fscache_operation *op); /* store the updated auxiliary data on an object */ void (*update_object)(struct fscache_object *object); diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index e399029b68c5..645ad06b5d52 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -100,14 +100,16 @@ io_mapping_unmap_atomic(void __iomem *vaddr) } static inline void __iomem * -io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) +io_mapping_map_wc(struct io_mapping *mapping, + unsigned long offset, + unsigned long size) { resource_size_t phys_addr; BUG_ON(offset >= mapping->size); phys_addr = mapping->base + offset; - return ioremap_wc(phys_addr, PAGE_SIZE); + return ioremap_wc(phys_addr, size); } static inline void @@ -155,7 +157,9 @@ io_mapping_unmap_atomic(void __iomem *vaddr) /* Non-atomic map/unmap */ static inline void __iomem * -io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) +io_mapping_map_wc(struct io_mapping *mapping, + unsigned long offset, + unsigned long size) { return ((char __force __iomem *) mapping) + offset; } diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index bfbd707de390..dc493e0f0ff7 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -305,12 +305,12 @@ #define ICC_SGI1R_AFFINITY_1_SHIFT 16 #define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) #define ICC_SGI1R_SGI_ID_SHIFT 24 -#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT) +#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT) #define ICC_SGI1R_AFFINITY_2_SHIFT 32 -#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) +#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 #define ICC_SGI1R_AFFINITY_3_SHIFT 48 -#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) +#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) #include <asm/arch_gicv3.h> diff --git a/include/linux/namei.h b/include/linux/namei.h index ec5ec2818a28..d3d0398f2a1b 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -45,6 +45,8 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; #define LOOKUP_ROOT 0x2000 #define LOOKUP_EMPTY 0x4000 +extern int path_pts(struct path *path); + extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty); static inline int user_path_at(int dfd, const char __user *name, unsigned flags, diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index bf268fa92c5b..fec40271339f 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h @@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops; static inline bool page_is_young(struct page *page) { - return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); 
+ + if (unlikely(!page_ext)) + return false; + + return test_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline void set_page_young(struct page *page) { - set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + set_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline bool test_and_clear_page_young(struct page *page) { - return test_and_clear_bit(PAGE_EXT_YOUNG, - &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline bool page_is_idle(struct page *page) { - return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_bit(PAGE_EXT_IDLE, &page_ext->flags); } static inline void set_page_idle(struct page *page) { - set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + set_bit(PAGE_EXT_IDLE, &page_ext->flags); } static inline void clear_page_idle(struct page *page) { - clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + clear_bit(PAGE_EXT_IDLE, &page_ext->flags); } #endif /* CONFIG_64BIT */ diff --git a/include/linux/platform_data/omapdss.h b/include/linux/platform_data/omapdss.h new file mode 100644 index 000000000000..679177929045 --- /dev/null +++ b/include/linux/platform_data/omapdss.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2016 Texas Instruments, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __OMAPDSS_PDATA_H +#define __OMAPDSS_PDATA_H + +enum omapdss_version { + OMAPDSS_VER_UNKNOWN = 0, + OMAPDSS_VER_OMAP24xx, + OMAPDSS_VER_OMAP34xx_ES1, /* OMAP3430 ES1.0, 2.0 */ + OMAPDSS_VER_OMAP34xx_ES3, /* OMAP3430 ES3.0+ */ + OMAPDSS_VER_OMAP3630, + OMAPDSS_VER_AM35xx, + OMAPDSS_VER_OMAP4430_ES1, /* OMAP4430 ES1.0 */ + OMAPDSS_VER_OMAP4430_ES2, /* OMAP4430 ES2.0, 2.1, 2.2 */ + OMAPDSS_VER_OMAP4, /* All other OMAP4s */ + OMAPDSS_VER_OMAP5, + OMAPDSS_VER_AM43xx, + OMAPDSS_VER_DRA7xx, +}; + +/* Board specific data */ +struct omap_dss_board_info { + const char *default_display_name; + int (*dsi_enable_pads)(int dsi_id, unsigned int lane_mask); + void (*dsi_disable_pads)(int dsi_id, unsigned int lane_mask); + int (*set_min_bus_tput)(struct device *dev, unsigned long r); + enum omapdss_version version; +}; + +#endif /* __OMAPDSS_PDATA_H */ diff --git a/include/linux/reservation.h b/include/linux/reservation.h index 49d057655d62..b0f305e77b7f 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -49,12 +49,27 @@ extern struct ww_class reservation_ww_class; extern struct lock_class_key reservation_seqcount_class; extern const char reservation_seqcount_string[]; +/** + * struct reservation_object_list - a list of shared fences + * @rcu: for internal use + * @shared_count: number of fences in the shared table + * @shared_max: for growing the shared fence table + * @shared: shared fence table + */ struct reservation_object_list { struct rcu_head rcu; u32 shared_count, shared_max; struct fence __rcu *shared[]; }; +/** + * struct reservation_object - a reservation object manages fences for a buffer + * @lock: update side lock + * @seq: sequence count for managing RCU read-side synchronization + * @fence_excl: the exclusive fence, if there is one currently + * @fence: list of current shared fences + * @staged: staged copy of shared fences for RCU updates + */ struct reservation_object { struct ww_mutex lock; seqcount_t seq; @@ -68,6 +83,10 @@ struct reservation_object { #define reservation_object_assert_held(obj) \ lockdep_assert_held(&(obj)->lock.base) +/** + * reservation_object_init - initialize a reservation object + * @obj: the reservation object + */ static inline void reservation_object_init(struct reservation_object *obj) { @@ -79,6 +98,10 @@ reservation_object_init(struct reservation_object *obj) obj->staged = NULL; } +/** + * reservation_object_fini - destroy a reservation object + * @obj: the reservation object + */ static inline void reservation_object_fini(struct reservation_object *obj) { @@ -106,6 +129,14 @@ reservation_object_fini(struct reservation_object *obj) ww_mutex_destroy(&obj->lock); } +/** + * reservation_object_get_list - get the reservation object's + * shared fence list, with update-side lock held + * @obj: the reservation object + * + * Returns the shared fence list. Does NOT take references to + * the fences. The obj->lock must be held. + */ static inline struct reservation_object_list * reservation_object_get_list(struct reservation_object *obj) { @@ -113,6 +144,17 @@ reservation_object_get_list(struct reservation_object *obj) reservation_object_held(obj)); } +/** + * reservation_object_get_excl - get the reservation object's + * exclusive fence, with update-side lock held + * @obj: the reservation object + * + * Returns the exclusive fence (if any). Does NOT take a + * reference. The obj->lock must be held.
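+ *
+ * A minimal usage sketch (an illustrative assumption, not part of this
+ * patch)::
+ *
+ *     struct fence *excl;
+ *
+ *     ww_mutex_lock(&obj->lock, NULL);
+ *     excl = reservation_object_get_excl(obj);
+ *     if (excl && !fence_is_signaled(excl))
+ *             fence_wait(excl, false);
+ *     ww_mutex_unlock(&obj->lock);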
+ * + * RETURNS + * The exclusive fence or NULL + */ static inline struct fence * reservation_object_get_excl(struct reservation_object *obj) { @@ -120,6 +162,17 @@ reservation_object_get_excl(struct reservation_object *obj) { reservation_object_held(obj)); } +/** + * reservation_object_get_excl_rcu - get the reservation object's + * exclusive fence, without the lock held. + * @obj: the reservation object + * + * If there is an exclusive fence, this atomically increments its + * reference count and returns it. + * + * RETURNS + * The exclusive fence or NULL if none + */ static inline struct fence * reservation_object_get_excl_rcu(struct reservation_object *obj) { diff --git a/include/linux/sctp.h b/include/linux/sctp.h index dacb5e711994..de1f64318fc4 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -765,6 +765,8 @@ struct sctp_info { __u8 sctpi_s_disable_fragments; __u8 sctpi_s_v4mapped; __u8 sctpi_s_frag_interleave; + __u32 sctpi_s_type; + __u32 __reserved3; }; struct sctp_infox { diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 37dbacf84849..816b7543f81b 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -21,6 +21,9 @@ static inline int do_sys_settimeofday(const struct timespec *tv, struct timespec64 ts64; if (!tv) + return do_sys_settimeofday64(NULL, tz); + + if (!timespec_valid(tv)) return -EINVAL; ts64 = timespec_to_timespec64(*tv); diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index b39a5f3153bd..960bedbdec87 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -165,6 +165,7 @@ int vga_switcheroo_unlock_ddc(struct pci_dev *pdev); int vga_switcheroo_process_delayed_switch(void); +bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev); enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev); void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic); @@ -188,6 +189,7 @@ static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(v static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; } static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; } static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } +static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; } static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; } static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {} diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index d325c81332e3..43a5a0e4524c 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -63,6 +63,8 @@ struct ip6_tnl_encap_ops { u8 *protocol, struct flowi6 *fl6); }; +#ifdef CONFIG_INET + extern const struct ip6_tnl_encap_ops __rcu * ip6tun_encaps[MAX_IPTUN_ENCAP_OPS]; @@ -138,7 +140,6 @@ struct net *ip6_tnl_get_link_net(const struct net_device *dev); int ip6_tnl_get_iflink(const struct net_device *dev); int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu); -#ifdef CONFIG_INET static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, struct net_device *dev) { diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 401038d2f9b8..fea53f4d92ca 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -61,6 +61,7 @@ psched_tdiff_bounded(psched_time_t tv1,
psched_time_t tv2, psched_time_t bound) } struct qdisc_watchdog { + u64 last_expires; struct hrtimer timer; struct Qdisc *qdisc; }; diff --git a/include/sound/omap-hdmi-audio.h b/include/sound/omap-hdmi-audio.h index afdb416898e0..1df2ff61a4dd 100644 --- a/include/sound/omap-hdmi-audio.h +++ b/include/sound/omap-hdmi-audio.h @@ -16,11 +16,16 @@ * */ -#include <video/omapdss.h> - #ifndef __OMAP_HDMI_AUDIO_H__ #define __OMAP_HDMI_AUDIO_H__ +#include <linux/platform_data/omapdss.h> + +struct omap_dss_audio { + struct snd_aes_iec958 *iec; + struct snd_cea_861_aud_if *cea; +}; + struct omap_hdmi_audio_ops { int (*audio_startup)(struct device *dev, void (*abort_cb)(struct device *dev)); diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 9222db8ccccc..5f030b46cff4 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1353,6 +1353,15 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* @@ -1361,7 +1370,7 @@ enum ethtool_link_mode_bit_indices { */ __ETHTOOL_LINK_MODE_LAST - = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, + = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, }; #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index eba5914ba5d1..f4297c8a42fe 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -145,6 +145,8 @@ enum { TCA_POLICE_PEAKRATE, TCA_POLICE_AVRATE, TCA_POLICE_RESULT, + TCA_POLICE_TM, + TCA_POLICE_PAD, __TCA_POLICE_MAX #define TCA_POLICE_RESULT TCA_POLICE_RESULT }; @@ -173,7 +175,7 @@ enum { TCA_U32_DIVISOR, TCA_U32_SEL, TCA_U32_POLICE, - TCA_U32_ACT, + TCA_U32_ACT, TCA_U32_INDEV, TCA_U32_PCNT, TCA_U32_MARK, diff --git a/include/video/omap-panel-data.h b/include/video/omap-panel-data.h index 56830d1dc762..e7003ee6e063 100644 --- a/include/video/omap-panel-data.h +++ b/include/video/omap-panel-data.h @@ -27,59 +27,18 @@ #ifndef __OMAP_PANEL_DATA_H #define __OMAP_PANEL_DATA_H -#include <video/omapdss.h> #include <video/display_timing.h> -struct omap_dss_device; - -/** - * encoder_tfp410 platform data - * @name: name for this display entity - * @power_down_gpio: gpio number for PD pin (or -1 if not available) - * @data_lines: number of DPI datalines - */ -struct encoder_tfp410_platform_data { - const char *name; - const char *source; - int power_down_gpio; - int data_lines; -}; - - -/** - * connector_dvi platform data - * @name: name for this display entity - * @source: name of the display entity used as a video source - * @i2c_bus_num: i2c bus number to be used for reading EDID - */ -struct connector_dvi_platform_data { - const char *name; - const char *source; - int i2c_bus_num; -}; - -/** - * connector_hdmi platform data - * @name: name for this display entity - * @source: name of the display entity used as a video source - */ -struct 
connector_hdmi_platform_data { - const char *name; - const char *source; -}; - /** * connector_atv platform data * @name: name for this display entity * @source: name of the display entity used as a video source - * @connector_type: composite/svideo * @invert_polarity: invert signal polarity */ struct connector_atv_platform_data { const char *name; const char *source; - enum omap_dss_venc_type connector_type; bool invert_polarity; }; @@ -105,33 +64,6 @@ struct panel_dpi_platform_data { }; /** - * panel_dsicm platform data - * @name: name for this display entity - * @source: name of the display entity used as a video source - * @reset_gpio: gpio to reset the panel (or -1) - * @use_ext_te: use external TE GPIO - * @ext_te_gpio: external TE GPIO - * @ulps_timeout: time to wait before entering ULPS, 0 = disabled (ms) - * @use_dsi_backlight: true if panel uses DSI command to control backlight - * @pin_config: DSI pin configuration - */ -struct panel_dsicm_platform_data { - const char *name; - const char *source; - - int reset_gpio; - - bool use_ext_te; - int ext_te_gpio; - - unsigned ulps_timeout; - - bool use_dsi_backlight; - - struct omap_dsi_pin_config pin_config; -}; - -/** * panel_acx565akm platform data * @name: name for this display entity * @source: name of the display entity used as a video source @@ -147,93 +79,4 @@ struct panel_acx565akm_platform_data { int datapairs; }; -/** - * panel_lb035q02 platform data - * @name: name for this display entity - * @source: name of the display entity used as a video source - * @data_lines: number of DPI datalines - * @backlight_gpio: gpio to enable/disable the backlight (or -1) - * @enable_gpio: gpio to enable/disable the panel (or -1) - */ -struct panel_lb035q02_platform_data { - const char *name; - const char *source; - - int data_lines; - - int backlight_gpio; - int enable_gpio; -}; - -/** - * panel_sharp_ls037v7dw01 platform data - * @name: name for this display entity - * @source: name of the display entity used as a video source - * @data_lines: number of DPI datalines - * @resb_gpio: reset signal GPIO - * @ini_gpio: power on control GPIO - * @mo_gpio: selection for resolution(VGA/QVGA) GPIO - * @lr_gpio: selection for horizontal scanning direction GPIO - * @ud_gpio: selection for vertical scanning direction GPIO - */ -struct panel_sharp_ls037v7dw01_platform_data { - const char *name; - const char *source; - - int data_lines; - - int resb_gpio; - int ini_gpio; - int mo_gpio; - int lr_gpio; - int ud_gpio; -}; - -/** - * panel-tpo-td043mtea1 platform data - * @name: name for this display entity - * @source: name of the display entity used as a video source - * @data_lines: number of DPI datalines - * @nreset_gpio: reset signal - */ -struct panel_tpo_td043mtea1_platform_data { - const char *name; - const char *source; - - int data_lines; - - int nreset_gpio; -}; - -/** - * panel-nec-nl8048hl11 platform data - * @name: name for this display entity - * @source: name of the display entity used as a video source - * @data_lines: number of DPI datalines - * @res_gpio: reset signal - * @qvga_gpio: selection for resolution(QVGA/WVGA) - */ -struct panel_nec_nl8048hl11_platform_data { - const char *name; - const char *source; - - int data_lines; - - int res_gpio; - int qvga_gpio; -}; - -/** - * panel-tpo-td028ttec1 platform data - * @name: name for display entity - * @source: name of the display entity used as a video source - * @data_lines: number of DPI datalines - */ -struct panel_tpo_td028ttec1_platform_data { - const char *name; - const char 
*source; - - int data_lines; -}; - #endif /* __OMAP_PANEL_DATA_H */ diff --git a/include/video/omapdss.h b/include/video/omapfb_dss.h index 8e14ad7327c9..1d38901d599d 100644 --- a/include/video/omapdss.h +++ b/include/video/omapfb_dss.h @@ -1,27 +1,20 @@ /* - * Copyright (C) 2008 Nokia Corporation - * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> + * Copyright (C) 2016 Texas Instruments, Inc. * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ -#ifndef __OMAP_OMAPDSS_H -#define __OMAP_OMAPDSS_H +#ifndef __OMAPFB_DSS_H +#define __OMAPFB_DSS_H #include <linux/list.h> #include <linux/kobject.h> #include <linux/device.h> #include <linux/interrupt.h> +#include <linux/platform_data/omapdss.h> #include <video/videomode.h> @@ -167,11 +160,6 @@ enum omap_dss_display_state { OMAP_DSS_DISPLAY_ACTIVE, }; -struct omap_dss_audio { - struct snd_aes_iec958 *iec; - struct snd_cea_861_aud_if *cea; -}; - enum omap_dss_rotation_type { OMAP_DSS_ROT_DMA = 1 << 0, OMAP_DSS_ROT_VRFB = 1 << 1, @@ -195,25 +183,6 @@ enum omap_overlay_caps { OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5, }; -enum omap_overlay_manager_caps { - OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */ -}; - -enum omap_dss_clk_source { - OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK - * OMAP4: DSS_FCLK */ - OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK - * OMAP4: PLL1_CLK1 */ - OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK - * OMAP4: PLL1_CLK2 */ - OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */ - OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */ -}; - -enum omap_hdmi_flags { - OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0, -}; - enum omap_dss_output_id { OMAP_DSS_OUTPUT_DPI = 1 << 0, OMAP_DSS_OUTPUT_DBI = 1 << 1, @@ -303,36 +272,6 @@ struct omap_dss_dsi_config { enum omap_dss_dsi_trans_mode trans_mode; }; -enum omapdss_version { - OMAPDSS_VER_UNKNOWN = 0, - OMAPDSS_VER_OMAP24xx, - OMAPDSS_VER_OMAP34xx_ES1, /* OMAP3430 ES1.0, 2.0 */ - OMAPDSS_VER_OMAP34xx_ES3, /* OMAP3430 ES3.0+ */ - OMAPDSS_VER_OMAP3630, - OMAPDSS_VER_AM35xx, - OMAPDSS_VER_OMAP4430_ES1, /* OMAP4430 ES1.0 */ - OMAPDSS_VER_OMAP4430_ES2, /* OMAP4430 ES2.0, 2.1, 2.2 */ - OMAPDSS_VER_OMAP4, /* All other OMAP4s */ - OMAPDSS_VER_OMAP5, - OMAPDSS_VER_AM43xx, - OMAPDSS_VER_DRA7xx, -}; - -/* Board specific data */ -struct omap_dss_board_info { - int num_devices; - struct omap_dss_device **devices; - struct omap_dss_device *default_device; - const char *default_display_name; - int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask); - void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask); - int (*set_min_bus_tput)(struct device *dev, unsigned long r); - enum omapdss_version version; -}; - -/* Init with the board info */ 
-extern int omap_display_init(struct omap_dss_board_info *board_data); - struct omap_video_timings { /* Unit: pixels */ u16 x_res; @@ -463,7 +402,6 @@ struct omap_overlay_manager { /* static fields */ const char *name; enum omap_channel id; - enum omap_overlay_manager_caps caps; struct list_head overlays; enum omap_display_type supported_displays; enum omap_dss_output_id supported_outputs; @@ -919,4 +857,4 @@ omapdss_of_get_first_endpoint(const struct device_node *parent); struct omap_dss_device * omapdss_of_find_source_for_first_ep(struct device_node *node); -#endif +#endif /* __OMAPFB_DSS_H */ diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index 04be7021f848..318858edb1cd 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -365,7 +365,6 @@ static struct file_system_type bpf_fs_type = { .name = "bpf", .mount = bpf_mount, .kill_sb = kill_litter_super, - .fs_flags = FS_USERNS_MOUNT, }; MODULE_ALIAS_FS("bpf"); diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c index c42742208e5e..89b49f6773f0 100644 --- a/kernel/irq/ipi.c +++ b/kernel/irq/ipi.c @@ -125,7 +125,7 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest) domain = data->domain; if (WARN_ON(domain == NULL)) - return; + return -EINVAL; if (!irq_domain_is_ipi(domain)) { pr_warn("Trying to destroy a non IPI domain!\n"); diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 8c7392c4fdbd..e99df0ff1d42 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -425,6 +425,7 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer) { debug_object_free(timer, &hrtimer_debug_descr); } +EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack); #else static inline void debug_hrtimer_init(struct hrtimer *timer) { } diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 77d7d034bac3..b9cfdbfae9aa 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1841,6 +1841,9 @@ config TEST_BITMAP If unsure, say N. +config TEST_UUID + tristate "Test functions located in the uuid module at runtime" + config TEST_RHASHTABLE tristate "Perform selftest on resizable hash table" default n diff --git a/lib/Makefile b/lib/Makefile index 499fb354d627..ff6a7a6c6395 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -58,6 +58,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o obj-$(CONFIG_TEST_PRINTF) += test_printf.o obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o +obj-$(CONFIG_TEST_UUID) += test_uuid.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG diff --git a/lib/test_uuid.c b/lib/test_uuid.c new file mode 100644 index 000000000000..547d3127a3cf --- /dev/null +++ b/lib/test_uuid.c @@ -0,0 +1,133 @@ +/* + * Test cases for lib/uuid.c module. 
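+ *
+ * A typical way to exercise this (an assumed invocation, following the
+ * usual pattern for lib/ self-tests): build with CONFIG_TEST_UUID=m,
+ * then::
+ *
+ *     modprobe test_uuid
+ *     dmesg | tail    # expect "test_uuid: all <n> tests passed"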
+ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/uuid.h> + +struct test_uuid_data { + const char *uuid; + uuid_le le; + uuid_be be; +}; + +static const struct test_uuid_data test_uuid_test_data[] = { + { + .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576", + .le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76), + .be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76), + }, + { + .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b", + .le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b), + .be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b), + }, + { + .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84", + .le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84), + .be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84), + }, +}; + +static const char * const test_uuid_wrong_data[] = { + "c33f4995-3701-450e-9fbf206a2e98e576 ", /* no hyphen(s) */ + "64b4371c-77c1-48f9-8221-29f054XX023b", /* invalid character(s) */ + "0cb4ddff-a545-4401-9d06-688af53e", /* not enough data */ +}; + +static unsigned total_tests __initdata; +static unsigned failed_tests __initdata; + +static void __init test_uuid_failed(const char *prefix, bool wrong, bool be, + const char *data, const char *actual) +{ + pr_err("%s test #%u %s %s data: '%s'\n", + prefix, + total_tests, + wrong ? "passed on wrong" : "failed on", + be ? "BE" : "LE", + data); + if (actual && *actual) + pr_err("%s test #%u actual data: '%s'\n", + prefix, + total_tests, + actual); + failed_tests++; +} + +static void __init test_uuid_test(const struct test_uuid_data *data) +{ + uuid_le le; + uuid_be be; + char buf[48]; + + /* LE */ + total_tests++; + if (uuid_le_to_bin(data->uuid, &le)) + test_uuid_failed("conversion", false, false, data->uuid, NULL); + + total_tests++; + if (uuid_le_cmp(data->le, le)) { + sprintf(buf, "%pUl", &le); + test_uuid_failed("cmp", false, false, data->uuid, buf); + } + + /* BE */ + total_tests++; + if (uuid_be_to_bin(data->uuid, &be)) + test_uuid_failed("conversion", false, true, data->uuid, NULL); + + total_tests++; + if (uuid_be_cmp(data->be, be)) { + sprintf(buf, "%pUb", &be); + test_uuid_failed("cmp", false, true, data->uuid, buf); + } +} + +static void __init test_uuid_wrong(const char *data) +{ + uuid_le le; + uuid_be be; + + /* LE */ + total_tests++; + if (!uuid_le_to_bin(data, &le)) + test_uuid_failed("negative", true, false, data, NULL); + + /* BE */ + total_tests++; + if (!uuid_be_to_bin(data, &be)) + test_uuid_failed("negative", true, true, data, NULL); +} + +static int __init test_uuid_init(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++) + test_uuid_test(&test_uuid_test_data[i]); + + for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++) + test_uuid_wrong(test_uuid_wrong_data[i]); + + if (failed_tests == 0) + pr_info("all %u tests passed\n", total_tests); + else + pr_err("failed %u out of %u tests\n", failed_tests, total_tests); + + return failed_tests ? 
-EINVAL : 0; +} +module_init(test_uuid_init); + +static void __exit test_uuid_exit(void) +{ + /* do nothing */ +} +module_exit(test_uuid_exit); + +MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/lib/uuid.c b/lib/uuid.c index e116ae5fa00f..37687af77ff8 100644 --- a/lib/uuid.c +++ b/lib/uuid.c @@ -106,8 +106,8 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16]) return -EINVAL; for (i = 0; i < 16; i++) { - int hi = hex_to_bin(uuid[si[i]] + 0); - int lo = hex_to_bin(uuid[si[i]] + 1); + int hi = hex_to_bin(uuid[si[i] + 0]); + int lo = hex_to_bin(uuid[si[i] + 1]); b[ei[i]] = (hi << 4) | lo; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 925b431f3f03..58c69c94402a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2896,6 +2896,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) * ordering is imposed by list_lru_node->lock taken by * memcg_drain_all_list_lrus(). */ + rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ css_for_each_descendant_pre(css, &memcg->css) { child = mem_cgroup_from_css(css); BUG_ON(child->kmemcg_id != kmemcg_id); @@ -2903,6 +2904,8 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) if (!memcg->use_hierarchy) break; } + rcu_read_unlock(); + memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); memcg_free_cache_id(kmemcg_id); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index dfb1ab61fb23..acbc432d1a52 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -625,8 +625,6 @@ void try_oom_reaper(struct task_struct *tsk) if (atomic_read(&mm->mm_users) > 1) { rcu_read_lock(); for_each_process(p) { - bool exiting; - if (!process_shares_mm(p, mm)) continue; if (fatal_signal_pending(p)) @@ -636,10 +634,7 @@ void try_oom_reaper(struct task_struct *tsk) * If the task is exiting make sure the whole thread group * is exiting and cannot acces mm anymore. 
*/ - spin_lock_irq(&p->sighand->siglock); - exiting = signal_group_exit(p->signal); - spin_unlock_irq(&p->sighand->siglock); - if (exiting) + if (signal_group_exit(p->signal)) continue; /* Give up */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f8f3bfc435ee..6903b695ebae 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -656,6 +656,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page, return; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return; + __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); INIT_LIST_HEAD(&page->lru); @@ -673,6 +676,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page, return; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return; + __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); set_page_private(page, 0); @@ -2609,11 +2615,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, page = list_last_entry(list, struct page, lru); else page = list_first_entry(list, struct page, lru); - } while (page && check_new_pcp(page)); - __dec_zone_state(zone, NR_ALLOC_BATCH); - list_del(&page->lru); - pcp->count--; + __dec_zone_state(zone, NR_ALLOC_BATCH); + list_del(&page->lru); + pcp->count--; + + } while (check_new_pcp(page)); } else { /* * We most definitely don't want callers attempting to @@ -3023,6 +3030,7 @@ reset_fair: apply_fair = false; fair_skipped = false; reset_alloc_batches(ac->preferred_zoneref->zone); + z = ac->preferred_zoneref; goto zonelist_scan; } @@ -3596,6 +3604,17 @@ retry: */ alloc_flags = gfp_to_alloc_flags(gfp_mask); + /* + * Reset the zonelist iterators if memory policies can be ignored. + * These allocations are high priority and system rather than user + * orientated. + */ + if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) { + ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); + ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, + ac->high_zoneidx, ac->nodemask); + } + /* This is the last chance, in general, before the goto nopage. */ page = get_page_from_freelist(gfp_mask, order, alloc_flags & ~ALLOC_NO_WATERMARKS, ac); @@ -3604,12 +3623,6 @@ retry: /* Allocate without watermarks if the context allows */ if (alloc_flags & ALLOC_NO_WATERMARKS) { - /* - * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds - * the allocation is high priority and these type of - * allocations are system rather than user orientated - */ - ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); page = get_page_from_freelist(gfp_mask, order, ALLOC_NO_WATERMARKS, ac); if (page) @@ -3808,7 +3821,11 @@ retry_cpuset: /* Dirty zone balancing only done in the fast path */ ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); - /* The preferred zone is used for statistics later */ + /* + * The preferred zone is used for statistics but crucially it is + * also used as the starting point for the zonelist iterator. It + * may get reset for allocations that ignore memory policies. 
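+ * (The reset happens in the slow path once ALLOC_NO_WATERMARKS is set or ALLOC_CPUSET is cleared; see the __alloc_pages_slowpath hunk above.)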
+ */ ac.preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx, ac.nodemask); if (!ac.preferred_zoneref) { diff --git a/mm/page_owner.c b/mm/page_owner.c index 792b56da13d8..c6cda3e36212 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -55,6 +55,8 @@ void __reset_page_owner(struct page *page, unsigned int order) for (i = 0; i < (1 << order); i++) { page_ext = lookup_page_ext(page + i); + if (unlikely(!page_ext)) + continue; __clear_bit(PAGE_EXT_OWNER, &page_ext->flags); } } @@ -62,6 +64,7 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) { struct page_ext *page_ext = lookup_page_ext(page); + struct stack_trace trace = { .nr_entries = 0, .max_entries = ARRAY_SIZE(page_ext->trace_entries), @@ -69,6 +72,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) .skip = 3, }; + if (unlikely(!page_ext)) + return; + save_stack_trace(&trace); page_ext->order = order; @@ -82,6 +88,8 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) void __set_page_owner_migrate_reason(struct page *page, int reason) { struct page_ext *page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return; page_ext->last_migrate_reason = reason; } @@ -89,6 +97,12 @@ void __set_page_owner_migrate_reason(struct page *page, int reason) gfp_t __get_page_owner_gfp(struct page *page) { struct page_ext *page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + /* + * The caller just returns 0 if there is no valid gfp, + * so return 0 here too. + */ + return 0; return page_ext->gfp_mask; } @@ -99,6 +113,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage) struct page_ext *new_ext = lookup_page_ext(newpage); int i; + if (unlikely(!old_ext || !new_ext)) + return; + new_ext->order = old_ext->order; new_ext->gfp_mask = old_ext->gfp_mask; new_ext->nr_entries = old_ext->nr_entries; @@ -193,6 +210,14 @@ void __dump_page_owner(struct page *page) - gfp_t gfp_mask = page_ext->gfp_mask; - int mt = gfpflags_to_migratetype(gfp_mask); + gfp_t gfp_mask; + int mt; + + if (unlikely(!page_ext)) { + pr_alert("There is no page extension available.\n"); + return; + } + + gfp_mask = page_ext->gfp_mask; + mt = gfpflags_to_migratetype(gfp_mask); if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) { pr_alert("page_owner info is not active (free page?)\n"); return; @@ -251,6 +273,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) } page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + continue; /* * Some pages could be missed by concurrent allocation or free, @@ -317,6 +341,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) continue; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + continue; /* Maybe overlapping zone */ if (test_bit(PAGE_EXT_OWNER, &page_ext->flags)) diff --git a/mm/page_poison.c b/mm/page_poison.c index 1eae5fad2446..2e647c65916b 100644 --- a/mm/page_poison.c +++ b/mm/page_poison.c @@ -54,6 +54,9 @@ static inline void set_page_poison(struct page *page) struct page_ext *page_ext; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return; + __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); } @@ -62,6 +65,9 @@ static inline void clear_page_poison(struct page *page) struct page_ext *page_ext; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return; + __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); } @@ -70,7 +76,7 @@ bool page_is_poisoned(struct page *page) struct page_ext *page_ext; page_ext = lookup_page_ext(page); - if (!page_ext) + if
(unlikely(!page_ext)) return false; return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index cf7ad1a53be0..e11475cdeb7a 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1105,7 +1105,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases); */ void vm_unmap_ram(const void *mem, unsigned int count) { - unsigned long size = count << PAGE_SHIFT; + unsigned long size = (unsigned long)count << PAGE_SHIFT; unsigned long addr = (unsigned long)mem; BUG_ON(!addr); @@ -1140,7 +1140,7 @@ EXPORT_SYMBOL(vm_unmap_ram); */ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) { - unsigned long size = count << PAGE_SHIFT; + unsigned long size = (unsigned long)count << PAGE_SHIFT; unsigned long addr; void *mem; @@ -1574,14 +1574,15 @@ void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) { struct vm_struct *area; + unsigned long size; /* In bytes */ might_sleep(); if (count > totalram_pages) return NULL; - area = get_vm_area_caller((count << PAGE_SHIFT), flags, - __builtin_return_address(0)); + size = (unsigned long)count << PAGE_SHIFT; + area = get_vm_area_caller(size, flags, __builtin_return_address(0)); if (!area) return NULL; diff --git a/mm/vmstat.c b/mm/vmstat.c index 77e42ef388c2..cb2a67bb4158 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1061,6 +1061,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m, continue; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + continue; if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) continue; diff --git a/mm/z3fold.c b/mm/z3fold.c index 34917d55d311..8f9e89ca1d31 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -412,7 +412,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) /* HEADLESS page stored */ bud = HEADLESS; } else { - bud = (handle - zhdr->first_num) & BUDDY_MASK; + bud = handle_to_buddy(handle); switch (bud) { case FIRST: @@ -572,15 +572,19 @@ next: pool->pages_nr--; spin_unlock(&pool->lock); return 0; - } else if (zhdr->first_chunks != 0 && - zhdr->last_chunks != 0 && zhdr->middle_chunks != 0) { - /* Full, add to buddied list */ - list_add(&zhdr->buddy, &pool->buddied); - } else if (!test_bit(PAGE_HEADLESS, &page->private)) { - z3fold_compact_page(zhdr); - /* add to unbuddied list */ - freechunks = num_free_chunks(zhdr); - list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); + } else if (!test_bit(PAGE_HEADLESS, &page->private)) { + if (zhdr->first_chunks != 0 && + zhdr->last_chunks != 0 && + zhdr->middle_chunks != 0) { + /* Full, add to buddied list */ + list_add(&zhdr->buddy, &pool->buddied); + } else { + z3fold_compact_page(zhdr); + /* add to unbuddied list */ + freechunks = num_free_chunks(zhdr); + list_add(&zhdr->buddy, + &pool->unbuddied[freechunks]); + } } /* add to beginning of LRU */ diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index a1e273af6fc8..82a116ba590e 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -290,6 +290,10 @@ static void vlan_sync_address(struct net_device *dev, if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr)) return; + /* vlan continues to inherit address of lower device */ + if (vlan_dev_inherit_address(vlandev, dev)) + goto out; + /* vlan address was different from the old address and is equal to * the new address */ if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && @@ -302,6 +306,7 @@ static void vlan_sync_address(struct net_device *dev, !ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) dev_uc_add(dev, vlandev->dev_addr); +out: 
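+ /* in all cases, resync the cached copy of the lower device's address */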
ether_addr_copy(vlan->real_dev_addr, dev->dev_addr); } diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index 9d010a09ab98..cc1557978066 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h @@ -109,6 +109,8 @@ int vlan_check_real_dev(struct net_device *real_dev, void vlan_setup(struct net_device *dev); int register_vlan_dev(struct net_device *dev); void unregister_vlan_dev(struct net_device *dev, struct list_head *head); +bool vlan_dev_inherit_address(struct net_device *dev, + struct net_device *real_dev); static inline u32 vlan_get_ingress_priority(struct net_device *dev, u16 vlan_tci) diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index e7e62570bdb8..86ae75b77390 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -245,6 +245,17 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result) strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23); } +bool vlan_dev_inherit_address(struct net_device *dev, + struct net_device *real_dev) +{ + if (dev->addr_assign_type != NET_ADDR_STOLEN) + return false; + + ether_addr_copy(dev->dev_addr, real_dev->dev_addr); + call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); + return true; +} + static int vlan_dev_open(struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); @@ -255,7 +266,8 @@ static int vlan_dev_open(struct net_device *dev) !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) return -ENETDOWN; - if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) { + if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) && + !vlan_dev_inherit_address(dev, real_dev)) { err = dev_uc_add(real_dev, dev->dev_addr); if (err < 0) goto out; @@ -560,8 +572,10 @@ static int vlan_dev_init(struct net_device *dev) /* ipv6 shared card related stuff */ dev->dev_id = real_dev->dev_id; - if (is_zero_ether_addr(dev->dev_addr)) - eth_hw_addr_inherit(dev, real_dev); + if (is_zero_ether_addr(dev->dev_addr)) { + ether_addr_copy(dev->dev_addr, real_dev->dev_addr); + dev->addr_assign_type = NET_ADDR_STOLEN; + } if (is_zero_ether_addr(dev->broadcast)) memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); diff --git a/net/atm/signaling.c b/net/atm/signaling.c index 4fd6af47383a..adb6e3d21b1e 100644 --- a/net/atm/signaling.c +++ b/net/atm/signaling.c @@ -124,7 +124,7 @@ as_indicate_complete: break; case as_addparty: case as_dropparty: - sk->sk_err_soft = msg->reply; + sk->sk_err_soft = -msg->reply; /* < 0 failure, otherwise ep_ref */ clear_bit(ATM_VF_WAITING, &vcc->flags); break; diff --git a/net/atm/svc.c b/net/atm/svc.c index 3fa0a9ee98d1..878563a8354d 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c @@ -546,7 +546,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr, schedule(); } finish_wait(sk_sleep(sk), &wait); - error = xchg(&sk->sk_err_soft, 0); + error = -xchg(&sk->sk_err_soft, 0); out: release_sock(sk); return error; @@ -573,7 +573,7 @@ static int svc_dropparty(struct socket *sock, int ep_ref) error = -EUNATCH; goto out; } - error = xchg(&sk->sk_err_soft, 0); + error = -xchg(&sk->sk_err_soft, 0); out: release_sock(sk); return error; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 0160d7d09a1e..89469592076c 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1276,9 +1276,9 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc, const struct ceph_osd_request_target *t, struct ceph_pg_pool_info *pi) { - bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD); - bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) || - 
ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || + bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); + bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || + ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || __pool_full(pi); WARN_ON(pi->id != t->base_oloc.pool); @@ -1303,8 +1303,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc, bool force_resend = false; bool need_check_tiering = false; bool need_resend = false; - bool sort_bitwise = ceph_osdmap_flag(osdc->osdmap, - CEPH_OSDMAP_SORTBITWISE); + bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE); enum calc_target_result ct_res; int ret; @@ -1540,9 +1539,9 @@ static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg) */ msg->hdr.data_off = cpu_to_le16(req->r_data_offset); - dout("%s req %p oid %*pE oid_len %d front %zu data %u\n", __func__, - req, req->r_t.target_oid.name_len, req->r_t.target_oid.name, - req->r_t.target_oid.name_len, msg->front.iov_len, data_len); + dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__, + req, req->r_t.target_oid.name, req->r_t.target_oid.name_len, + msg->front.iov_len, data_len); } /* @@ -1590,9 +1589,9 @@ static void maybe_request_map(struct ceph_osd_client *osdc) verify_osdc_locked(osdc); WARN_ON(!osdc->osdmap->epoch); - if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || - ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) || - ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) { + if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || + ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) || + ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { dout("%s osdc %p continuous\n", __func__, osdc); continuous = true; } else { @@ -1629,19 +1628,19 @@ again: } if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && - ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) { + ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { dout("req %p pausewr\n", req); req->r_t.paused = true; maybe_request_map(osdc); } else if ((req->r_flags & CEPH_OSD_FLAG_READ) && - ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) { + ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { dout("req %p pauserd\n", req); req->r_t.paused = true; maybe_request_map(osdc); } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY | CEPH_OSD_FLAG_FULL_FORCE)) && - (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || + (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || pool_full(osdc, req->r_t.base_oloc.pool))) { dout("req %p full/pool_full\n", req); pr_warn_ratelimited("FULL or reached pool quota\n"); @@ -2280,7 +2279,7 @@ static void send_linger_ping(struct ceph_osd_linger_request *lreq) struct ceph_osd_request *req = lreq->ping_req; struct ceph_osd_req_op *op = &req->r_ops[0]; - if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) { + if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { dout("%s PAUSERD\n", __func__); return; } @@ -2893,6 +2892,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) dout("req %p tid %llu cb\n", req, req->r_tid); __complete_request(req); } + if (m.flags & CEPH_OSD_FLAG_ONDISK) + complete_all(&req->r_safe_completion); + ceph_osdc_put_request(req); } else { if (req->r_unsafe_callback) { dout("req %p tid %llu unsafe-cb\n", req, req->r_tid); @@ -2901,10 +2903,7 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) WARN_ON(1); } } - if (m.flags & CEPH_OSD_FLAG_ONDISK) - complete_all(&req->r_safe_completion); - ceph_osdc_put_request(req); return; fail_request: @@ -3050,7 +3049,7 @@ static int 
handle_one_map(struct ceph_osd_client *osdc, bool skipped_map = false; bool was_full; - was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL); + was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL); set_pool_was_full(osdc); if (incremental) @@ -3088,7 +3087,7 @@ static int handle_one_map(struct ceph_osd_client *osdc, osdc->osdmap = newmap; } - was_full &= !ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL); + was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL); scan_requests(&osdc->homeless_osd, skipped_map, was_full, true, need_resend, need_resend_linger); @@ -3174,9 +3173,9 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) if (ceph_check_fsid(osdc->client, &fsid) < 0) goto bad; - was_pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD); - was_pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) || - ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || + was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); + was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || + ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc); /* incremental maps */ @@ -3238,9 +3237,9 @@ done: * we find out when we are no longer full and stop returning * ENOSPC. */ - pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD); - pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) || - ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || + pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); + pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || + ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc); if (was_pauserd || was_pausewr || pauserd || pausewr) maybe_request_map(osdc); diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index cde52e94732f..03062bb763b3 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -1778,8 +1778,8 @@ int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap, raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name, oid->name_len); - dout("%s %*pE -> raw_pgid %llu.%x\n", __func__, oid->name_len, - oid->name, raw_pgid->pool, raw_pgid->seed); + dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name, + raw_pgid->pool, raw_pgid->seed); return 0; } EXPORT_SYMBOL(ceph_object_locator_to_pg); diff --git a/net/core/hwbm.c b/net/core/hwbm.c index 941c28486896..2cab489ae62e 100644 --- a/net/core/hwbm.c +++ b/net/core/hwbm.c @@ -55,18 +55,21 @@ int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp) spin_lock_irqsave(&bm_pool->lock, flags); if (bm_pool->buf_num == bm_pool->size) { pr_warn("pool already filled\n"); + spin_unlock_irqrestore(&bm_pool->lock, flags); return bm_pool->buf_num; } if (buf_num + bm_pool->buf_num > bm_pool->size) { pr_warn("cannot allocate %d buffers for pool\n", buf_num); + spin_unlock_irqrestore(&bm_pool->lock, flags); return 0; } if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) { pr_warn("Adding %d buffers to the %d current buffers will overflow\n", buf_num, bm_pool->buf_num); + spin_unlock_irqrestore(&bm_pool->lock, flags); return 0; } diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 8604ae245960..8b02df0d354d 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2245,10 +2245,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) hrtimer_set_expires(&t.timer, spin_until); remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer)); - if (remaining <= 0) { - pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); - return; - } + if (remaining <= 0) + goto out; start_time = ktime_get(); if (remaining < 
100000) { @@ -2273,7 +2271,9 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) } pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); +out: pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); + destroy_hrtimer_on_stack(&t.timer); } static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index ca207dbf673b..116187b5c267 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -1289,8 +1289,8 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla, nl802154_dev_addr_policy)) return -EINVAL; - if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] && - !attrs[NL802154_DEV_ADDR_ATTR_MODE] && + if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || + !attrs[NL802154_DEV_ADDR_ATTR_MODE] || !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] || attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])) return -EINVAL; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 377424ea17a4..d39e9e47a26e 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1681,6 +1681,14 @@ static __net_init int inet_init_net(struct net *net) */ net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1); net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0); + + /* Default values for sysctl-controlled parameters. + * We set them here, in case sysctl is not compiled. + */ + net->ipv4.sysctl_ip_default_ttl = IPDEFTTL; + net->ipv4.sysctl_ip_dynaddr = 0; + net->ipv4.sysctl_ip_early_demux = 1; + return 0; } diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index bb0419582b8d..1cb67de106fe 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -999,10 +999,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) if (!net->ipv4.sysctl_local_reserved_ports) goto err_ports; - net->ipv4.sysctl_ip_default_ttl = IPDEFTTL; - net->ipv4.sysctl_ip_dynaddr = 0; - net->ipv4.sysctl_ip_early_demux = 1; - return 0; err_ports: diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 3f8411328de5..2343e4f2e0bf 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -232,6 +232,15 @@ config IPV6_GRE Saying M here will produce a module called ip6_gre. If unsure, say N. 
+config IPV6_FOU + tristate + default NET_FOU && IPV6 + +config IPV6_FOU_TUNNEL + tristate + default NET_FOU_IP_TUNNELS && IPV6_FOU + select IPV6_TUNNEL + config IPV6_MULTIPLE_TABLES bool "IPv6: Multiple Routing Tables" select FIB_RULES diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index 7ec3129c9ace..6d8ea099213e 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile @@ -42,7 +42,7 @@ obj-$(CONFIG_IPV6_VTI) += ip6_vti.o obj-$(CONFIG_IPV6_SIT) += sit.o obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o obj-$(CONFIG_IPV6_GRE) += ip6_gre.o -obj-$(CONFIG_NET_FOU) += fou6.o +obj-$(CONFIG_IPV6_FOU) += fou6.o obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload) diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c index c972d0b52579..9ea249b9451e 100644 --- a/net/ipv6/fou6.c +++ b/net/ipv6/fou6.c @@ -69,7 +69,7 @@ int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, } EXPORT_SYMBOL(gue6_build_header); -#ifdef CONFIG_NET_FOU_IP_TUNNELS +#if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL) static const struct ip6_tnl_encap_ops fou_ip6tun_ops = { .encap_hlen = fou_encap_hlen, diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index af503f518278..f4ac2842d4d9 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -712,6 +712,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) fl6->daddr = p->raddr; fl6->flowi6_oif = p->link; fl6->flowlabel = 0; + fl6->flowi6_proto = IPPROTO_GRE; if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo; @@ -1027,6 +1028,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev) dev->hard_header_len = LL_MAX_HEADER + t_hlen; dev->mtu = ETH_DATA_LEN - t_hlen; + if (dev->type == ARPHRD_ETHER) + dev->mtu -= ETH_HLEN; if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) dev->mtu -= 8; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index c6f5df1bed12..6c54e03fe9c1 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -128,6 +128,7 @@ static inline struct sock *l2tp_ip6_bind_lookup(struct net *net, */ static int l2tp_ip6_recv(struct sk_buff *skb) { + struct net *net = dev_net(skb->dev); struct sock *sk; u32 session_id; u32 tunnel_id; @@ -154,7 +155,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb) } /* Ok, this is a data packet. Lookup the session. 
*/ - session = l2tp_session_find(&init_net, NULL, session_id); + session = l2tp_session_find(net, NULL, session_id); if (session == NULL) goto discard; @@ -188,14 +189,14 @@ pass_up: goto discard; tunnel_id = ntohl(*(__be32 *) &skb->data[4]); - tunnel = l2tp_tunnel_find(&init_net, tunnel_id); + tunnel = l2tp_tunnel_find(net, tunnel_id); if (tunnel != NULL) sk = tunnel->sock; else { struct ipv6hdr *iph = ipv6_hdr(skb); read_lock_bh(&l2tp_ip6_lock); - sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr, + sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, 0, tunnel_id); read_unlock_bh(&l2tp_ip6_lock); } @@ -263,6 +264,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr; + struct net *net = sock_net(sk); __be32 v4addr = 0; int addr_type; int err; @@ -286,7 +288,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) err = -EADDRINUSE; read_lock_bh(&l2tp_ip6_lock); - if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr, + if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id)) goto out_in_use; read_unlock_bh(&l2tp_ip6_lock); @@ -456,7 +458,7 @@ static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb) return 0; drop: - IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS); + IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); kfree_skb(skb); return -1; } diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c index 5dba899131b3..182470847fcf 100644 --- a/net/lapb/lapb_in.c +++ b/net/lapb/lapb_in.c @@ -444,10 +444,9 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, break; case LAPB_FRMR: - lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n", + lapb_dbg(1, "(%p) S3 RX FRMR(%d) %5ph\n", lapb->dev, frame->pf, - skb->data[0], skb->data[1], skb->data[2], - skb->data[3], skb->data[4]); + skb->data); lapb_establish_data_link(lapb); lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev); lapb_requeue_frames(lapb); diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c index ba4d015bd1a6..482c94d9d958 100644 --- a/net/lapb/lapb_out.c +++ b/net/lapb/lapb_out.c @@ -148,9 +148,7 @@ void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type) } } - lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n", - lapb->dev, lapb->state, - skb->data[0], skb->data[1], skb->data[2]); + lapb_dbg(2, "(%p) S%d TX %3ph\n", lapb->dev, lapb->state, skb->data); if (!lapb_data_transmit(lapb, skb)) kfree_skb(skb); diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c index 9d0a426eccbb..3c1914df641f 100644 --- a/net/lapb/lapb_subr.c +++ b/net/lapb/lapb_subr.c @@ -113,9 +113,7 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb, { frame->type = LAPB_ILLEGAL; - lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n", - lapb->dev, lapb->state, - skb->data[0], skb->data[1], skb->data[2]); + lapb_dbg(2, "(%p) S%d RX %3ph\n", lapb->dev, lapb->state, skb->data); /* We always need to look at 2 bytes, sometimes we need * to look at 3 and those cases are handled below. 
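The lapb hunks above, and the FRMR hunks that follow, all switch from printing each byte with a separate %02X to the kernel's %*ph printk extension, which hex-dumps a small buffer (up to 64 bytes) through a single specifier. A minimal sketch of the idiom, with a hypothetical buffer and prefixes rather than anything taken from these patches:

    #include <linux/types.h>
    #include <linux/printk.h>

    static void demo_hexdump(const u8 *data)
    {
            /* first three bytes of data, printed e.g. as "hdr: 01 3f 75" */
            pr_debug("hdr: %3ph\n", data);

            /* five bytes starting at data[1], as in the FRMR dumps */
            pr_debug("frmr: %5ph\n", &data[1]);
    }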
@@ -284,10 +282,9 @@ void lapb_transmit_frmr(struct lapb_cb *lapb) dptr++; *dptr++ = lapb->frmr_type; - lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n", + lapb_dbg(1, "(%p) S%d TX FRMR %5ph\n", lapb->dev, lapb->state, - skb->data[1], skb->data[2], skb->data[3], - skb->data[4], skb->data[5]); + &skb->data[1]); } else { dptr = skb_put(skb, 4); *dptr++ = LAPB_FRMR; @@ -299,9 +296,8 @@ void lapb_transmit_frmr(struct lapb_cb *lapb) dptr++; *dptr++ = lapb->frmr_type; - lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n", - lapb->dev, lapb->state, skb->data[1], - skb->data[2], skb->data[3]); + lapb_dbg(1, "(%p) S%d TX FRMR %3ph\n", + lapb->dev, lapb->state, &skb->data[1]); } lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE); diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 879185fe183f..9a3eb7a0ebf4 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -137,11 +137,23 @@ static bool is_flow_key_valid(const struct sw_flow_key *key) return !!key->eth.type; } +static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr, + __be16 ethertype) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) { + __be16 diff[] = { ~(hdr->h_proto), ethertype }; + + skb->csum = ~csum_partial((char *)diff, sizeof(diff), + ~skb->csum); + } + + hdr->h_proto = ethertype; +} + static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, const struct ovs_action_push_mpls *mpls) { __be32 *new_mpls_lse; - struct ethhdr *hdr; /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */ if (skb->encapsulation) @@ -160,9 +172,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN); - hdr = eth_hdr(skb); - hdr->h_proto = mpls->mpls_ethertype; - + update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype); if (!skb->inner_protocol) skb_set_inner_protocol(skb, skb->protocol); skb->protocol = mpls->mpls_ethertype; @@ -193,7 +203,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key, * field correctly in the presence of VLAN tags. */ hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN); - hdr->h_proto = ethertype; + update_ethertype(skb, hdr, ethertype); if (eth_p_mpls(skb->protocol)) skb->protocol = ethertype; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 330f14e302e8..b884dae692a1 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -239,6 +239,8 @@ override: police->tcfp_t_c = ktime_get_ns(); police->tcf_index = parm->index ? 
parm->index : tcf_hash_new_index(tn); + police->tcf_tm.install = jiffies; + police->tcf_tm.lastuse = jiffies; h = tcf_hash(police->tcf_index, POL_TAB_MASK); spin_lock_bh(&hinfo->lock); hlist_add_head(&police->tcf_head, &hinfo->htab[h]); @@ -268,6 +270,7 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a, spin_lock(&police->tcf_lock); bstats_update(&police->tcf_bstats, skb); + tcf_lastuse_update(&police->tcf_tm); if (police->tcfp_ewma_rate && police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { @@ -327,6 +330,7 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) .refcnt = police->tcf_refcnt - ref, .bindcnt = police->tcf_bindcnt - bind, }; + struct tcf_t t; if (police->rate_present) psched_ratecfg_getrate(&opt.rate, &police->rate); @@ -340,6 +344,13 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) if (police->tcfp_ewma_rate && nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate)) goto nla_put_failure; + + t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install); + t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse); + t.expires = jiffies_to_clock_t(police->tcf_tm.expires); + if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD)) + goto nla_put_failure; + return skb->len; nla_put_failure: diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 64f71a2155f3..ddf047df5361 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -607,6 +607,10 @@ void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool thr if (throttle) qdisc_throttled(wd->qdisc); + if (wd->last_expires == expires) + return; + + wd->last_expires = expires; hrtimer_start(&wd->timer, ns_to_ktime(expires), HRTIMER_MODE_ABS_PINNED); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index f6bf5818ed4d..d4b4218af6b1 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -928,17 +928,10 @@ ok: } } qdisc_qstats_overlimit(sch); - if (likely(next_event > q->now)) { - if (!test_bit(__QDISC_STATE_DEACTIVATED, - &qdisc_root_sleeping(q->watchdog.qdisc)->state)) { - ktime_t time = ns_to_ktime(next_event); - qdisc_throttled(q->watchdog.qdisc); - hrtimer_start(&q->watchdog.timer, time, - HRTIMER_MODE_ABS_PINNED); - } - } else { + if (likely(next_event > q->now)) + qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true); + else schedule_work(&q->work); - } fin: return skb; } diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index 8e3e769dc9ea..1ce724b87618 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c @@ -356,6 +356,9 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p) if (cb->args[4] < cb->args[1]) goto next; + if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs)) + goto next; + if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family) goto next; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 777d0324594a..67154b848aa9 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4220,6 +4220,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, info->sctpi_s_disable_fragments = sp->disable_fragments; info->sctpi_s_v4mapped = sp->v4mapped; info->sctpi_s_frag_interleave = sp->frag_interleave; + info->sctpi_s_type = sp->type; return 0; } diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 4dfc5c14f8c3..f795b1dd0ccd 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -346,9 +346,15 @@ static int tipc_nl_compat_bearer_dump(struct 
tipc_nl_compat_msg *msg, struct nlattr **attrs) { struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1]; + int err; + + if (!attrs[TIPC_NLA_BEARER]) + return -EINVAL; - nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER], - NULL); + err = nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, + attrs[TIPC_NLA_BEARER], NULL); + if (err) + return err; return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME, nla_data(bearer[TIPC_NLA_BEARER_NAME]), @@ -460,14 +466,31 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; + int err; - nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL); + if (!attrs[TIPC_NLA_LINK]) + return -EINVAL; - nla_parse_nested(prop, TIPC_NLA_PROP_MAX, link[TIPC_NLA_LINK_PROP], - NULL); + err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], + NULL); + if (err) + return err; + + if (!link[TIPC_NLA_LINK_PROP]) + return -EINVAL; - nla_parse_nested(stats, TIPC_NLA_STATS_MAX, link[TIPC_NLA_LINK_STATS], - NULL); + err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX, + link[TIPC_NLA_LINK_PROP], NULL); + if (err) + return err; + + if (!link[TIPC_NLA_LINK_STATS]) + return -EINVAL; + + err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX, + link[TIPC_NLA_LINK_STATS], NULL); + if (err) + return err; name = (char *)TLV_DATA(msg->req); if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) @@ -569,8 +592,15 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg, { struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; struct tipc_link_info link_info; + int err; - nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL); + if (!attrs[TIPC_NLA_LINK]) + return -EINVAL; + + err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], + NULL); + if (err) + return err; link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); @@ -758,12 +788,23 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg, u32 node, depth, type, lowbound, upbound; static const char * const scope_str[] = {"", " zone", " cluster", " node"}; + int err; - nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX, - attrs[TIPC_NLA_NAME_TABLE], NULL); + if (!attrs[TIPC_NLA_NAME_TABLE]) + return -EINVAL; - nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, nt[TIPC_NLA_NAME_TABLE_PUBL], - NULL); + err = nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX, + attrs[TIPC_NLA_NAME_TABLE], NULL); + if (err) + return err; + + if (!nt[TIPC_NLA_NAME_TABLE_PUBL]) + return -EINVAL; + + err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, + nt[TIPC_NLA_NAME_TABLE_PUBL], NULL); + if (err) + return err; ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); @@ -815,8 +856,15 @@ static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, { u32 type, lower, upper; struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1]; + int err; - nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], NULL); + if (!attrs[TIPC_NLA_PUBL]) + return -EINVAL; + + err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], + NULL); + if (err) + return err; type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]); lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]); @@ -876,7 +924,14 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg, u32 sock_ref; struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; + int err; - nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], NULL); + if
(!attrs[TIPC_NLA_SOCK]) + return -EINVAL; + + err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], + NULL); + if (err) + return err; sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); tipc_tlv_sprintf(msg->rep, "%u:", sock_ref); @@ -917,9 +971,15 @@ static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg, struct nlattr **attrs) { struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1]; + int err; + + if (!attrs[TIPC_NLA_MEDIA]) + return -EINVAL; - nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA], - NULL); + err = nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA], + NULL); + if (err) + return err; return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME, nla_data(media[TIPC_NLA_MEDIA_NAME]), @@ -931,8 +991,15 @@ static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg, { struct tipc_node_info node_info; struct nlattr *node[TIPC_NLA_NODE_MAX + 1]; + int err; - nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL); + if (!attrs[TIPC_NLA_NODE]) + return -EINVAL; + + err = nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], + NULL); + if (err) + return err; node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR])); node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP])); @@ -971,8 +1038,16 @@ static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg, { __be32 id; struct nlattr *net[TIPC_NLA_NET_MAX + 1]; + int err; + + if (!attrs[TIPC_NLA_NET]) + return -EINVAL; + + err = nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], + NULL); + if (err) + return err; - nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], NULL); id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID])); return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id)); diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 6750595bd7b8..4904ced676d4 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2454,6 +2454,7 @@ sub process { # Check for git id commit length and improperly formed commit descriptions if ($in_commit_log && !$commit_log_possible_stack_dump && + $line !~ /^\s*(?:Link|Patchwork|http|BugLink):/i && ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i || ($line =~ /\b[0-9a-f]{12,40}\b/i && $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i && diff --git a/security/keys/compat.c b/security/keys/compat.c index c8783b3b628c..36c80bf5b89c 100644 --- a/security/keys/compat.c +++ b/security/keys/compat.c @@ -134,7 +134,7 @@ COMPAT_SYSCALL_DEFINE5(keyctl, u32, option, case KEYCTL_DH_COMPUTE: return keyctl_dh_compute(compat_ptr(arg2), compat_ptr(arg3), - arg4); + arg4, compat_ptr(arg5)); default: return -EOPNOTSUPP; diff --git a/security/keys/dh.c b/security/keys/dh.c index 880505a4b9f1..531ed2ec132f 100644 --- a/security/keys/dh.c +++ b/security/keys/dh.c @@ -78,7 +78,8 @@ error: } long keyctl_dh_compute(struct keyctl_dh_params __user *params, - char __user *buffer, size_t buflen) + char __user *buffer, size_t buflen, + void __user *reserved) { long ret; MPI base, private, prime, result; @@ -97,6 +98,11 @@ long keyctl_dh_compute(struct keyctl_dh_params __user *params, goto out; } + if (reserved) { + ret = -EINVAL; + goto out; + } + keylen = mpi_from_key(pcopy.prime, buflen, &prime); if (keylen < 0 || !prime) { /* buflen == 0 may be used to query the required buffer size, diff --git a/security/keys/internal.h b/security/keys/internal.h index 8ec7a528365d..a705a7d92ad7 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -260,10 +260,11 @@ static inline long keyctl_get_persistent(uid_t uid, 
key_serial_t destring) #ifdef CONFIG_KEY_DH_OPERATIONS extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *, - size_t); + size_t, void __user *); #else static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params, - char __user *buffer, size_t buflen) + char __user *buffer, size_t buflen, + void __user *reserved) { return -EOPNOTSUPP; } diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 3b135a0af344..d580ad06b792 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -1688,8 +1688,8 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3, case KEYCTL_DH_COMPUTE: return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2, - (char __user *) arg3, - (size_t) arg4); + (char __user *) arg3, (size_t) arg4, + (void __user *) arg5); default: return -EOPNOTSUPP; diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c index 64425d352962..888133f9e65d 100644 --- a/sound/soc/omap/omap-hdmi-audio.c +++ b/sound/soc/omap/omap-hdmi-audio.c @@ -28,7 +28,6 @@ #include <sound/asoundef.h> #include <sound/omap-pcm.h> #include <sound/omap-hdmi-audio.h> -#include <video/omapdss.h> #define DRV_NAME "omap-hdmi-audio" diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c index a3f12b3b277b..3a3a699b7489 100644 --- a/virt/kvm/arm/hyp/vgic-v2-sr.c +++ b/virt/kvm/arm/hyp/vgic-v2-sr.c @@ -100,12 +100,11 @@ static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base) if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) continue; - if (cpu_if->vgic_elrsr & (1UL << i)) { + if (cpu_if->vgic_elrsr & (1UL << i)) cpu_if->vgic_lr[i] &= ~GICH_LR_STATE; - continue; - } + else + cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); - cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); writel_relaxed(0, base + GICH_LR0 + (i * 4)); } } diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 059595ec3da0..9f6fab74dce7 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -191,10 +191,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, * other thread sync back the IRQ. */ while (irq->vcpu && /* IRQ may have state in an LR somewhere */ - irq->vcpu->cpu != -1) { /* VCPU thread is running */ - BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS); + irq->vcpu->cpu != -1) /* VCPU thread is running */ cond_resched_lock(&irq->irq_lock); - } irq->active = new_active_state; if (new_active_state) diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 8ad42c217770..e31405ee5515 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -112,11 +112,15 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) } } - /* Clear soft pending state when level IRQs have been acked */ - if (irq->config == VGIC_CONFIG_LEVEL && - !(val & GICH_LR_PENDING_BIT)) { - irq->soft_pending = false; - irq->pending = irq->line_level; + /* + * Clear soft pending state when level irqs have been acked. + * Always regenerate the pending state. 
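+ * The pending state is the OR of line_level and soft_pending, so a level interrupt whose line is still asserted stays pending even after its LR has been consumed.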
+ */ + if (irq->config == VGIC_CONFIG_LEVEL) { + if (!(val & GICH_LR_PENDING_BIT)) + irq->soft_pending = false; + + irq->pending = irq->line_level || irq->soft_pending; } spin_unlock(&irq->irq_lock); diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 336a46115937..346b4ad12b49 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -101,11 +101,15 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) } } - /* Clear soft pending state when level irqs have been acked */ - if (irq->config == VGIC_CONFIG_LEVEL && - !(val & ICH_LR_PENDING_BIT)) { - irq->soft_pending = false; - irq->pending = irq->line_level; + /* + * Clear soft pending state when level irqs have been acked. + * Always regenerate the pending state. + */ + if (irq->config == VGIC_CONFIG_LEVEL) { + if (!(val & ICH_LR_PENDING_BIT)) + irq->soft_pending = false; + + irq->pending = irq->line_level || irq->soft_pending; } spin_unlock(&irq->irq_lock); diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c index fe84e1a95dd5..8db197bb6c7a 100644 --- a/virt/kvm/irqchip.c +++ b/virt/kvm/irqchip.c @@ -40,7 +40,7 @@ int kvm_irq_map_gsi(struct kvm *kvm, irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, lockdep_is_held(&kvm->irq_lock)); - if (gsi < irq_rt->nr_rt_entries) { + if (irq_rt && gsi < irq_rt->nr_rt_entries) { hlist_for_each_entry(e, &irq_rt->map[gsi], link) { entries[n] = *e; ++n; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 37af23052470..02e98f3131bd 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2935,7 +2935,7 @@ static long kvm_vm_ioctl(struct file *filp, case KVM_SET_GSI_ROUTING: { struct kvm_irq_routing routing; struct kvm_irq_routing __user *urouting; - struct kvm_irq_routing_entry *entries; + struct kvm_irq_routing_entry *entries = NULL; r = -EFAULT; if (copy_from_user(&routing, argp, sizeof(routing))) goto out; @@ -2945,15 +2945,17 @@ static long kvm_vm_ioctl(struct file *filp, goto out; if (routing.flags) goto out; - r = -ENOMEM; - entries = vmalloc(routing.nr * sizeof(*entries)); - if (!entries) - goto out; - r = -EFAULT; - urouting = argp; - if (copy_from_user(entries, urouting->entries, - routing.nr * sizeof(*entries))) - goto out_free_irq_routing; + if (routing.nr) { + r = -ENOMEM; + entries = vmalloc(routing.nr * sizeof(*entries)); + if (!entries) + goto out; + r = -EFAULT; + urouting = argp; + if (copy_from_user(entries, urouting->entries, + routing.nr * sizeof(*entries))) + goto out_free_irq_routing; + } r = kvm_set_irq_routing(kvm, entries, routing.nr, routing.flags); out_free_irq_routing:
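The final kvm_main.c hunk guards the allocation because userspace may pass a zero-entry table to KVM_SET_GSI_ROUTING, presumably to clear the routing: vmalloc(0) returns NULL, so the old code would fail such a request with -ENOMEM even though it is valid. A self-contained sketch of the same guard-the-allocation pattern; the names here (route_entry, apply_table, set_table) are illustrative, not kernel APIs:

    #include <linux/types.h>
    #include <linux/vmalloc.h>
    #include <linux/uaccess.h>

    struct route_entry { u32 gsi; u32 type; };

    /* hypothetical consumer: must cope with entries == NULL when nr == 0 */
    static int apply_table(struct route_entry *entries, u32 nr)
    {
            return 0;
    }

    static long set_table(struct route_entry __user *uentries, u32 nr)
    {
            struct route_entry *entries = NULL;
            long r;

            if (nr) {       /* only allocate and copy when there is data */
                    entries = vmalloc(nr * sizeof(*entries));
                    if (!entries)
                            return -ENOMEM;
                    if (copy_from_user(entries, uentries,
                                       nr * sizeof(*entries))) {
                            vfree(entries);
                            return -EFAULT;
                    }
            }
            r = apply_table(entries, nr);
            vfree(entries); /* vfree(NULL) is a no-op */
            return r;
    }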