-rw-r--r--  arch/x86/pci/fixup.c | 3
-rw-r--r--  drivers/gpu/drm/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/ast/Kconfig | 16
-rw-r--r--  drivers/gpu/drm/ast/Makefile | 9
-rw-r--r--  drivers/gpu/drm/ast/ast_dram_tables.h | 144
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 244
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 356
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 341
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 527
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 1160
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c | 1780
-rw-r--r--  drivers/gpu/drm/ast/ast_tables.h | 265
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 453
-rw-r--r--  drivers/gpu/drm/cirrus/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/cirrus/Makefile | 5
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 108
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 246
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 307
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 335
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 629
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 453
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 302
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 6
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 4
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 18
-rw-r--r--  drivers/gpu/drm/gma500/Makefile | 3
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 60
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 321
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 17
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 12
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.c | 24
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_device.c | 449
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c | 24
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c | 330
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 134
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_device.c | 126
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 60
-rw-r--r--  drivers/gpu/drm/gma500/opregion.c | 18
-rw-r--r--  drivers/gpu/drm/gma500/opregion.h | 22
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c | 64
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 59
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 174
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 286
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 11
-rw-r--r--  drivers/gpu/drm/mgag200/Kconfig | 15
-rw-r--r--  drivers/gpu/drm/mgag200/Makefile | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 116
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 276
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 294
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_i2c.c | 156
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 388
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 1533
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_reg.h | 661
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 452
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 5
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 216
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 99
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 363
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 167
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 75
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 111
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 546
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 270
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 323
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 181
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 6
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 2
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 277
-rw-r--r--  include/drm/drm.h | 6
-rw-r--r--  include/drm/drmP.h | 4
-rw-r--r--  include/drm/drm_crtc.h | 42
-rw-r--r--  include/drm/drm_mode.h | 15
-rw-r--r--  include/drm/radeon_drm.h | 1
-rw-r--r--  include/linux/vga_switcheroo.h | 19
100 files changed, 14293 insertions, 2595 deletions
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index d0e6e403b4f6..82487d3d5879 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -6,6 +6,7 @@
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/vgaarb.h>
#include <asm/pci_x86.h>
static void __devinit pci_fixup_i450nx(struct pci_dev *d)
@@ -348,6 +349,8 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n");
+ if (!vga_default_device())
+ vga_set_default_device(pdev);
}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
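
Note: the hunk above marks the firmware's boot display and, if no default VGA device has been registered yet, records it via vga_set_default_device(). A minimal sketch of the consumer side, assuming only the vgaarb API pulled in by the new include (the helper name is illustrative, not part of the patch):

#include <linux/pci.h>
#include <linux/vgaarb.h>

/* Illustrative helper: true if pdev is the device the firmware used for
 * boot output, as recorded by the fixup above. */
static bool is_boot_display(struct pci_dev *pdev)
{
	return vga_default_device() == pdev;
}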
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e354bc0b052a..23120c00a881 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -186,3 +186,9 @@ source "drivers/gpu/drm/vmwgfx/Kconfig"
source "drivers/gpu/drm/gma500/Kconfig"
source "drivers/gpu/drm/udl/Kconfig"
+
+source "drivers/gpu/drm/ast/Kconfig"
+
+source "drivers/gpu/drm/mgag200/Kconfig"
+
+source "drivers/gpu/drm/cirrus/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index c20da5bda355..f65f65ed0ddf 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -34,6 +34,8 @@ obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
obj-$(CONFIG_DRM_I915) += i915/
+obj-$(CONFIG_DRM_MGAG200) += mgag200/
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
@@ -42,4 +44,5 @@ obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
+obj-$(CONFIG_DRM_AST) += ast/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
new file mode 100644
index 000000000000..a277b1257888
--- /dev/null
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -0,0 +1,16 @@
+config DRM_AST
+ tristate "AST server chips"
+ depends on DRM && PCI && EXPERIMENTAL
+ select DRM_TTM
+ select FB_SYS_COPYAREA
+ select FB_SYS_FILLRECT
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+ Say yes for experimental AST GPU driver. Do not enable
+ this driver without having a working -modesetting,
+ and a version of AST that knows to fail if KMS
+ is bound to the driver. These GPUs are commonly found
+ in server chipsets.
+
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
new file mode 100644
index 000000000000..8df4f284ee24
--- /dev/null
+++ b/drivers/gpu/drm/ast/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o
+
+obj-$(CONFIG_DRM_AST) := ast.o
\ No newline at end of file
diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h
new file mode 100644
index 000000000000..cc04539c0ff3
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_dram_tables.h
@@ -0,0 +1,144 @@
+#ifndef AST_DRAM_TABLES_H
+#define AST_DRAM_TABLES_H
+
+/* DRAM timing tables */
+struct ast_dramstruct {
+ u16 index;
+ u32 data;
+};
+
+static const struct ast_dramstruct ast2000_dram_table_data[] = {
+ { 0x0108, 0x00000000 },
+ { 0x0120, 0x00004a21 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xFFFFFFFF },
+ { 0x0004, 0x00000089 },
+ { 0x0008, 0x22331353 },
+ { 0x000C, 0x0d07000b },
+ { 0x0010, 0x11113333 },
+ { 0x0020, 0x00110350 },
+ { 0x0028, 0x1e0828f0 },
+ { 0x0024, 0x00000001 },
+ { 0x001C, 0x00000000 },
+ { 0x0014, 0x00000003 },
+ { 0xFF00, 0x00000043 },
+ { 0x0018, 0x00000131 },
+ { 0x0014, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x0018, 0x00000031 },
+ { 0x0014, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x0028, 0x1e0828f1 },
+ { 0x0024, 0x00000003 },
+ { 0x002C, 0x1f0f28fb },
+ { 0x0030, 0xFFFFFE01 },
+ { 0xFFFF, 0xFFFFFFFF }
+};
+
+static const struct ast_dramstruct ast1100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x000041f0 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00050000 },
+ { 0x0004, 0x00000585 },
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x22201724 },
+ { 0x0018, 0x1e29011a },
+ { 0x0020, 0x00c82222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x032aa02a },
+ { 0x0064, 0x002d3000 },
+ { 0x0068, 0x00000000 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x002C, 0x00000732 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000632 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00004c41 },
+ { 0xffff, 0xffffffff },
+};
+
+static const struct ast_dramstruct ast2100_dram_table_data[] = {
+ { 0x2000, 0x1688a8a8 },
+ { 0x2020, 0x00004120 },
+ { 0xFF00, 0x00000043 },
+ { 0x0000, 0xfc600309 },
+ { 0x006C, 0x00909090 },
+ { 0x0064, 0x00070000 },
+ { 0x0004, 0x00000489 },
+ { 0x0008, 0x0011030f },
+ { 0x0010, 0x32302926 },
+ { 0x0018, 0x274c0122 },
+ { 0x0020, 0x00ce2222 },
+ { 0x0014, 0x01001523 },
+ { 0x001C, 0x1024010d },
+ { 0x0024, 0x00cb2522 },
+ { 0x0038, 0xffffff82 },
+ { 0x003C, 0x00000000 },
+ { 0x0040, 0x00000000 },
+ { 0x0044, 0x00000000 },
+ { 0x0048, 0x00000000 },
+ { 0x004C, 0x00000000 },
+ { 0x0050, 0x00000000 },
+ { 0x0054, 0x00000000 },
+ { 0x0058, 0x00000000 },
+ { 0x005C, 0x00000000 },
+ { 0x0060, 0x0f2aa02a },
+ { 0x0064, 0x003f3005 },
+ { 0x0068, 0x02020202 },
+ { 0x0070, 0x00000000 },
+ { 0x0074, 0x00000000 },
+ { 0x0078, 0x00000000 },
+ { 0x007C, 0x00000000 },
+ { 0x0034, 0x00000001 },
+ { 0xFF00, 0x00000043 },
+ { 0x002C, 0x00000942 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000005 },
+ { 0x0028, 0x00000007 },
+ { 0x0028, 0x00000003 },
+ { 0x0028, 0x00000001 },
+ { 0x000C, 0x00005a08 },
+ { 0x002C, 0x00000842 },
+ { 0x0028, 0x00000001 },
+ { 0x0030, 0x000003c0 },
+ { 0x0028, 0x00000003 },
+ { 0x0030, 0x00000040 },
+ { 0x0028, 0x00000003 },
+ { 0x000C, 0x00005a21 },
+ { 0x0034, 0x00007c03 },
+ { 0x0120, 0x00005061 },
+ { 0xffff, 0xffffffff },
+};
+
+#endif
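
Note: these tables are consumed by the chip POST/DRAM-init code (ast_post.c, listed in the diffstat but not included in this excerpt). A rough, hypothetical sketch of the iteration pattern only, assuming the { 0xffff, 0xffffffff } entry is the end-of-table sentinel and that the 0xFF00 entries are special markers handled separately; ast_write32() and the 0x10000 register window are borrowed from ast_drv.h/ast_main.c below, and the real sequence in ast_post.c is more involved:

/* Hypothetical walk of an ast_dramstruct table; delays and the 0xFF00
 * marker entries are deliberately skipped in this sketch. */
static void example_walk_dram_table(struct ast_private *ast,
				    const struct ast_dramstruct *entry)
{
	for (; entry->index != 0xffff; entry++) {
		if (entry->index == 0xff00)
			continue;	/* marker entry; real handling omitted */
		ast_write32(ast, 0x10000 + entry->index, entry->data);
	}
}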
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
new file mode 100644
index 000000000000..d0c4574ef49c
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "ast_drv.h"
+
+int ast_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, ast_modeset, int, 0400);
+
+#define PCI_VENDOR_ASPEED 0x1a03
+
+static struct drm_driver driver;
+
+#define AST_VGA_DEVICE(id, info) { \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, \
+ .class_mask = 0xff0000, \
+ .vendor = PCI_VENDOR_ASPEED, \
+ .device = id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .driver_data = (unsigned long) info }
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL),
+ AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL),
+ /* AST_VGA_DEVICE(PCI_CHIP_AST1180, NULL), - don't bind to 1180 for now */
+ {0, 0, 0},
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int __devinit
+ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void
+ast_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+
+
+static int ast_drm_freeze(struct drm_device *dev)
+{
+ drm_kms_helper_poll_disable(dev);
+
+ pci_save_state(dev->pdev);
+
+ console_lock();
+ ast_fbdev_set_suspend(dev, 1);
+ console_unlock();
+ return 0;
+}
+
+static int ast_drm_thaw(struct drm_device *dev)
+{
+ int error = 0;
+
+ ast_post_gpu(dev);
+
+ drm_mode_config_reset(dev);
+ mutex_lock(&dev->mode_config.mutex);
+ drm_helper_resume_force_mode(dev);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ console_lock();
+ ast_fbdev_set_suspend(dev, 0);
+ console_unlock();
+ return error;
+}
+
+static int ast_drm_resume(struct drm_device *dev)
+{
+ int ret;
+
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ ret = ast_drm_thaw(dev);
+ if (ret)
+ return ret;
+
+ drm_kms_helper_poll_enable(dev);
+ return 0;
+}
+
+static int ast_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ int error;
+
+ error = ast_drm_freeze(ddev);
+ if (error)
+ return error;
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+static int ast_pm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ return ast_drm_resume(ddev);
+}
+
+static int ast_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ if (!ddev || !ddev->dev_private)
+ return -ENODEV;
+ return ast_drm_freeze(ddev);
+
+}
+
+static int ast_pm_thaw(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+ return ast_drm_thaw(ddev);
+}
+
+static int ast_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *ddev = pci_get_drvdata(pdev);
+
+ return ast_drm_freeze(ddev);
+}
+
+static const struct dev_pm_ops ast_pm_ops = {
+ .suspend = ast_pm_suspend,
+ .resume = ast_pm_resume,
+ .freeze = ast_pm_freeze,
+ .thaw = ast_pm_thaw,
+ .poweroff = ast_pm_poweroff,
+ .restore = ast_pm_resume,
+};
+
+static struct pci_driver ast_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = ast_pci_probe,
+ .remove = ast_pci_remove,
+ .driver.pm = &ast_pm_ops,
+};
+
+static const struct file_operations ast_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = ast_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM,
+ .dev_priv_size = 0,
+
+ .load = ast_driver_load,
+ .unload = ast_driver_unload,
+
+ .fops = &ast_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .gem_init_object = ast_gem_init_object,
+ .gem_free_object = ast_gem_free_object,
+ .dumb_create = ast_dumb_create,
+ .dumb_map_offset = ast_dumb_mmap_offset,
+ .dumb_destroy = ast_dumb_destroy,
+
+};
+
+static int __init ast_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && ast_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (ast_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &ast_pci_driver);
+}
+static void __exit ast_exit(void)
+{
+ drm_pci_exit(&driver, &ast_pci_driver);
+}
+
+module_init(ast_init);
+module_exit(ast_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
new file mode 100644
index 000000000000..d4af9edcbb97
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#ifndef __AST_DRV_H__
+#define __AST_DRV_H__
+
+#include "drm_fb_helper.h"
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#define DRIVER_AUTHOR "Dave Airlie"
+
+#define DRIVER_NAME "ast"
+#define DRIVER_DESC "AST"
+#define DRIVER_DATE "20120228"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
+
+#define PCI_CHIP_AST2000 0x2000
+#define PCI_CHIP_AST2100 0x2010
+#define PCI_CHIP_AST1180 0x1180
+
+
+enum ast_chip {
+ AST2000,
+ AST2100,
+ AST1100,
+ AST2200,
+ AST2150,
+ AST2300,
+ AST1180,
+};
+
+#define AST_DRAM_512Mx16 0
+#define AST_DRAM_1Gx16 1
+#define AST_DRAM_512Mx32 2
+#define AST_DRAM_1Gx32 3
+#define AST_DRAM_2Gx16 6
+#define AST_DRAM_4Gx16 7
+
+struct ast_fbdev;
+
+struct ast_private {
+ struct drm_device *dev;
+
+ void __iomem *regs;
+ void __iomem *ioregs;
+
+ enum ast_chip chip;
+ bool vga2_clone;
+ uint32_t dram_bus_width;
+ uint32_t dram_type;
+ uint32_t mclk;
+ uint32_t vram_size;
+
+ struct ast_fbdev *fbdev;
+
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ atomic_t validate_sequence;
+ } ttm;
+
+ struct drm_gem_object *cursor_cache;
+ uint64_t cursor_cache_gpu_addr;
+ struct ttm_bo_kmap_obj cache_kmap;
+ int next_cursor;
+};
+
+int ast_driver_load(struct drm_device *dev, unsigned long flags);
+int ast_driver_unload(struct drm_device *dev);
+
+struct ast_gem_object;
+
+#define AST_IO_AR_PORT_WRITE (0x40)
+#define AST_IO_MISC_PORT_WRITE (0x42)
+#define AST_IO_SEQ_PORT (0x44)
+#define AST_DAC_INDEX_READ (0x3c7)
+#define AST_IO_DAC_INDEX_WRITE (0x48)
+#define AST_IO_DAC_DATA (0x49)
+#define AST_IO_GR_PORT (0x4E)
+#define AST_IO_CRTC_PORT (0x54)
+#define AST_IO_INPUT_STATUS1_READ (0x5A)
+#define AST_IO_MISC_PORT_READ (0x4C)
+
+#define __ast_read(x) \
+static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->regs + reg); \
+return val;\
+}
+
+__ast_read(8);
+__ast_read(16);
+__ast_read(32)
+
+#define __ast_io_read(x) \
+static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \
+u##x val = 0;\
+val = ioread##x(ast->ioregs + reg); \
+return val;\
+}
+
+__ast_io_read(8);
+__ast_io_read(16);
+__ast_io_read(32);
+
+#define __ast_write(x) \
+static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+ iowrite##x(val, ast->regs + reg);\
+ }
+
+__ast_write(8);
+__ast_write(16);
+__ast_write(32);
+
+#define __ast_io_write(x) \
+static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\
+ iowrite##x(val, ast->ioregs + reg);\
+ }
+
+__ast_io_write(8);
+__ast_io_write(16);
+#undef __ast_io_write
+
+static inline void ast_set_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t val)
+{
+ ast_io_write16(ast, base, ((u16)val << 8) | index);
+}
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t mask, uint8_t val);
+uint8_t ast_get_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index);
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index, uint8_t mask);
+
+static inline void ast_open_key(struct ast_private *ast)
+{
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04);
+}
+
+#define AST_VIDMEM_SIZE_8M 0x00800000
+#define AST_VIDMEM_SIZE_16M 0x01000000
+#define AST_VIDMEM_SIZE_32M 0x02000000
+#define AST_VIDMEM_SIZE_64M 0x04000000
+#define AST_VIDMEM_SIZE_128M 0x08000000
+
+#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M
+
+#define AST_MAX_HWC_WIDTH 64
+#define AST_MAX_HWC_HEIGHT 64
+
+#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2)
+#define AST_HWC_SIGNATURE_SIZE 32
+
+#define AST_DEFAULT_HWC_NUM 2
+/* define for signature structure */
+#define AST_HWC_SIGNATURE_CHECKSUM 0x00
+#define AST_HWC_SIGNATURE_SizeX 0x04
+#define AST_HWC_SIGNATURE_SizeY 0x08
+#define AST_HWC_SIGNATURE_X 0x0C
+#define AST_HWC_SIGNATURE_Y 0x10
+#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
+#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+
+
+struct ast_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ struct i2c_algo_bit_data bit;
+};
+
+struct ast_connector {
+ struct drm_connector base;
+ struct ast_i2c_chan *i2c;
+};
+
+struct ast_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ struct drm_gem_object *cursor_bo;
+ uint64_t cursor_addr;
+ int cursor_width, cursor_height;
+ u8 offset_x, offset_y;
+};
+
+struct ast_encoder {
+ struct drm_encoder base;
+};
+
+struct ast_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct ast_fbdev {
+ struct drm_fb_helper helper;
+ struct ast_framebuffer afb;
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
+ struct ttm_bo_kmap_obj mapping;
+};
+
+#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
+#define to_ast_connector(x) container_of(x, struct ast_connector, base)
+#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
+#define to_ast_framebuffer(x) container_of(x, struct ast_framebuffer, base)
+
+struct ast_vbios_stdtable {
+ u8 misc;
+ u8 seq[4];
+ u8 crtc[25];
+ u8 ar[20];
+ u8 gr[9];
+};
+
+struct ast_vbios_enhtable {
+ u32 ht;
+ u32 hde;
+ u32 hfp;
+ u32 hsync;
+ u32 vt;
+ u32 vde;
+ u32 vfp;
+ u32 vsync;
+ u32 dclk_index;
+ u32 flags;
+ u32 refresh_rate;
+ u32 refresh_rate_index;
+ u32 mode_id;
+};
+
+struct ast_vbios_dclk_info {
+ u8 param1;
+ u8 param2;
+ u8 param3;
+};
+
+struct ast_vbios_mode_info {
+ struct ast_vbios_stdtable *std_table;
+ struct ast_vbios_enhtable *enh_table;
+};
+
+extern int ast_mode_init(struct drm_device *dev);
+extern void ast_mode_fini(struct drm_device *dev);
+
+int ast_framebuffer_init(struct drm_device *dev,
+ struct ast_framebuffer *ast_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+int ast_fbdev_init(struct drm_device *dev);
+void ast_fbdev_fini(struct drm_device *dev);
+void ast_fbdev_set_suspend(struct drm_device *dev, int state);
+
+struct ast_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_ast_bo(gobj) container_of((gobj), struct ast_bo, gem)
+
+static inline struct ast_bo *
+ast_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct ast_bo, bo);
+}
+
+
+#define to_ast_obj(x) container_of(x, struct ast_gem_object, base)
+
+#define AST_MM_ALIGN_SHIFT 4
+#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
+
+extern int ast_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+extern int ast_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
+
+extern int ast_gem_init_object(struct drm_gem_object *obj);
+extern void ast_gem_free_object(struct drm_gem_object *obj);
+extern int ast_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+int ast_mm_init(struct ast_private *ast);
+void ast_mm_fini(struct ast_private *ast);
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct ast_bo **pastbo);
+
+int ast_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int ast_bo_unpin(struct ast_bo *bo);
+
+int ast_bo_reserve(struct ast_bo *bo, bool no_wait);
+void ast_bo_unreserve(struct ast_bo *bo);
+void ast_ttm_placement(struct ast_bo *bo, int domain);
+int ast_bo_push_sysram(struct ast_bo *bo);
+int ast_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* ast post */
+void ast_post_gpu(struct drm_device *dev);
+#endif
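
Note: the read/write helpers above follow the usual VGA index/data-pair convention — write the index byte to the base port, then access the data byte at base + 1 (or do both in a single 16-bit write, as ast_set_index_reg() does). A small usage sketch; the 0xd0 scratch index is only an example, borrowed from ast_get_max_dclk() later in this patch:

/* Illustrative only; assumes "ast_drv.h" is included. */
static u8 example_read_scratch(struct ast_private *ast)
{
	/* unlock the extended CRTC register bank first */
	ast_open_key(ast);
	/* read index 0xd0 through the CRTC index/data pair, keeping all 8 bits */
	return ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
}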
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
new file mode 100644
index 000000000000..2fc8e9e860b1
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+#include "ast_drv.h"
+
+static void ast_dirty_update(struct ast_fbdev *afbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ int src_offset, dst_offset;
+ int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
+ int ret;
+ bool unmap = false;
+
+ obj = afbdev->afb.obj;
+ bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("failed to reserve fb bo\n");
+ return;
+ }
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ ast_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
+ for (i = y; i < y + height; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+
+ }
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ ast_bo_unreserve(bo);
+}
+
+static void ast_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_fillrect(info, rect);
+ ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void ast_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_copyarea(info, area);
+ ast_dirty_update(afbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void ast_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct ast_fbdev *afbdev = info->par;
+ sys_imageblit(info, image);
+ ast_dirty_update(afbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+static struct fb_ops astfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = ast_fillrect,
+ .fb_copyarea = ast_copyarea,
+ .fb_imageblit = ast_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int astfb_create_object(struct ast_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 bpp, depth;
+ u32 size;
+ struct drm_gem_object *gobj;
+
+ int ret = 0;
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = ast_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int astfb_create(struct ast_fbdev *afbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_framebuffer *fb;
+ struct fb_info *info;
+ int size, ret;
+ struct device *device = &dev->pdev->dev;
+ void *sysram;
+ struct drm_gem_object *gobj = NULL;
+ struct ast_bo *bo = NULL;
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7)/8);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = astfb_create_object(afbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+ bo = gem_to_ast_bo(gobj);
+
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+ info = framebuffer_alloc(0, device);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->par = afbdev;
+
+ ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
+ if (ret)
+ goto out;
+
+ afbdev->sysram = sysram;
+ afbdev->size = size;
+
+ fb = &afbdev->afb.base;
+ afbdev->helper.fb = fb;
+ afbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "astdrmfb");
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &astfb_ops;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height);
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ DRM_DEBUG_KMS("allocated %dx%d\n",
+ fb->width, fb->height);
+
+ return 0;
+out:
+ return ret;
+}
+
+static void ast_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ ast_crtc->lut_r[regno] = red >> 8;
+ ast_crtc->lut_g[regno] = green >> 8;
+ ast_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ *red = ast_crtc->lut_r[regno] << 8;
+ *green = ast_crtc->lut_g[regno] << 8;
+ *blue = ast_crtc->lut_b[regno] << 8;
+}
+
+static int ast_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = astfb_create(afbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
+ .gamma_set = ast_fb_gamma_set,
+ .gamma_get = ast_fb_gamma_get,
+ .fb_probe = ast_find_or_create_single,
+};
+
+static void ast_fbdev_destroy(struct drm_device *dev,
+ struct ast_fbdev *afbdev)
+{
+ struct fb_info *info;
+ struct ast_framebuffer *afb = &afbdev->afb;
+ if (afbdev->helper.fbdev) {
+ info = afbdev->helper.fbdev;
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (afb->obj) {
+ drm_gem_object_unreference_unlocked(afb->obj);
+ afb->obj = NULL;
+ }
+ drm_fb_helper_fini(&afbdev->helper);
+
+ vfree(afbdev->sysram);
+ drm_framebuffer_cleanup(&afb->base);
+}
+
+int ast_fbdev_init(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_fbdev *afbdev;
+ int ret;
+
+ afbdev = kzalloc(sizeof(struct ast_fbdev), GFP_KERNEL);
+ if (!afbdev)
+ return -ENOMEM;
+
+ ast->fbdev = afbdev;
+ afbdev->helper.funcs = &ast_fb_helper_funcs;
+ ret = drm_fb_helper_init(dev, &afbdev->helper,
+ 1, 1);
+ if (ret) {
+ kfree(afbdev);
+ return ret;
+ }
+
+ drm_fb_helper_single_add_all_connectors(&afbdev->helper);
+ drm_fb_helper_initial_config(&afbdev->helper, 32);
+ return 0;
+}
+
+void ast_fbdev_fini(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (!ast->fbdev)
+ return;
+
+ ast_fbdev_destroy(dev, ast->fbdev);
+ kfree(ast->fbdev);
+ ast->fbdev = NULL;
+}
+
+void ast_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (!ast->fbdev)
+ return;
+
+ fb_set_suspend(ast->fbdev->helper.fbdev, state);
+}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
new file mode 100644
index 000000000000..95ae55b8214b
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "ast_drv.h"
+
+
+#include "drm_fb_helper.h"
+#include "drm_crtc_helper.h"
+
+#include "ast_dram_tables.h"
+
+void ast_set_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index,
+ uint8_t mask, uint8_t val)
+{
+ u8 tmp;
+ ast_io_write8(ast, base, index);
+ tmp = (ast_io_read8(ast, base + 1) & mask) | val;
+ ast_set_index_reg(ast, base, index, tmp);
+}
+
+uint8_t ast_get_index_reg(struct ast_private *ast,
+ uint32_t base, uint8_t index)
+{
+ uint8_t ret;
+ ast_io_write8(ast, base, index);
+ ret = ast_io_read8(ast, base + 1);
+ return ret;
+}
+
+uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+ uint32_t base, uint8_t index, uint8_t mask)
+{
+ uint8_t ret;
+ ast_io_write8(ast, base, index);
+ ret = ast_io_read8(ast, base + 1) & mask;
+ return ret;
+}
+
+
+static int ast_detect_chip(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ if (dev->pdev->device == PCI_CHIP_AST1180) {
+ ast->chip = AST1100;
+ DRM_INFO("AST 1180 detected\n");
+ } else {
+ if (dev->pdev->revision >= 0x20) {
+ ast->chip = AST2300;
+ DRM_INFO("AST 2300 detected\n");
+ } else if (dev->pdev->revision >= 0x10) {
+ uint32_t data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+
+ data = ast_read32(ast, 0x1207c);
+ switch (data & 0x0300) {
+ case 0x0200:
+ ast->chip = AST1100;
+ DRM_INFO("AST 1100 detected\n");
+ break;
+ case 0x0100:
+ ast->chip = AST2200;
+ DRM_INFO("AST 2200 detected\n");
+ break;
+ case 0x0000:
+ ast->chip = AST2150;
+ DRM_INFO("AST 2150 detected\n");
+ break;
+ default:
+ ast->chip = AST2100;
+ DRM_INFO("AST 2100 detected\n");
+ break;
+ }
+ ast->vga2_clone = false;
+ } else {
+ ast->chip = 2000;
+ DRM_INFO("AST 2000 detected\n");
+ }
+ }
+ return 0;
+}
+
+static int ast_get_dram_info(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ uint32_t data, data2;
+ uint32_t denum, num, div, ref_pll;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x01);
+ data = ast_read32(ast, 0x10004);
+
+ if (data & 0x400)
+ ast->dram_bus_width = 16;
+ else
+ ast->dram_bus_width = 32;
+
+ if (ast->chip == AST2300) {
+ switch (data & 0x03) {
+ case 0:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ default:
+ case 1:
+ ast->dram_type = AST_DRAM_1Gx16;
+ break;
+ case 2:
+ ast->dram_type = AST_DRAM_2Gx16;
+ break;
+ case 3:
+ ast->dram_type = AST_DRAM_4Gx16;
+ break;
+ }
+ } else {
+ switch (data & 0x0c) {
+ case 0:
+ case 4:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (data & 0x40)
+ ast->dram_type = AST_DRAM_1Gx16;
+ else
+ ast->dram_type = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ ast->dram_type = AST_DRAM_1Gx32;
+ break;
+ }
+ }
+
+ data = ast_read32(ast, 0x10120);
+ data2 = ast_read32(ast, 0x10170);
+ if (data2 & 0x2000)
+ ref_pll = 14318;
+ else
+ ref_pll = 12000;
+
+ denum = data & 0x1f;
+ num = (data & 0x3fe0) >> 5;
+ data = (data & 0xc000) >> 14;
+ switch (data) {
+ case 3:
+ div = 0x4;
+ break;
+ case 2:
+ case 1:
+ div = 0x2;
+ break;
+ default:
+ div = 0x1;
+ break;
+ }
+ ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+ return 0;
+}
+
+uint32_t ast_get_max_dclk(struct drm_device *dev, int bpp)
+{
+ struct ast_private *ast = dev->dev_private;
+ uint32_t dclk, jreg;
+ uint32_t dram_bus_width, mclk, dram_bandwidth, actual_dram_bandwidth, dram_efficency = 500;
+
+ dram_bus_width = ast->dram_bus_width;
+ mclk = ast->mclk;
+
+ if (ast->chip == AST2100 ||
+ ast->chip == AST1100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2150 ||
+ ast->dram_bus_width == 16)
+ dram_efficency = 600;
+ else if (ast->chip == AST2300)
+ dram_efficency = 400;
+
+ dram_bandwidth = mclk * dram_bus_width * 2 / 8;
+ actual_dram_bandwidth = dram_bandwidth * dram_efficency / 1000;
+
+ if (ast->chip == AST1180)
+ dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
+ else {
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ if ((jreg & 0x08) && (ast->chip == AST2000))
+ dclk = actual_dram_bandwidth / ((bpp + 1 + 16) / 8);
+ else if ((jreg & 0x08) && (bpp == 8))
+ dclk = actual_dram_bandwidth / ((bpp + 1 + 24) / 8);
+ else
+ dclk = actual_dram_bandwidth / ((bpp + 1) / 8);
+ }
+
+ if (ast->chip == AST2100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2300 ||
+ ast->chip == AST1180) {
+ if (dclk > 200)
+ dclk = 200;
+ } else {
+ if (dclk > 165)
+ dclk = 165;
+ }
+
+ return dclk;
+}
+
+static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
+ if (ast_fb->obj)
+ drm_gem_object_unreference_unlocked(ast_fb->obj);
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static int ast_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned int *handle)
+{
+ return -EINVAL;
+}
+
+static const struct drm_framebuffer_funcs ast_fb_funcs = {
+ .destroy = ast_user_framebuffer_destroy,
+ .create_handle = ast_user_framebuffer_create_handle,
+};
+
+
+int ast_framebuffer_init(struct drm_device *dev,
+ struct ast_framebuffer *ast_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
+ if (ret) {
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
+ ast_fb->obj = obj;
+ return 0;
+}
+
+static struct drm_framebuffer *
+ast_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct ast_framebuffer *ast_fb;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
+ if (!ast_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(ast_fb);
+ return ERR_PTR(ret);
+ }
+ return &ast_fb->base;
+}
+
+static const struct drm_mode_config_funcs ast_mode_funcs = {
+ .fb_create = ast_user_framebuffer_create,
+};
+
+static u32 ast_get_vram_info(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 jreg;
+
+ ast_open_key(ast);
+
+ jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff);
+ switch (jreg & 3) {
+ case 0: return AST_VIDMEM_SIZE_8M;
+ case 1: return AST_VIDMEM_SIZE_16M;
+ case 2: return AST_VIDMEM_SIZE_32M;
+ case 3: return AST_VIDMEM_SIZE_64M;
+ }
+ return AST_VIDMEM_DEFAULT_SIZE;
+}
+
+int ast_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct ast_private *ast;
+ int ret = 0;
+
+ ast = kzalloc(sizeof(struct ast_private), GFP_KERNEL);
+ if (!ast)
+ return -ENOMEM;
+
+ dev->dev_private = ast;
+ ast->dev = dev;
+
+ ast->regs = pci_iomap(dev->pdev, 1, 0);
+ if (!ast->regs) {
+ ret = -EIO;
+ goto out_free;
+ }
+ ast->ioregs = pci_iomap(dev->pdev, 2, 0);
+ if (!ast->ioregs) {
+ ret = -EIO;
+ goto out_free;
+ }
+
+ ast_detect_chip(dev);
+
+ if (ast->chip != AST1180) {
+ ast_get_dram_info(dev);
+ ast->vram_size = ast_get_vram_info(dev);
+ DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ goto out_free;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.funcs = (void *)&ast_mode_funcs;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ if (ast->chip == AST2100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2300 ||
+ ast->chip == AST1180) {
+ dev->mode_config.max_width = 1920;
+ dev->mode_config.max_height = 2048;
+ } else {
+ dev->mode_config.max_width = 1600;
+ dev->mode_config.max_height = 1200;
+ }
+
+ ret = ast_mode_init(dev);
+ if (ret)
+ goto out_free;
+
+ ret = ast_fbdev_init(dev);
+ if (ret)
+ goto out_free;
+
+ return 0;
+out_free:
+ kfree(ast);
+ dev->dev_private = NULL;
+ return ret;
+}
+
+int ast_driver_unload(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ ast_mode_fini(dev);
+ ast_fbdev_fini(dev);
+ drm_mode_config_cleanup(dev);
+
+ ast_mm_fini(ast);
+ pci_iounmap(dev->pdev, ast->ioregs);
+ pci_iounmap(dev->pdev, ast->regs);
+ kfree(ast);
+ return 0;
+}
+
+int ast_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct ast_bo *astbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = ast_bo_create(dev, size, 0, 0, &astbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &astbo->gem;
+ return 0;
+}
+
+int ast_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = ast_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+int ast_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+int ast_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
+ return 0;
+}
+
+void ast_bo_unref(struct ast_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+
+}
+void ast_gem_free_object(struct drm_gem_object *obj)
+{
+ struct ast_bo *ast_bo = gem_to_ast_bo(obj);
+
+ if (!ast_bo)
+ return;
+ ast_bo_unref(&ast_bo);
+}
+
+
+static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
+{
+ return bo->bo.addr_space_offset;
+}
+int
+ast_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct ast_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_ast_bo(obj);
+ *offset = ast_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+}
+
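Note: the dumb-buffer hooks wired up above (ast_dumb_create, ast_dumb_mmap_offset, ast_dumb_destroy) sit behind the generic DRM dumb-buffer ioctls. A hedged userspace sketch of the consumer side, using only the standard DRM UAPI and nothing AST-specific (error handling trimmed):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Allocate a 32bpp dumb buffer and map it through the offset returned by
 * the driver's dumb_map_offset hook. */
static void *map_dumb_buffer(int drm_fd, uint32_t w, uint32_t h, uint32_t *pitch)
{
	struct drm_mode_create_dumb creq = { .width = w, .height = h, .bpp = 32 };
	struct drm_mode_map_dumb mreq = { 0 };

	if (ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq))
		return NULL;
	mreq.handle = creq.handle;
	if (ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq))
		return NULL;
	*pitch = creq.pitch;
	return mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, mreq.offset);
}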
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
new file mode 100644
index 000000000000..65f9d231af14
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -0,0 +1,1160 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ * Parts based on xf86-video-ast
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/export.h>
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "ast_drv.h"
+
+#include "ast_tables.h"
+
+static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
+static void ast_i2c_destroy(struct ast_i2c_chan *i2c);
+static int ast_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height);
+static int ast_cursor_move(struct drm_crtc *crtc,
+ int x, int y);
+
+static inline void ast_load_palette_index(struct ast_private *ast,
+ u8 index, u8 red, u8 green,
+ u8 blue)
+{
+ ast_io_write8(ast, AST_IO_DAC_INDEX_WRITE, index);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, red);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, green);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+ ast_io_write8(ast, AST_IO_DAC_DATA, blue);
+ ast_io_read8(ast, AST_IO_SEQ_PORT);
+}
+
+static void ast_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ for (i = 0; i < 256; i++)
+ ast_load_palette_index(ast, i, ast_crtc->lut_r[i],
+ ast_crtc->lut_g[i], ast_crtc->lut_b[i]);
+}
+
+static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate;
+ u32 hborder, vborder;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ vbios_mode->std_table = &vbios_stdtable[VGAModeIndex];
+ color_index = VGAModeIndex - 1;
+ break;
+ case 16:
+ vbios_mode->std_table = &vbios_stdtable[HiCModeIndex];
+ color_index = HiCModeIndex;
+ break;
+ case 24:
+ case 32:
+ vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex];
+ color_index = TrueCModeIndex;
+ break;
+ default:
+ return false;
+ }
+
+ switch (crtc->mode.crtc_hdisplay) {
+ case 640:
+ vbios_mode->enh_table = &res_640x480[refresh_rate_index];
+ break;
+ case 800:
+ vbios_mode->enh_table = &res_800x600[refresh_rate_index];
+ break;
+ case 1024:
+ vbios_mode->enh_table = &res_1024x768[refresh_rate_index];
+ break;
+ case 1280:
+ if (crtc->mode.crtc_vdisplay == 800)
+ vbios_mode->enh_table = &res_1280x800[refresh_rate_index];
+ else
+ vbios_mode->enh_table = &res_1280x1024[refresh_rate_index];
+ break;
+ case 1440:
+ vbios_mode->enh_table = &res_1440x900[refresh_rate_index];
+ break;
+ case 1600:
+ vbios_mode->enh_table = &res_1600x1200[refresh_rate_index];
+ break;
+ case 1680:
+ vbios_mode->enh_table = &res_1680x1050[refresh_rate_index];
+ break;
+ case 1920:
+ if (crtc->mode.crtc_vdisplay == 1080)
+ vbios_mode->enh_table = &res_1920x1080[refresh_rate_index];
+ else
+ vbios_mode->enh_table = &res_1920x1200[refresh_rate_index];
+ break;
+ default:
+ return false;
+ }
+
+ refresh_rate = drm_mode_vrefresh(mode);
+ while (vbios_mode->enh_table->refresh_rate < refresh_rate) {
+ vbios_mode->enh_table++;
+ if ((vbios_mode->enh_table->refresh_rate > refresh_rate) ||
+ (vbios_mode->enh_table->refresh_rate == 0xff)) {
+ vbios_mode->enh_table--;
+ break;
+ }
+ }
+
+ hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0;
+ vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0;
+
+ adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht;
+ adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder;
+ adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder;
+ adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder +
+ vbios_mode->enh_table->hfp;
+ adjusted_mode->crtc_hsync_end = (vbios_mode->enh_table->hde + hborder +
+ vbios_mode->enh_table->hfp +
+ vbios_mode->enh_table->hsync);
+
+ adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt;
+ adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder;
+ adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder;
+ adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder +
+ vbios_mode->enh_table->vfp;
+ adjusted_mode->crtc_vsync_end = (vbios_mode->enh_table->vde + vborder +
+ vbios_mode->enh_table->vfp +
+ vbios_mode->enh_table->vsync);
+
+ refresh_rate_index = vbios_mode->enh_table->refresh_rate_index;
+ mode_id = vbios_mode->enh_table->mode_id;
+
+ if (ast->chip == AST1180) {
+ /* TODO 1180 */
+ } else {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, (u8)((color_index & 0xf) << 4));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, refresh_rate_index & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff);
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->fb->bits_per_pixel);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8);
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, adjusted_mode->crtc_vdisplay);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, adjusted_mode->crtc_vdisplay >> 8);
+ }
+
+ return true;
+
+
+}
+static void ast_set_std_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_vbios_stdtable *stdtable;
+ u32 i;
+ u8 jreg;
+
+ stdtable = vbios_mode->std_table;
+
+ jreg = stdtable->misc;
+ ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
+
+ /* Set SEQ */
+ ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03);
+ for (i = 0; i < 4; i++) {
+ jreg = stdtable->seq[i];
+ if (!i)
+ jreg |= 0x20;
+ ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1) , jreg);
+ }
+
+ /* Set CRTC */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
+ for (i = 0; i < 25; i++)
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]);
+
+ /* set AR */
+ jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+ for (i = 0; i < 20; i++) {
+ jreg = stdtable->ar[i];
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, (u8)i);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, jreg);
+ }
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x14);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x00);
+
+ jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+ ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x20);
+
+ /* Set GR */
+ for (i = 0; i < 9; i++)
+ ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
+}
+
+static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
+ u16 temp;
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00);
+
+ temp = (mode->crtc_htotal >> 3) - 5;
+ if (temp & 0x100)
+ jregAC |= 0x01; /* HT D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x00, 0x00, temp);
+
+ temp = (mode->crtc_hdisplay >> 3) - 1;
+ if (temp & 0x100)
+ jregAC |= 0x04; /* HDE D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x01, 0x00, temp);
+
+ temp = (mode->crtc_hblank_start >> 3) - 1;
+ if (temp & 0x100)
+ jregAC |= 0x10; /* HBS D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x02, 0x00, temp);
+
+ temp = ((mode->crtc_hblank_end >> 3) - 1) & 0x7f;
+ if (temp & 0x20)
+ jreg05 |= 0x80; /* HBE D[5] */
+ if (temp & 0x40)
+ jregAD |= 0x01; /* HBE D[6] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x03, 0xE0, (temp & 0x1f));
+
+ temp = (mode->crtc_hsync_start >> 3) - 1;
+ if (temp & 0x100)
+ jregAC |= 0x40; /* HRS D[8] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp);
+
+ temp = ((mode->crtc_hsync_end >> 3) - 1) & 0x3f;
+ if (temp & 0x20)
+ jregAD |= 0x04; /* HRE D[5] */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05));
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD);
+
+ /* vert timings */
+ temp = (mode->crtc_vtotal) - 2;
+ if (temp & 0x100)
+ jreg07 |= 0x01;
+ if (temp & 0x200)
+ jreg07 |= 0x20;
+ if (temp & 0x400)
+ jregAE |= 0x01;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x06, 0x00, temp);
+
+ temp = (mode->crtc_vsync_start) - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x04;
+ if (temp & 0x200)
+ jreg07 |= 0x80;
+ if (temp & 0x400)
+ jregAE |= 0x08;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x10, 0x00, temp);
+
+ temp = (mode->crtc_vsync_end - 1) & 0x3f;
+ if (temp & 0x10)
+ jregAE |= 0x20;
+ if (temp & 0x20)
+ jregAE |= 0x40;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x70, temp & 0xf);
+
+ temp = mode->crtc_vdisplay - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x02;
+ if (temp & 0x200)
+ jreg07 |= 0x40;
+ if (temp & 0x400)
+ jregAE |= 0x02;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x12, 0x00, temp);
+
+ temp = mode->crtc_vblank_start - 1;
+ if (temp & 0x100)
+ jreg07 |= 0x08;
+ if (temp & 0x200)
+ jreg09 |= 0x20;
+ if (temp & 0x400)
+ jregAE |= 0x04;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x15, 0x00, temp);
+
+ temp = mode->crtc_vblank_end - 1;
+ if (temp & 0x100)
+ jregAE |= 0x10;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x16, 0x00, temp);
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x07, 0x00, jreg07);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, (jregAE | 0x80));
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
+}
+
+static void ast_set_offset_reg(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ u16 offset;
+
+ offset = crtc->fb->pitches[0] >> 3;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
+}
+
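+/*
+ * Program the pixel clock (DCLK): look up the PLL parameters for the mode's
+ * dclk_index in dclk_table and write them to extended registers 0xC0, 0xC1
+ * and 0xBB.
+ */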
+static void ast_set_dclk_reg(struct drm_device *dev, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_vbios_dclk_info *clk_info;
+
+ clk_info = &dclk_table[vbios_mode->enh_table->dclk_index];
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, clk_info->param1);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, clk_info->param2);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f,
+ (clk_info->param3 & 0x80) | ((clk_info->param3 & 0x3) << 4));
+}
+
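+/*
+ * Depth-dependent extended register setup (0xA0/0xA3/0xA8), plus CRT FIFO
+ * threshold values in 0xA6/0xA7 that differ per chip generation.
+ */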
+static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ jregA0 = 0x70;
+ jregA3 = 0x01;
+ jregA8 = 0x00;
+ break;
+ case 15:
+ case 16:
+ jregA0 = 0x70;
+ jregA3 = 0x04;
+ jregA8 = 0x02;
+ break;
+ case 32:
+ jregA0 = 0x70;
+ jregA3 = 0x08;
+ jregA8 = 0x02;
+ break;
+ }
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
+
+ /* Set Threshold */
+ if (ast->chip == AST2300) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
+ } else if (ast->chip == AST2100 ||
+ ast->chip == AST1100 ||
+ ast->chip == AST2200 ||
+ ast->chip == AST2150) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f);
+ } else {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x2f);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x1f);
+ }
+}
+
+void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 jreg;
+
+ jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ);
+ jreg |= (vbios_mode->enh_table->flags & SyncNN);
+ ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
+}
+
+bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct ast_vbios_mode_info *vbios_mode)
+{
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+void ast_set_start_address_crt1(struct drm_crtc *crtc, unsigned offset)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u32 addr;
+
+ addr = offset >> 2;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0d, (u8)(addr & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0c, (u8)((addr >> 8) & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xaf, (u8)((addr >> 16) & 0xff));
+}
+
+static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+
+ if (ast->chip == AST1180)
+ return;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+ ast_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_OFF:
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
+ break;
+ }
+}
+
+static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+/* ast is different - we will force move buffers out of VRAM */
+static int ast_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct ast_framebuffer *ast_fb;
+ struct ast_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ ast_fb = to_ast_framebuffer(fb);
+ obj = ast_fb->obj;
+ bo = gem_to_ast_bo(obj);
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ ast_bo_push_sysram(bo);
+ ast_bo_unreserve(bo);
+ }
+
+ ast_fb = to_ast_framebuffer(crtc->fb);
+ obj = ast_fb->obj;
+ bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ ast_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&ast->fbdev->afb == ast_fb) {
+ /* if this is the console (fbcon) framebuffer, keep a kernel mapping of it */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+ }
+ ast_bo_unreserve(bo);
+
+ ast_set_start_address_crt1(crtc, (u32)gpu_addr);
+
+ return 0;
+}
+
+static int ast_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return ast_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
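+/*
+ * Full mode set: the AST1180 is rejected (no modesetting support yet), the
+ * requested mode is translated through the VBIOS tables, and then the
+ * standard VGA, CRTC timing, offset, DCLK, extended, sync and DAC registers
+ * are programmed before the CRTC is pointed at the framebuffer.
+ */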
+static int ast_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_vbios_mode_info vbios_mode;
+ bool ret;
+ if (ast->chip == AST1180) {
+ DRM_ERROR("AST 1180 modesetting not supported\n");
+ return -EINVAL;
+ }
+
+ ret = ast_get_vbios_mode_info(crtc, mode, adjusted_mode, &vbios_mode);
+ if (ret == false)
+ return -EINVAL;
+ ast_open_key(ast);
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04);
+
+ ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_offset_reg(crtc);
+ ast_set_dclk_reg(dev, adjusted_mode, &vbios_mode);
+ ast_set_ext_reg(crtc, adjusted_mode, &vbios_mode);
+ ast_set_sync_reg(dev, adjusted_mode, &vbios_mode);
+ ast_set_dac_reg(crtc, adjusted_mode, &vbios_mode);
+
+ ast_crtc_mode_set_base(crtc, x, y, old_fb);
+
+ return 0;
+}
+
+static void ast_crtc_disable(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_prepare(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_commit(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0);
+}
+
+static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
+ .dpms = ast_crtc_dpms,
+ .mode_fixup = ast_crtc_mode_fixup,
+ .mode_set = ast_crtc_mode_set,
+ .mode_set_base = ast_crtc_mode_set_base,
+ .disable = ast_crtc_disable,
+ .load_lut = ast_crtc_load_lut,
+ .prepare = ast_crtc_prepare,
+ .commit = ast_crtc_commit,
+};
+
+static void ast_crtc_reset(struct drm_crtc *crtc)
+{
+
+}
+
+static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ int end = (start + size > 256) ? 256 : start + size, i;
+
+ /* userspace palettes are always correct as is */
+ for (i = start; i < end; i++) {
+ ast_crtc->lut_r[i] = red[i] >> 8;
+ ast_crtc->lut_g[i] = green[i] >> 8;
+ ast_crtc->lut_b[i] = blue[i] >> 8;
+ }
+ ast_crtc_load_lut(crtc);
+}
+
+static void ast_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static const struct drm_crtc_funcs ast_crtc_funcs = {
+ .cursor_set = ast_cursor_set,
+ .cursor_move = ast_cursor_move,
+ .reset = ast_crtc_reset,
+ .set_config = drm_crtc_helper_set_config,
+ .gamma_set = ast_crtc_gamma_set,
+ .destroy = ast_crtc_destroy,
+};
+
+int ast_crtc_init(struct drm_device *dev)
+{
+ struct ast_crtc *crtc;
+ int i;
+
+ crtc = kzalloc(sizeof(struct ast_crtc), GFP_KERNEL);
+ if (!crtc)
+ return -ENOMEM;
+
+ drm_crtc_init(dev, &crtc->base, &ast_crtc_funcs);
+ drm_mode_crtc_set_gamma_size(&crtc->base, 256);
+ drm_crtc_helper_add(&crtc->base, &ast_crtc_helper_funcs);
+
+ for (i = 0; i < 256; i++) {
+ crtc->lut_r[i] = i;
+ crtc->lut_g[i] = i;
+ crtc->lut_b[i] = i;
+ }
+ return 0;
+}
+
+static void ast_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static const struct drm_encoder_funcs ast_enc_funcs = {
+ .destroy = ast_encoder_destroy,
+};
+
+static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool ast_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void ast_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void ast_encoder_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void ast_encoder_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static const struct drm_encoder_helper_funcs ast_enc_helper_funcs = {
+ .dpms = ast_encoder_dpms,
+ .mode_fixup = ast_mode_fixup,
+ .prepare = ast_encoder_prepare,
+ .commit = ast_encoder_commit,
+ .mode_set = ast_encoder_mode_set,
+};
+
+int ast_encoder_init(struct drm_device *dev)
+{
+ struct ast_encoder *ast_encoder;
+
+ ast_encoder = kzalloc(sizeof(struct ast_encoder), GFP_KERNEL);
+ if (!ast_encoder)
+ return -ENOMEM;
+
+ drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(&ast_encoder->base, &ast_enc_helper_funcs);
+
+ ast_encoder->base.possible_crtcs = 1;
+ return 0;
+}
+
+static int ast_get_modes(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_get_edid(connector, &ast_connector->i2c->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(&ast_connector->base, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ return ret;
+ } else {
+ drm_mode_connector_update_edid_property(&ast_connector->base, NULL);
+ }
+ return 0;
+}
+
+static int ast_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void ast_connector_destroy(struct drm_connector *connector)
+{
+ struct ast_connector *ast_connector = to_ast_connector(connector);
+ ast_i2c_destroy(ast_connector->i2c);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static enum drm_connector_status
+ast_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
+ .mode_valid = ast_mode_valid,
+ .get_modes = ast_get_modes,
+ .best_encoder = ast_best_single_encoder,
+};
+
+static const struct drm_connector_funcs ast_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = ast_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = ast_connector_destroy,
+};
+
+int ast_connector_init(struct drm_device *dev)
+{
+ struct ast_connector *ast_connector;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ ast_connector = kzalloc(sizeof(struct ast_connector), GFP_KERNEL);
+ if (!ast_connector)
+ return -ENOMEM;
+
+ connector = &ast_connector->base;
+ drm_connector_init(dev, connector, &ast_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &ast_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_sysfs_connector_add(connector);
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+
+ encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ast_connector->i2c = ast_i2c_create(dev);
+ if (!ast_connector->i2c)
+ DRM_ERROR("failed to add ddc bus for connector\n");
+
+ return 0;
+}
+
+/* allocate cursor cache and pin at start of VRAM */
+int ast_cursor_init(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ int size;
+ int ret;
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ uint64_t gpu_addr;
+
+ size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM;
+
+ ret = ast_gem_create(dev, size, true, &obj);
+ if (ret)
+ return ret;
+ bo = gem_to_ast_bo(obj);
+ ret = ast_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ goto fail;
+
+ ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ ast_bo_unreserve(bo);
+ if (ret)
+ goto fail;
+
+ /* kmap the object */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &ast->cache_kmap);
+ if (ret)
+ goto fail;
+
+ ast->cursor_cache = obj;
+ ast->cursor_cache_gpu_addr = gpu_addr;
+ DRM_ERROR("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
+ return 0;
+fail:
+ return ret;
+}
+
+void ast_cursor_fini(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ ttm_bo_kunmap(&ast->cache_kmap);
+ drm_gem_object_unreference_unlocked(ast->cursor_cache);
+}
+
+int ast_mode_init(struct drm_device *dev)
+{
+ ast_cursor_init(dev);
+ ast_crtc_init(dev);
+ ast_encoder_init(dev);
+ ast_connector_init(dev);
+ return 0;
+}
+
+void ast_mode_fini(struct drm_device *dev)
+{
+ ast_cursor_fini(dev);
+}
+
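+/*
+ * Bit-banged DDC over extended CRTC register 0xB7: SCL is sampled on bit 4
+ * and driven on bit 0, SDA is sampled on bit 5 and driven on bit 2.  The
+ * setters re-write the bit until it reads back correctly, bounded by a
+ * 0x10000 iteration limit.
+ */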
+static int get_clock(void *i2c_priv)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ uint32_t val;
+
+ val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
+ return val & 1 ? 1 : 0;
+}
+
+static int get_data(void *i2c_priv)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ uint32_t val;
+
+ val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
+ return val & 1 ? 1 : 0;
+}
+
+static void set_clock(void *i2c_priv, int clock)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ int i;
+ u8 ujcrb7, jtemp;
+
+ for (i = 0; i < 0x10000; i++) {
+ ujcrb7 = ((clock & 0x01) ? 0 : 1);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
+ if (ujcrb7 == jtemp)
+ break;
+ }
+}
+
+static void set_data(void *i2c_priv, int data)
+{
+ struct ast_i2c_chan *i2c = i2c_priv;
+ struct ast_private *ast = i2c->dev->dev_private;
+ int i;
+ u8 ujcrb7, jtemp;
+
+ for (i = 0; i < 0x10000; i++) {
+ ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
+ jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
+ if (ujcrb7 == jtemp)
+ break;
+ }
+}
+
+static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev)
+{
+ struct ast_i2c_chan *i2c;
+ int ret;
+
+ i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ "AST i2c bit bus");
+ i2c->adapter.algo_data = &i2c->bit;
+
+ i2c->bit.udelay = 20;
+ i2c->bit.timeout = 2;
+ i2c->bit.data = i2c;
+ i2c->bit.setsda = set_data;
+ i2c->bit.setscl = set_clock;
+ i2c->bit.getsda = get_data;
+ i2c->bit.getscl = get_clock;
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register bit i2c\n");
+ goto out_free;
+ }
+
+ return i2c;
+out_free:
+ kfree(i2c);
+ return NULL;
+}
+
+static void ast_i2c_destroy(struct ast_i2c_chan *i2c)
+{
+ if (!i2c)
+ return;
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
+
+void ast_show_cursor(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ u8 jreg;
+
+ jreg = 0x2;
+ /* enable ARGB cursor */
+ jreg |= 1;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
+}
+
+void ast_hide_cursor(struct drm_crtc *crtc)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
+}
+
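+/*
+ * Convert the ARGB8888 cursor image to the packed 4-bit-per-channel format
+ * used by the hardware cursor, two pixels per 32-bit write where possible.
+ * The image is placed at the bottom-right of the AST_MAX_HWC_WIDTH x
+ * AST_MAX_HWC_HEIGHT pattern, and a checksum of the written data is
+ * returned for the signature area.
+ */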
+static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
+{
+ union {
+ u32 ul;
+ u8 b[4];
+ } srcdata32[2], data32;
+ union {
+ u16 us;
+ u8 b[2];
+ } data16;
+ u32 csum = 0;
+ s32 alpha_dst_delta, last_alpha_dst_delta;
+ u8 *srcxor, *dstxor;
+ int i, j;
+ u32 per_pixel_copy, two_pixel_copy;
+
+ alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
+ last_alpha_dst_delta = alpha_dst_delta - (width << 1);
+
+ srcxor = src;
+ dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
+ per_pixel_copy = width & 1;
+ two_pixel_copy = width >> 1;
+
+ for (j = 0; j < height; j++) {
+ for (i = 0; i < two_pixel_copy; i++) {
+ srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+ srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
+ data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+ data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
+ data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
+
+ writel(data32.ul, dstxor);
+ csum += data32.ul;
+
+ dstxor += 4;
+ srcxor += 8;
+
+ }
+
+ for (i = 0; i < per_pixel_copy; i++) {
+ srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+ data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+ writew(data16.us, dstxor);
+ csum += (u32)data16.us;
+
+ dstxor += 2;
+ srcxor += 4;
+ }
+ dstxor += last_alpha_dst_delta;
+ }
+ return csum;
+}
+
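+/*
+ * Upload a new cursor: copy the image into the next slot of the pinned
+ * cursor cache, write the checksum/size/hotspot signature behind it, load
+ * the slot's VRAM offset (>> 3) into registers 0xC8-0xCA and enable the
+ * hardware cursor.
+ */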
+static int ast_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height)
+{
+ struct ast_private *ast = crtc->dev->dev_private;
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct drm_gem_object *obj;
+ struct ast_bo *bo;
+ uint64_t gpu_addr;
+ u32 csum;
+ int ret;
+ struct ttm_bo_kmap_obj uobj_map;
+ u8 *src, *dst;
+ bool src_isiomem, dst_isiomem;
+ if (!handle) {
+ ast_hide_cursor(crtc);
+ return 0;
+ }
+
+ if (width > AST_MAX_HWC_WIDTH || height > AST_MAX_HWC_HEIGHT)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
+ return -ENOENT;
+ }
+ bo = gem_to_ast_bo(obj);
+
+ ret = ast_bo_reserve(bo, false);
+ if (ret)
+ goto fail;
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
+
+ src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
+ dst = ttm_kmap_obj_virtual(&ast->cache_kmap, &dst_isiomem);
+
+ if (src_isiomem == true)
+ DRM_ERROR("src cursor bo should be in main memory\n");
+ if (dst_isiomem == false)
+ DRM_ERROR("dst bo should be in VRAM\n");
+
+ dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
+
+ /* do data transfer to cursor cache */
+ csum = copy_cursor_image(src, dst, width, height);
+
+ /* write checksum + signature */
+ ttm_bo_kunmap(&uobj_map);
+ ast_bo_unreserve(bo);
+ {
+ u8 *dst = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ writel(csum, dst);
+ writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+ writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
+
+ /* set pattern offset */
+ gpu_addr = ast->cursor_cache_gpu_addr;
+ gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor;
+ gpu_addr >>= 3;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, gpu_addr & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff);
+ }
+ ast_crtc->cursor_width = width;
+ ast_crtc->cursor_height = height;
+ ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width;
+ ast_crtc->offset_y = AST_MAX_HWC_HEIGHT - height;
+
+ ast->next_cursor = (ast->next_cursor + 1) % AST_DEFAULT_HWC_NUM;
+
+ ast_show_cursor(crtc);
+
+ drm_gem_object_unreference_unlocked(obj);
+ return 0;
+fail:
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+}
+
+static int ast_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
+{
+ struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
+ struct ast_private *ast = crtc->dev->dev_private;
+ int x_offset, y_offset;
+ u8 *sig;
+
+ sig = (u8 *)ast->cache_kmap.virtual + (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE;
+ writel(x, sig + AST_HWC_SIGNATURE_X);
+ writel(y, sig + AST_HWC_SIGNATURE_Y);
+
+ x_offset = ast_crtc->offset_x;
+ y_offset = ast_crtc->offset_y;
+ if (x < 0) {
+ x_offset = (-x) + ast_crtc->offset_x;
+ x = 0;
+ }
+
+ if (y < 0) {
+ y_offset = (-y) + ast_crtc->offset_y;
+ y = 0;
+ }
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, (x & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, ((x >> 8) & 0x0f));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, (y & 0xff));
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
+
+ /* dummy write to fire HWC */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
new file mode 100644
index 000000000000..6edbee63b0cb
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -0,0 +1,1780 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include "drmP.h"
+#include "ast_drv.h"
+
+#include "ast_dram_tables.h"
+
+static void ast_init_dram_2300(struct drm_device *dev);
+
+static void
+ast_enable_vga(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+
+ ast_io_write8(ast, 0x43, 0x01);
+ ast_io_write8(ast, 0x42, 0x01);
+}
+
+#if 0 /* will use later */
+static bool
+ast_is_vga_enabled(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 ch;
+
+ if (ast->chip == AST1180) {
+ /* TODO 1180 */
+ } else {
+ ch = ast_io_read8(ast, 0x43);
+ if (ch) {
+ ast_open_key(ast);
+ ch = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff);
+ return ch & 0x04;
+ }
+ }
+ return 0;
+}
+#endif
+
+static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff };
+static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
+
+static void
+ast_set_def_ext_reg(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 i, index, reg;
+ const u8 *ext_reg_info;
+
+ /* reset scratch */
+ for (i = 0x81; i <= 0x8f; i++)
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00);
+
+ if (ast->chip == AST2300) {
+ if (dev->pdev->revision >= 0x20)
+ ext_reg_info = extreginfo_ast2300;
+ else
+ ext_reg_info = extreginfo_ast2300a0;
+ } else
+ ext_reg_info = extreginfo;
+
+ index = 0xa0;
+ while (*ext_reg_info != 0xff) {
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, index, 0x00, *ext_reg_info);
+ index++;
+ ext_reg_info++;
+ }
+
+ /* disable standard IO/MEM decode if secondary */
+ /* ast_set_index_reg-mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x3); */
+
+ /* Set Ext. Default */
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x8c, 0x00, 0x01);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x00, 0x00);
+
+ /* Enable RAMDAC for A1 */
+ reg = 0x04;
+ if (ast->chip == AST2300)
+ reg |= 0x20;
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
+}
+
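+/*
+ * Indirect access to the SoC memory-controller registers: the upper 16 bits
+ * of the address select a window via 0xF004 (enabled through 0xF000) and the
+ * register is then read or written through the 0x10000-based aperture.
+ */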
+static inline u32 mindwm(struct ast_private *ast, u32 r)
+{
+ ast_write32(ast, 0xf004, r & 0xffff0000);
+ ast_write32(ast, 0xf000, 0x1);
+
+ return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
+}
+
+static inline void moutdwm(struct ast_private *ast, u32 r, u32 v)
+{
+ ast_write32(ast, 0xf004, r & 0xffff0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x10000 + (r & 0x0000ffff), v);
+}
+
+/*
+ * AST2100/2150 DLL CBR Setting
+ */
+#define CBR_SIZE_AST2150 ((16 << 10) - 1)
+#define CBR_PASSNUM_AST2150 5
+#define CBR_THRESHOLD_AST2150 10
+#define CBR_THRESHOLD2_AST2150 10
+#define TIMEOUT_AST2150 5000000
+
+#define CBR_PATNUM_AST2150 8
+
+static const u32 pattern_AST2150[14] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0xFFFE0001,
+ 0x683501FE,
+ 0x0F1929B0,
+ 0x2D0B4346,
+ 0x60767F02,
+ 0x6FBE36A6,
+ 0x3A253035,
+ 0x3019686D,
+ 0x41C6167E,
+ 0x620152BF,
+ 0x20F050E0
+};
+
+static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+
+#if 0 /* unused in DDX driver - here for completeness */
+static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x40;
+ if (++timeout > TIMEOUT_AST2150) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0xffffffff;
+ }
+ } while (!data);
+ data = (mindwm(ast, 0x1e6e0070) & 0x80) >> 7;
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return data;
+}
+#endif
+
+static int cbrtest_ast2150(struct ast_private *ast)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ if (mmctestburst2_ast2150(ast, i))
+ return 0;
+ return 1;
+}
+
+static int cbrscan_ast2150(struct ast_private *ast, int busw)
+{
+ u32 patcnt, loop;
+
+ for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) {
+ moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) {
+ if (cbrtest_ast2150(ast))
+ break;
+ }
+ if (loop == CBR_PASSNUM_AST2150)
+ return 0;
+ }
+ return 1;
+}
+
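+/*
+ * AST2100/2150 DQS DLL calibration: sweep the delay in 0x1E6E0068, record
+ * the window of delays that pass the pattern test, and program a point
+ * roughly 7/16 into that window.  The scan restarts if no sufficiently wide
+ * window is found.
+ */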
+static void cbrdlli_ast2150(struct ast_private *ast, int busw)
+{
+ u32 dll_min[4], dll_max[4], dlli, data, passcnt;
+
+cbr_start:
+ dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff;
+ dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0;
+ passcnt = 0;
+
+ for (dlli = 0; dlli < 100; dlli++) {
+ moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+ data = cbrscan_ast2150(ast, busw);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dll_min[0] > dlli)
+ dll_min[0] = dlli;
+ if (dll_max[0] < dlli)
+ dll_max[0] = dlli;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD_AST2150)
+ goto cbr_start;
+ }
+ if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150)
+ goto cbr_start;
+
+ dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4);
+ moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24));
+}
+
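+/*
+ * Legacy (pre-AST2300) DRAM init: only performed when scratch register 0xD0
+ * bit 7 is clear, i.e. the controller has presumably not been initialised by
+ * firmware yet.  Replays the per-chip register table from ast_dram_tables.h,
+ * runs the AST2100/2150 DLL calibration on 266MHz parts and then waits for
+ * the ready bit (0xD0 bit 6).
+ */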
+static void ast_init_dram_reg(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ u8 j;
+ u32 data, temp, i;
+ const struct ast_dramstruct *dram_reg_info;
+
+ j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+
+ if ((j & 0x80) == 0) { /* VGA only */
+ if (ast->chip == AST2000) {
+ dram_reg_info = ast2000_dram_table_data;
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x10100, 0xa8);
+
+ do {
+ ;
+ } while (ast_read32(ast, 0x10100) != 0xa8);
+ } else {/* AST2100/1100 */
+ if (ast->chip == AST2100 || ast->chip == AST2200)
+ dram_reg_info = ast2100_dram_table_data;
+ else
+ dram_reg_info = ast1100_dram_table_data;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688A8A8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x01);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x01);
+ }
+
+ while (dram_reg_info->index != 0xffff) {
+ if (dram_reg_info->index == 0xff00) {/* delay fn */
+ for (i = 0; i < 15; i++)
+ udelay(dram_reg_info->data);
+ } else if (dram_reg_info->index == 0x4 && ast->chip != AST2000) {
+ data = dram_reg_info->data;
+ if (ast->dram_type == AST_DRAM_1Gx16)
+ data = 0x00000d89;
+ else if (ast->dram_type == AST_DRAM_1Gx32)
+ data = 0x00000c8d;
+
+ temp = ast_read32(ast, 0x12070);
+ temp &= 0xc;
+ temp <<= 2;
+ ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp);
+ } else
+ ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data);
+ dram_reg_info++;
+ }
+
+ /* AST 2100/2150 DRAM calibration */
+ data = ast_read32(ast, 0x10120);
+ if (data == 0x5061) { /* 266Mhz */
+ data = ast_read32(ast, 0x10004);
+ if (data & 0x40)
+ cbrdlli_ast2150(ast, 16); /* 16 bits */
+ else
+ cbrdlli_ast2150(ast, 32); /* 32 bits */
+ }
+
+ switch (ast->chip) {
+ case AST2000:
+ temp = ast_read32(ast, 0x10140);
+ ast_write32(ast, 0x10140, temp | 0x40);
+ break;
+ case AST1100:
+ case AST2100:
+ case AST2200:
+ case AST2150:
+ temp = ast_read32(ast, 0x1200c);
+ ast_write32(ast, 0x1200c, temp & 0xfffffffd);
+ temp = ast_read32(ast, 0x12040);
+ ast_write32(ast, 0x12040, temp | 0x40);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* wait ready */
+ do {
+ j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ } while ((j & 0x40) == 0);
+}
+
+void ast_post_gpu(struct drm_device *dev)
+{
+ u32 reg;
+ struct ast_private *ast = dev->dev_private;
+
+ pci_read_config_dword(ast->dev->pdev, 0x04, &reg);
+ reg |= 0x3;
+ pci_write_config_dword(ast->dev->pdev, 0x04, reg);
+
+ ast_enable_vga(dev);
+ ast_open_key(ast);
+ ast_set_def_ext_reg(dev);
+
+ if (ast->chip == AST2300)
+ ast_init_dram_2300(dev);
+ else
+ ast_init_dram_reg(dev);
+}
+
+/* AST 2300 DRAM settings */
+#define AST_DDR3 0
+#define AST_DDR2 1
+
+struct ast2300_dram_param {
+ u32 dram_type;
+ u32 dram_chipid;
+ u32 dram_freq;
+ u32 vram_size;
+ u32 odt;
+ u32 wodt;
+ u32 rodt;
+ u32 dram_config;
+ u32 reg_PERIOD;
+ u32 reg_MADJ;
+ u32 reg_SADJ;
+ u32 reg_MRS;
+ u32 reg_EMRS;
+ u32 reg_AC1;
+ u32 reg_AC2;
+ u32 reg_DQSIC;
+ u32 reg_DRV;
+ u32 reg_IOZ;
+ u32 reg_DQIDLY;
+ u32 reg_FREQ;
+ u32 madj_max;
+ u32 dll2_finetune_step;
+};
+
+/*
+ * DQSI DLL CBR Setting
+ */
+#define CBR_SIZE1 ((4 << 10) - 1)
+#define CBR_SIZE2 ((64 << 10) - 1)
+#define CBR_PASSNUM 5
+#define CBR_PASSNUM2 5
+#define CBR_THRESHOLD 10
+#define CBR_THRESHOLD2 10
+#define TIMEOUT 5000000
+#define CBR_PATNUM 8
+
+static const u32 pattern[8] = {
+ 0xFF00FF00,
+ 0xCC33CC33,
+ 0xAA55AA55,
+ 0x88778877,
+ 0x92CC4D6E,
+ 0x543D3CDE,
+ 0xF1E843C7,
+ 0x7C61D253
+};
+
+#if 0 /* unused in DDX, included for completeness */
+static int mmc_test_burst(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x000000c1 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x3000;
+ if (data & 0x2000) {
+ return 0;
+ }
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 0;
+ }
+ } while (!data);
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ return 1;
+}
+#endif
+
+static int mmc_test_burst2(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000041 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x1000;
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return -1;
+ }
+ } while (!data);
+ data = mindwm(ast, 0x1e6e0078);
+ data = (data | (data >> 16)) & 0xffff;
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return data;
+}
+
+#if 0 /* Unused in DDX here for completeness */
+static int mmc_test_single(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x000000c5 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x3000;
+ if (data & 0x2000)
+ return 0;
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return 0;
+ }
+ } while (!data);
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return 1;
+}
+#endif
+
+static int mmc_test_single2(struct ast_private *ast, u32 datagen)
+{
+ u32 data, timeout;
+
+ moutdwm(ast, 0x1e6e0070, 0x00000000);
+ moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3));
+ timeout = 0;
+ do {
+ data = mindwm(ast, 0x1e6e0070) & 0x1000;
+ if (++timeout > TIMEOUT) {
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return -1;
+ }
+ } while (!data);
+ data = mindwm(ast, 0x1e6e0078);
+ data = (data | (data >> 16)) & 0xffff;
+ moutdwm(ast, 0x1e6e0070, 0x0);
+ return data;
+}
+
+static int cbr_test(struct ast_private *ast)
+{
+ u32 data;
+ int i;
+ data = mmc_test_single2(ast, 0);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ for (i = 0; i < 8; i++) {
+ data = mmc_test_burst2(ast, i);
+ if ((data & 0xff) && (data & 0xff00))
+ return 0;
+ }
+ if (!data)
+ return 3;
+ else if (data & 0xff)
+ return 2;
+ return 1;
+}
+
+static int cbr_scan(struct ast_private *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 3;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ if ((data = cbr_test(ast)) != 0) {
+ data2 &= data;
+ if (!data2)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+static u32 cbr_test2(struct ast_private *ast)
+{
+ u32 data;
+
+ data = mmc_test_burst2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+ data |= mmc_test_single2(ast, 0);
+ if (data == 0xffff)
+ return 0;
+
+ return ~data & 0xffff;
+}
+
+static u32 cbr_scan2(struct ast_private *ast)
+{
+ u32 data, data2, patcnt, loop;
+
+ data2 = 0xffff;
+ for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
+ moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
+ for (loop = 0; loop < CBR_PASSNUM2; loop++) {
+ if ((data = cbr_test2(ast)) != 0) {
+ data2 &= data;
+ if (!data)
+ return 0;
+ break;
+ }
+ }
+ if (loop == CBR_PASSNUM2)
+ return 0;
+ }
+ return data2;
+}
+
+#if 0 /* unused in DDX - added for completeness */
+static void finetuneDQI(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
+
+ gold_sadj[0] = (mindwm(ast, 0x1E6E0024) >> 16) & 0xffff;
+ gold_sadj[1] = gold_sadj[0] >> 8;
+ gold_sadj[0] = gold_sadj[0] & 0xff;
+ gold_sadj[0] = (gold_sadj[0] + gold_sadj[1]) >> 1;
+ gold_sadj[1] = gold_sadj[0];
+
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli) {
+ dllmin[cnt] = dlli;
+ }
+ if (dllmax[cnt] < dlli) {
+ dllmax[cnt] = dlli;
+ }
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD) {
+ break;
+ }
+ }
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[0] >= dlli) {
+ dlli = (gold_sadj[0] - dlli) >> 1;
+ if (dlli > 3) {
+ dlli = 3;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[0]) >> 1;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0080, data);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[1] >= dlli) {
+ dlli = (gold_sadj[1] - dlli) >> 1;
+ if (dlli > 3) {
+ dlli = 3;
+ } else {
+ dlli = (dlli - 1) & 0x7;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[1]) >> 1;
+ dlli += 1;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI */
+#endif
+
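+/*
+ * First-pass per-lane DQ input delay tuning: sweep the DQSI delay, collect
+ * each of the 16 lanes' passing windows with cbr_scan2(), average the window
+ * minima into a golden value and derive a 3-bit adjustment per lane, written
+ * to 0x1E6E0080 (lanes 0-7) and 0x1E6E0084 (lanes 8-15).  Restarts if any
+ * lane fails to produce a valid window.
+ */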
+static void finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt;
+
+FINETUNE_START:
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli) {
+ dllmin[cnt] = dlli;
+ }
+ if (dllmax[cnt] < dlli) {
+ dllmax[cnt] = dlli;
+ }
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD2) {
+ break;
+ }
+ }
+ gold_sadj[0] = 0x0;
+ passcnt = 0;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ gold_sadj[0] += dllmin[cnt];
+ passcnt++;
+ }
+ }
+ if (passcnt != 16) {
+ goto FINETUNE_START;
+ }
+ gold_sadj[0] = gold_sadj[0] >> 4;
+ gold_sadj[1] = gold_sadj[0];
+
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[0] >= dlli) {
+ dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
+ if (dlli > 3) {
+ dlli = 3;
+ }
+ } else {
+ dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0080, data);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = dllmin[cnt];
+ if (gold_sadj[1] >= dlli) {
+ dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
+ if (dlli > 3) {
+ dlli = 3;
+ } else {
+ dlli = (dlli - 1) & 0x7;
+ }
+ } else {
+ dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
+ dlli += 1;
+ if (dlli > 4) {
+ dlli = 4;
+ }
+ dlli = (8 - dlli) & 0x7;
+ }
+ data |= dlli << 21;
+ }
+ }
+ moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI_L */
+
+static void finetuneDQI_L2(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, data2;
+
+ for (cnt = 0; cnt < 16; cnt++) {
+ dllmin[cnt] = 0xff;
+ dllmax[cnt] = 0x0;
+ }
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+ data = cbr_scan2(ast);
+ if (data != 0) {
+ mask = 0x00010001;
+ for (cnt = 0; cnt < 16; cnt++) {
+ if (data & mask) {
+ if (dllmin[cnt] > dlli) {
+ dllmin[cnt] = dlli;
+ }
+ if (dllmax[cnt] < dlli) {
+ dllmax[cnt] = dlli;
+ }
+ }
+ mask <<= 1;
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD2) {
+ break;
+ }
+ }
+ gold_sadj[0] = 0x0;
+ gold_sadj[1] = 0xFF;
+ for (cnt = 0; cnt < 8; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ if (gold_sadj[0] < dllmin[cnt]) {
+ gold_sadj[0] = dllmin[cnt];
+ }
+ if (gold_sadj[1] > dllmax[cnt]) {
+ gold_sadj[1] = dllmax[cnt];
+ }
+ }
+ }
+ gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
+ gold_sadj[1] = mindwm(ast, 0x1E6E0080);
+
+ data = 0;
+ for (cnt = 0; cnt < 8; cnt++) {
+ data >>= 3;
+ data2 = gold_sadj[1] & 0x7;
+ gold_sadj[1] >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[0] >= dlli) {
+ dlli = (gold_sadj[0] - dlli) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 3) {
+ data2 = (data2 + dlli) & 0x7;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[0]) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 4) {
+ data2 = (data2 - dlli) & 0x7;
+ }
+ }
+ }
+ data |= data2 << 21;
+ }
+ moutdwm(ast, 0x1E6E0080, data);
+
+ gold_sadj[0] = 0x0;
+ gold_sadj[1] = 0xFF;
+ for (cnt = 8; cnt < 16; cnt++) {
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ if (gold_sadj[0] < dllmin[cnt]) {
+ gold_sadj[0] = dllmin[cnt];
+ }
+ if (gold_sadj[1] > dllmax[cnt]) {
+ gold_sadj[1] = dllmax[cnt];
+ }
+ }
+ }
+ gold_sadj[0] = (gold_sadj[1] + gold_sadj[0]) >> 1;
+ gold_sadj[1] = mindwm(ast, 0x1E6E0084);
+
+ data = 0;
+ for (cnt = 8; cnt < 16; cnt++) {
+ data >>= 3;
+ data2 = gold_sadj[1] & 0x7;
+ gold_sadj[1] >>= 3;
+ if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
+ dlli = (dllmin[cnt] + dllmax[cnt]) >> 1;
+ if (gold_sadj[0] >= dlli) {
+ dlli = (gold_sadj[0] - dlli) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 3) {
+ data2 = (data2 + dlli) & 0x7;
+ }
+ } else {
+ dlli = (dlli - gold_sadj[0]) >> 1;
+ if (dlli > 0) {
+ dlli = 1;
+ }
+ if (data2 != 4) {
+ data2 = (data2 - dlli) & 0x7;
+ }
+ }
+ }
+ data |= data2 << 21;
+ }
+ moutdwm(ast, 0x1E6E0084, data);
+
+} /* finetuneDQI_L2 */
+
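+/*
+ * Final DQS DLL calibration: after both fine-tune passes, sweep the DLL
+ * delay, record the passing window of each of the two channels, program the
+ * window midpoints into 0x1E6E0068 and re-trigger the DQSI latch phase
+ * calibration.
+ */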
+static void cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 dllmin[2], dllmax[2], dlli, data, data2, passcnt;
+
+ finetuneDQI_L(ast, param);
+ finetuneDQI_L2(ast, param);
+
+CBR_START2:
+ dllmin[0] = dllmin[1] = 0xff;
+ dllmax[0] = dllmax[1] = 0x0;
+ passcnt = 0;
+ for (dlli = 0; dlli < 76; dlli++) {
+ moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+
+ moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
+ data = cbr_scan(ast);
+ if (data != 0) {
+ if (data & 0x1) {
+ if (dllmin[0] > dlli) {
+ dllmin[0] = dlli;
+ }
+ if (dllmax[0] < dlli) {
+ dllmax[0] = dlli;
+ }
+ }
+ if (data & 0x2) {
+ if (dllmin[1] > dlli) {
+ dllmin[1] = dlli;
+ }
+ if (dllmax[1] < dlli) {
+ dllmax[1] = dlli;
+ }
+ }
+ passcnt++;
+ } else if (passcnt >= CBR_THRESHOLD) {
+ break;
+ }
+ }
+ if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) {
+ goto CBR_START2;
+ }
+ if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) {
+ goto CBR_START2;
+ }
+ dlli = (dllmin[1] + dllmax[1]) >> 1;
+ dlli <<= 8;
+ dlli += (dllmin[0] + dllmax[0]) >> 1;
+ moutdwm(ast, 0x1E6E0068, (mindwm(ast, 0x1E6E0068) & 0xFFFF) | (dlli << 16));
+
+ data = (mindwm(ast, 0x1E6E0080) >> 24) & 0x1F;
+ data2 = (mindwm(ast, 0x1E6E0018) & 0xff80ffff) | (data << 16);
+ moutdwm(ast, 0x1E6E0018, data2);
+ moutdwm(ast, 0x1E6E0024, 0x8001 | (data << 1) | (param->dll2_finetune_step << 8));
+
+ /* Wait DQSI latch phase calibration */
+ moutdwm(ast, 0x1E6E0074, 0x00000010);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0070, 0x00000003);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+} /* CBRDLL2 */
+
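+/*
+ * Fill the ast2300_dram_param block with DDR3 timing, drive and PLL values
+ * selected by DRAM frequency and chip size.  The trap bits read from
+ * 0x1E6E2070 presumably reflect board strap settings and are folded into the
+ * AC2 and MRS values; dram_config is derived from the chip id and VRAM size.
+ */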
+static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+ /* Get trap info */
+ trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = 0x00020000 + (trap << 16);
+ trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
+ trap_MRS = 0x00000010 + (trap << 4);
+ trap_MRS |= ((trap & 0x2) << 18);
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 336:
+ moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 0;
+ param->reg_AC1 = 0x22202725;
+ param->reg_AC2 = 0xAA007613 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x04001400 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ break;
+ default:
+ case 396:
+ moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+ break;
+
+ case 408:
+ moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302825;
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x04001600 | trap_MRS;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DRV = 0x000000FA;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ default:
+ case AST_DRAM_512Mx16:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC009617 | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC009622 | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00963F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xCD44961A;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00081830;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 4;
+ break;
+ case 504:
+ moutdwm(ast, 0x1E6E2020, 0x0270);
+ param->wodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xDE44A61D;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000000;
+ param->reg_IOZ = 0x070000BB;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 4;
+ break;
+ case 528:
+ moutdwm(ast, 0x1E6E2020, 0x0290);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302926;
+ param->reg_AC2 = 0xEF44B61E;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00081A30;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000088;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302A37;
+ param->reg_AC2 = 0xEF56B61E;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 136;
+ param->dll2_finetune_step = 3;
+ break;
+ case 600:
+ moutdwm(ast, 0x1E6E2020, 0x02E1);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xDF56B61F;
+ param->reg_DQSIC = 0x0000014D;
+ param->reg_MRS = 0x00101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000023;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000058C0;
+ param->madj_max = 132;
+ param->dll2_finetune_step = 3;
+ break;
+ case 624:
+ moutdwm(ast, 0x1E6E2020, 0x0160);
+ param->reg_MADJ = 0x00136868;
+ param->reg_SADJ = 0x00004534;
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x32302A37;
+ param->reg_AC2 = 0xEF56B621;
+ param->reg_DQSIC = 0x0000015A;
+ param->reg_MRS = 0x02101A50;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000078;
+ param->reg_FREQ = 0x000059C0;
+ param->madj_max = 128;
+ param->dll2_finetune_step = 3;
+ break;
+ } /* switch freq */
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x130;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x131;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x132;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x133;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case AST_VIDMEM_SIZE_8M:
+ param->dram_config |= 0x00;
+ break;
+ case AST_VIDMEM_SIZE_16M:
+ param->dram_config |= 0x04;
+ break;
+ case AST_VIDMEM_SIZE_32M:
+ param->dram_config |= 0x08;
+ break;
+ case AST_VIDMEM_SIZE_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+
+}
+
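+/*
+ * DDR3 controller bring-up: program the timing, drive and delay registers
+ * from the parameter block, wait for MCLK2X lock (stepping MADJ up to
+ * madj_max if needed), issue the DDR3 mode-register set sequence, enable
+ * write/read ODT as configured and finally calibrate the DQ/DQS delays via
+ * cbr_dll2().
+ */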
+static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2;
+
+ moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ moutdwm(ast, 0x1E6E0018, 0x00000100);
+ moutdwm(ast, 0x1E6E0024, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ moutdwm(ast, 0x1E6E0004, param->dram_config);
+ moutdwm(ast, 0x1E6E0008, 0x90040f);
+ moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ moutdwm(ast, 0x1E6E0080, 0x00000000);
+ moutdwm(ast, 0x1E6E0084, 0x00000000);
+ moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ moutdwm(ast, 0x1E6E0018, 0x4040A170);
+ moutdwm(ast, 0x1E6E0018, 0x20402370);
+ moutdwm(ast, 0x1E6E0038, 0x00000000);
+ moutdwm(ast, 0x1E6E0040, 0xFF444444);
+ moutdwm(ast, 0x1E6E0044, 0x22222222);
+ moutdwm(ast, 0x1E6E0048, 0x22222222);
+ moutdwm(ast, 0x1E6E004C, 0x00000002);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+ moutdwm(ast, 0x1E6E0054, 0);
+ moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0074, 0x00000000);
+ moutdwm(ast, 0x1E6E0078, 0x00000000);
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+ /* Wait MCLK2X lock to MCLK */
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max) {
+ break;
+ }
+ moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000) {
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ } else {
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ }
+ data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ data = mindwm(ast, 0x1E6E0018) | 0xC00;
+ moutdwm(ast, 0x1E6E0018, data);
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00000040);
+ udelay(50);
+ /* Mode Register Setting */
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000005);
+ moutdwm(ast, 0x1E6E0028, 0x00000007);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+ data = 0;
+ if (param->wodt) {
+ data = 0x300;
+ }
+ if (param->rodt) {
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ }
+ moutdwm(ast, 0x1E6E0034, data | 0x3);
+
+ /* Wait DQI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0080);
+ } while (!(data & 0x40000000));
+ /* Wait DQSI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0020);
+ } while (!(data & 0x00000800));
+ /* Calibrate the DQSI delay */
+ cbr_dll2(ast, param);
+
+ moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+ /* ECC Memory Initialization */
+#ifdef ECC
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+ moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+
+
+}
+
+static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 trap, trap_AC2, trap_MRS;
+
+ moutdwm(ast, 0x1E6E2000, 0x1688A8A8);
+
+ /* Get trap info */
+ trap = (mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
+ trap_AC2 = (trap << 20) | (trap << 16);
+ trap_AC2 += 0x00110000;
+ trap_MRS = 0x00000040 | (trap << 4);
+
+
+ param->reg_MADJ = 0x00034C4C;
+ param->reg_SADJ = 0x00001800;
+ param->reg_DRV = 0x000000F0;
+ param->reg_PERIOD = param->dram_freq;
+ param->rodt = 0;
+
+ switch (param->dram_freq) {
+ case 264:
+ moutdwm(ast, 0x1E6E2020, 0x0130);
+ param->wodt = 0;
+ param->reg_AC1 = 0x11101513;
+ param->reg_AC2 = 0x78117011;
+ param->reg_DQSIC = 0x00000092;
+ param->reg_MRS = 0x00000842;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x000000F0;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x0000005A;
+ param->reg_FREQ = 0x00004AC0;
+ param->madj_max = 138;
+ param->dll2_finetune_step = 3;
+ break;
+ case 336:
+ moutdwm(ast, 0x1E6E2020, 0x0190);
+ param->wodt = 1;
+ param->reg_AC1 = 0x22202613;
+ param->reg_AC2 = 0xAA009016 | trap_AC2;
+ param->reg_DQSIC = 0x000000BA;
+ param->reg_MRS = 0x00000A02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000074;
+ param->reg_FREQ = 0x00004DC0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 3;
+ break;
+ default:
+ case 396:
+ moutdwm(ast, 0x1E6E2020, 0x03F1);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+
+ case 408:
+ moutdwm(ast, 0x1E6E2020, 0x01F0);
+ param->wodt = 1;
+ param->rodt = 0;
+ param->reg_AC1 = 0x33302714;
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ param->reg_DQSIC = 0x000000E2;
+ param->reg_MRS = 0x00000C02 | trap_MRS;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x000000FA;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000089;
+ param->reg_FREQ = 0x000050C0;
+ param->madj_max = 96;
+ param->dll2_finetune_step = 4;
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->reg_AC2 = 0xCC00B016 | trap_AC2;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->reg_AC2 = 0xCC00B01B | trap_AC2;
+ break;
+ case AST_DRAM_2Gx16:
+ param->reg_AC2 = 0xCC00B02B | trap_AC2;
+ break;
+ case AST_DRAM_4Gx16:
+ param->reg_AC2 = 0xCC00B03F | trap_AC2;
+ break;
+ }
+
+ break;
+ case 456:
+ moutdwm(ast, 0x1E6E2020, 0x0230);
+ param->wodt = 0;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xCD44B01E;
+ param->reg_DQSIC = 0x000000FC;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000000;
+ param->reg_DRV = 0x00000000;
+ param->reg_IOZ = 0x00000034;
+ param->reg_DQIDLY = 0x00000097;
+ param->reg_FREQ = 0x000052C0;
+ param->madj_max = 88;
+ param->dll2_finetune_step = 3;
+ break;
+ case 504:
+ moutdwm(ast, 0x1E6E2020, 0x0261);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xDE44C022;
+ param->reg_DQSIC = 0x00000117;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A0;
+ param->reg_FREQ = 0x000054C0;
+ param->madj_max = 79;
+ param->dll2_finetune_step = 3;
+ break;
+ case 528:
+ moutdwm(ast, 0x1E6E2020, 0x0120);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x33302815;
+ param->reg_AC2 = 0xEF44D024;
+ param->reg_DQSIC = 0x00000125;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F9;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000A7;
+ param->reg_FREQ = 0x000055C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 552:
+ moutdwm(ast, 0x1E6E2020, 0x02A1);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E025;
+ param->reg_DQSIC = 0x00000132;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000040;
+ param->reg_DRV = 0x0000000A;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000AD;
+ param->reg_FREQ = 0x000056C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ case 576:
+ moutdwm(ast, 0x1E6E2020, 0x0140);
+ param->wodt = 1;
+ param->rodt = 1;
+ param->reg_AC1 = 0x43402915;
+ param->reg_AC2 = 0xFF44E027;
+ param->reg_DQSIC = 0x0000013F;
+ param->reg_MRS = 0x00000E72;
+ param->reg_EMRS = 0x00000004;
+ param->reg_DRV = 0x000000F5;
+ param->reg_IOZ = 0x00000045;
+ param->reg_DQIDLY = 0x000000B3;
+ param->reg_FREQ = 0x000057C0;
+ param->madj_max = 76;
+ param->dll2_finetune_step = 3;
+ break;
+ }
+
+ switch (param->dram_chipid) {
+ case AST_DRAM_512Mx16:
+ param->dram_config = 0x100;
+ break;
+ default:
+ case AST_DRAM_1Gx16:
+ param->dram_config = 0x121;
+ break;
+ case AST_DRAM_2Gx16:
+ param->dram_config = 0x122;
+ break;
+ case AST_DRAM_4Gx16:
+ param->dram_config = 0x123;
+ break;
+ } /* switch size */
+
+ switch (param->vram_size) {
+ default:
+ case AST_VIDMEM_SIZE_8M:
+ param->dram_config |= 0x00;
+ break;
+ case AST_VIDMEM_SIZE_16M:
+ param->dram_config |= 0x04;
+ break;
+ case AST_VIDMEM_SIZE_32M:
+ param->dram_config |= 0x08;
+ break;
+ case AST_VIDMEM_SIZE_64M:
+ param->dram_config |= 0x0c;
+ break;
+ }
+}
+
+static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
+{
+ u32 data, data2;
+
+ moutdwm(ast, 0x1E6E0000, 0xFC600309);
+ moutdwm(ast, 0x1E6E0018, 0x00000100);
+ moutdwm(ast, 0x1E6E0024, 0x00000000);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
+ moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
+ udelay(10);
+
+ moutdwm(ast, 0x1E6E0004, param->dram_config);
+ moutdwm(ast, 0x1E6E0008, 0x90040f);
+ moutdwm(ast, 0x1E6E0010, param->reg_AC1);
+ moutdwm(ast, 0x1E6E0014, param->reg_AC2);
+ moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
+ moutdwm(ast, 0x1E6E0080, 0x00000000);
+ moutdwm(ast, 0x1E6E0084, 0x00000000);
+ moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
+ moutdwm(ast, 0x1E6E0018, 0x4040A130);
+ moutdwm(ast, 0x1E6E0018, 0x20402330);
+ moutdwm(ast, 0x1E6E0038, 0x00000000);
+ moutdwm(ast, 0x1E6E0040, 0xFF808000);
+ moutdwm(ast, 0x1E6E0044, 0x88848466);
+ moutdwm(ast, 0x1E6E0048, 0x44440008);
+ moutdwm(ast, 0x1E6E004C, 0x00000000);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+ moutdwm(ast, 0x1E6E0054, 0);
+ moutdwm(ast, 0x1E6E0060, param->reg_DRV);
+ moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0074, 0x00000000);
+ moutdwm(ast, 0x1E6E0078, 0x00000000);
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+
+ /* Wait MCLK2X lock to MCLK */
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
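+ /* step MADJ (0x1E6E0064) and recompute SADJ (0x1E6E0068) until the delay status in 0x1E6E001C is acceptable or madj_max is exceeded */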
+ while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
+ data2 = (mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
+ if ((data2 & 0xff) > param->madj_max) {
+ break;
+ }
+ moutdwm(ast, 0x1E6E0064, data2);
+ if (data2 & 0x00100000) {
+ data2 = ((data2 & 0xff) >> 3) + 3;
+ } else {
+ data2 = ((data2 & 0xff) >> 2) + 5;
+ }
+ data = mindwm(ast, 0x1E6E0068) & 0xffff00ff;
+ data2 += data & 0xff;
+ data = data | (data2 << 8);
+ moutdwm(ast, 0x1E6E0068, data);
+ udelay(10);
+ moutdwm(ast, 0x1E6E0064, mindwm(ast, 0x1E6E0064) | 0xC0000);
+ udelay(10);
+ data = mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
+ moutdwm(ast, 0x1E6E0018, data);
+ data = data | 0x200;
+ moutdwm(ast, 0x1E6E0018, data);
+ do {
+ data = mindwm(ast, 0x1E6E001C);
+ } while (!(data & 0x08000000));
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00005C04);
+ udelay(10);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ moutdwm(ast, 0x1E6E0034, 0x00000000);
+ data = mindwm(ast, 0x1E6E001C);
+ data = (data >> 8) & 0xff;
+ }
+ data = mindwm(ast, 0x1E6E0018) | 0xC00;
+ moutdwm(ast, 0x1E6E0018, data);
+
+ moutdwm(ast, 0x1E6E0034, 0x00000001);
+ moutdwm(ast, 0x1E6E000C, 0x00000000);
+ udelay(50);
+ /* Mode Register Setting */
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000005);
+ moutdwm(ast, 0x1E6E0028, 0x00000007);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+
+ moutdwm(ast, 0x1E6E000C, 0x00005C08);
+ moutdwm(ast, 0x1E6E002C, param->reg_MRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000001);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+ moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
+ moutdwm(ast, 0x1E6E0028, 0x00000003);
+
+ moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
+ data = 0;
+ if (param->wodt) {
+ data = 0x500;
+ }
+ if (param->rodt) {
+ data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
+ }
+ moutdwm(ast, 0x1E6E0034, data | 0x3);
+ moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
+
+ /* Wait DQI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0080);
+ } while (!(data & 0x40000000));
+ /* Wait DQSI delay lock */
+ do {
+ data = mindwm(ast, 0x1E6E0020);
+ } while (!(data & 0x00000800));
+ /* Calibrate the DQSI delay */
+ cbr_dll2(ast, param);
+
+ /* ECC Memory Initialization */
+#ifdef ECC
+ moutdwm(ast, 0x1E6E007C, 0x00000000);
+ moutdwm(ast, 0x1E6E0070, 0x221);
+ do {
+ data = mindwm(ast, 0x1E6E0070);
+ } while (!(data & 0x00001000));
+ moutdwm(ast, 0x1E6E0070, 0x00000000);
+ moutdwm(ast, 0x1E6E0050, 0x80000000);
+ moutdwm(ast, 0x1E6E0050, 0x00000000);
+#endif
+
+}
+
+static void ast_init_dram_2300(struct drm_device *dev)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast2300_dram_param param;
+ u32 temp;
+ u8 reg;
+
+ reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ if ((reg & 0x80) == 0) {/* vga only */
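+ /* open the memory window at 0x1e6e0000 and write the unlock keys, polling until each write takes effect */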
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ ast_write32(ast, 0x12000, 0x1688a8a8);
+ do {
+ ;
+ } while (ast_read32(ast, 0x12000) != 0x1);
+
+ ast_write32(ast, 0x10000, 0xfc600309);
+ do {
+ ;
+ } while (ast_read32(ast, 0x10000) != 0x1);
+
+ /* Slow down CPU/AHB CLK in VGA only mode */
+ temp = ast_read32(ast, 0x12008);
+ temp |= 0x73;
+ ast_write32(ast, 0x12008, temp);
+
+ param.dram_type = AST_DDR3;
+ if (temp & 0x01000000)
+ param.dram_type = AST_DDR2;
+ param.dram_chipid = ast->dram_type;
+ param.dram_freq = ast->mclk;
+ param.vram_size = ast->vram_size;
+
+ if (param.dram_type == AST_DDR3) {
+ get_ddr3_info(ast, &param);
+ ddr3_init(ast, &param);
+ } else {
+ get_ddr2_info(ast, &param);
+ ddr2_init(ast, &param);
+ }
+
+ temp = mindwm(ast, 0x1e6e2040);
+ moutdwm(ast, 0x1e6e2040, temp | 0x40);
+ }
+
+ /* wait ready */
+ do {
+ reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ } while ((reg & 0x40) == 0);
+}
+
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
new file mode 100644
index 000000000000..95fa6aba26bc
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of the authors not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission. The authors makes no representations
+ * about the suitability of this software for any purpose. It is provided
+ * "as is" without express or implied warranty.
+ *
+ * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+/* Ported from xf86-video-ast driver */
+
+#ifndef AST_TABLES_H
+#define AST_TABLES_H
+
+/* Std. Table Index Definition */
+#define TextModeIndex 0
+#define EGAModeIndex 1
+#define VGAModeIndex 2
+#define HiCModeIndex 3
+#define TrueCModeIndex 4
+
+#define Charx8Dot 0x00000001
+#define HalfDCLK 0x00000002
+#define DoubleScanMode 0x00000004
+#define LineCompareOff 0x00000008
+#define SyncPP 0x00000000
+#define SyncPN 0x00000040
+#define SyncNP 0x00000080
+#define SyncNN 0x000000C0
+#define HBorder 0x00000020
+#define VBorder 0x00000010
+#define WideScreenMode 0x00000100
+
+
+/* DCLK Index */
+#define VCLK25_175 0x00
+#define VCLK28_322 0x01
+#define VCLK31_5 0x02
+#define VCLK36 0x03
+#define VCLK40 0x04
+#define VCLK49_5 0x05
+#define VCLK50 0x06
+#define VCLK56_25 0x07
+#define VCLK65 0x08
+#define VCLK75 0x09
+#define VCLK78_75 0x0A
+#define VCLK94_5 0x0B
+#define VCLK108 0x0C
+#define VCLK135 0x0D
+#define VCLK157_5 0x0E
+#define VCLK162 0x0F
+/* #define VCLK193_25 0x10 */
+#define VCLK154 0x10
+#define VCLK83_5 0x11
+#define VCLK106_5 0x12
+#define VCLK146_25 0x13
+#define VCLK148_5 0x14
+
+static struct ast_vbios_dclk_info dclk_table[] = {
+ {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
+ {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
+ {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
+ {0x76, 0x63, 0x01}, /* 03: VCLK36 */
+ {0xEE, 0x67, 0x01}, /* 04: VCLK40 */
+ {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
+ {0xC6, 0x64, 0x01}, /* 06: VCLK50 */
+ {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
+ {0x80, 0x64, 0x00}, /* 08: VCLK65 */
+ {0x7B, 0x63, 0x00}, /* 09: VCLK75 */
+ {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */
+ {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */
+ {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */
+ {0x85, 0x24, 0x00}, /* 0D: VCLK135 */
+ {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
+ {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
+ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
+ {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */
+ {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
+ {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
+ {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
+};
+
+static struct ast_vbios_stdtable vbios_stdtable[] = {
+ /* MD_2_3_400 */
+ {
+ 0x67,
+ {0x00,0x03,0x00,0x02},
+ {0x5f,0x4f,0x50,0x82,0x55,0x81,0xbf,0x1f,
+ 0x00,0x4f,0x0d,0x0e,0x00,0x00,0x00,0x00,
+ 0x9c,0x8e,0x8f,0x28,0x1f,0x96,0xb9,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
+ 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
+ 0x0c,0x00,0x0f,0x08},
+ {0x00,0x00,0x00,0x00,0x00,0x10,0x0e,0x00,
+ 0xff}
+ },
+ /* Mode12/ExtEGATable */
+ {
+ 0xe3,
+ {0x01,0x0f,0x00,0x06},
+ {0x5f,0x4f,0x50,0x82,0x55,0x81,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xe9,0x8b,0xdf,0x28,0x00,0xe7,0x04,0xe3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x14,0x07,
+ 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f,
+ 0x01,0x00,0x0f,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtVGATable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x40,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtHiCTable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+ /* ExtTrueCTable */
+ {
+ 0x2f,
+ {0x01,0x0f,0x00,0x0e},
+ {0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
+ 0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+ 0xff},
+ {0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
+ 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
+ 0x01,0x00,0x00,0x00},
+ {0x00,0x00,0x00,0x00,0x00,0x00,0x05,0x0f,
+ 0xff}
+ },
+};
+
+static struct ast_vbios_enhtable res_640x480[] = {
+ { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E },
+ { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */
+ (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E },
+ { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */
+ (SyncNN | Charx8Dot) , 75, 3, 0x2E },
+ { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */
+ (SyncNN | Charx8Dot) , 85, 4, 0x2E },
+ { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */
+ (SyncNN | Charx8Dot) , 0xFF, 4, 0x2E },
+};
+
+static struct ast_vbios_enhtable res_800x600[] = {
+ {1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */
+ (SyncPP | Charx8Dot), 56, 1, 0x30 },
+ {1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 2, 0x30 },
+ {1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72Hz */
+ (SyncPP | Charx8Dot), 72, 3, 0x30 },
+ {1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 4, 0x30 },
+ {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85Hz */
+ (SyncPP | Charx8Dot), 84, 5, 0x30 },
+ {1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 5, 0x30 },
+};
+
+
+static struct ast_vbios_enhtable res_1024x768[] = {
+ {1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */
+ (SyncNN | Charx8Dot), 60, 1, 0x31 },
+ {1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */
+ (SyncNN | Charx8Dot), 70, 2, 0x31 },
+ {1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 3, 0x31 },
+ {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85Hz */
+ (SyncPP | Charx8Dot), 84, 4, 0x31 },
+ {1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 4, 0x31 },
+};
+
+static struct ast_vbios_enhtable res_1280x1024[] = {
+ {1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x32 },
+ {1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */
+ (SyncPP | Charx8Dot), 75, 2, 0x32 },
+ {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85Hz */
+ (SyncPP | Charx8Dot), 85, 3, 0x32 },
+ {1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 3, 0x32 },
+};
+
+static struct ast_vbios_enhtable res_1600x1200[] = {
+ {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */
+ (SyncPP | Charx8Dot), 60, 1, 0x33 },
+ {2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */
+ (SyncPP | Charx8Dot), 0xFF, 1, 0x33 },
+};
+
+static struct ast_vbios_enhtable res_1920x1200[] = {
+ {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
+ (SyncNP | Charx8Dot), 60, 1, 0x34 },
+ {2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz */
+ (SyncNP | Charx8Dot), 0xFF, 1, 0x34 },
+};
+
+/* 16:10 */
+static struct ast_vbios_enhtable res_1280x800[] = {
+ {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x35 },
+ {1680, 1280, 72,128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x35 },
+
+};
+
+static struct ast_vbios_enhtable res_1440x900[] = {
+ {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x36 },
+ {1904, 1440, 80,152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x36 },
+};
+
+static struct ast_vbios_enhtable res_1680x1050[] = {
+ {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x37 },
+ {2240, 1680,104,176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */
+ (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x37 },
+};
+
+/* HDTV */
+static struct ast_vbios_enhtable res_1920x1080[] = {
+ {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 60, 1, 0x38 },
+ {2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */
+ (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode), 0xFF, 1, 0x38 },
+};
+#endif
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
new file mode 100644
index 000000000000..aad12f747175
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "ast_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct ast_private *
+ast_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct ast_private, ttm.bdev);
+}
+
+static int
+ast_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+ast_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int ast_ttm_global_init(struct ast_private *ast)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &ast->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &ast_ttm_mem_global_init;
+ global_ref->release = &ast_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ ast->ttm.bo_global_ref.mem_glob =
+ ast->ttm.mem_global_ref.object;
+ global_ref = &ast->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&ast->ttm.mem_global_ref);
+ return r;
+ }
+ return 0;
+}
+
+void
+ast_ttm_global_release(struct ast_private *ast)
+{
+ if (ast->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&ast->ttm.mem_global_ref);
+ ast->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct ast_bo *bo;
+
+ bo = container_of(tbo, struct ast_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &ast_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct ast_bo *astbo = ast_bo(bo);
+
+ if (!ast_ttm_bo_is_ast_bo(bo))
+ return;
+
+ ast_ttm_placement(astbo, TTM_PL_FLAG_SYSTEM);
+ *pl = astbo->placement;
+}
+
+static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
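+/* tell TTM how to access each memory type: system pages need no aperture, VRAM is reached through PCI BAR 0 */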
+static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct ast_private *ast = ast_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(ast->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int ast_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ int r;
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ return r;
+}
+
+
+static void ast_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func ast_tt_backend_func = {
+ .destroy = &ast_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &ast_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int ast_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver ast_bo_driver = {
+ .ttm_tt_create = ast_ttm_tt_create,
+ .ttm_tt_populate = ast_ttm_tt_populate,
+ .ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
+ .init_mem_type = ast_bo_init_mem_type,
+ .evict_flags = ast_bo_evict_flags,
+ .move = ast_bo_move,
+ .verify_access = ast_bo_verify_access,
+ .io_mem_reserve = &ast_ttm_io_mem_reserve,
+ .io_mem_free = &ast_ttm_io_mem_free,
+};
+
+int ast_mm_init(struct ast_private *ast)
+{
+ int ret;
+ struct drm_device *dev = ast->dev;
+ struct ttm_bo_device *bdev = &ast->ttm.bdev;
+
+ ret = ast_ttm_global_init(ast);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&ast->ttm.bdev,
+ ast->ttm.bo_global_ref.ref.object,
+ &ast_bo_driver, DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ ast->vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
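+ /* mark the VRAM aperture (BAR 0) write-combining for faster CPU access */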
+ ast->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+
+ return 0;
+}
+
+void ast_mm_fini(struct ast_private *ast)
+{
+ struct drm_device *dev = ast->dev;
+ ttm_bo_device_release(&ast->ttm.bdev);
+
+ ast_ttm_global_release(ast);
+
+ if (ast->fb_mtrr >= 0) {
+ drm_mtrr_del(ast->fb_mtrr,
+ pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+ ast->fb_mtrr = -1;
+ }
+}
+
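+/* build the TTM placement list for the requested domains, falling back to cached system memory if none match */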
+void ast_ttm_placement(struct ast_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+void ast_bo_unreserve(struct ast_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+int ast_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct ast_bo **pastbo)
+{
+ struct ast_private *ast = dev->dev_private;
+ struct ast_bo *astbo;
+ size_t acc_size;
+ int ret;
+
+ astbo = kzalloc(sizeof(struct ast_bo), GFP_KERNEL);
+ if (!astbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &astbo->gem, size);
+ if (ret) {
+ kfree(astbo);
+ return ret;
+ }
+
+ astbo->gem.driver_private = NULL;
+ astbo->bo.bdev = &ast->ttm.bdev;
+
+ ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size,
+ sizeof(struct ast_bo));
+
+ ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
+ ttm_bo_type_device, &astbo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ ast_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pastbo = astbo;
+ return 0;
+}
+
+static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = ast_bo_gpu_offset(bo);
+ return 0;
+ }
+
+ ast_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = ast_bo_gpu_offset(bo);
+ return 0;
+}
+
+int ast_bo_unpin(struct ast_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int ast_bo_push_sysram(struct ast_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+int ast_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct ast_private *ast;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ ast = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
+}
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
new file mode 100644
index 000000000000..fc154dd75296
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -0,0 +1,12 @@
+config DRM_CIRRUS_QEMU
+ tristate "Cirrus driver for QEMU emulated device"
+ depends on DRM && PCI && EXPERIMENTAL
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+ This is a KMS driver for the Cirrus device emulated by QEMU.
+ It is *NOT* intended for real Cirrus hardware. It requires
+ the modesetting userspace X.org driver.
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
new file mode 100644
index 000000000000..69ffe7006d55
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -Iinclude/drm
+cirrus-y := cirrus_main.o cirrus_mode.o \
+ cirrus_drv.o cirrus_fbdev.o cirrus_ttm.o
+
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
new file mode 100644
index 000000000000..d7038230b71e
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012 Red Hat <mjg@redhat.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "cirrus_drv.h"
+
+int cirrus_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, cirrus_modeset, int, 0400);
+
+/*
+ * This is the generic driver code. This binds the driver to the drm core,
+ * which then performs further device association and calls our graphics init
+ * functions
+ */
+
+static struct drm_driver driver;
+
+/* only bind to the cirrus chip in qemu */
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
+ 0, 0 },
+ {0,}
+};
+
+static int __devinit
+cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void cirrus_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static const struct file_operations cirrus_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = cirrus_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+};
+static struct drm_driver driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR,
+ .load = cirrus_driver_load,
+ .unload = cirrus_driver_unload,
+ .fops = &cirrus_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+ .gem_init_object = cirrus_gem_init_object,
+ .gem_free_object = cirrus_gem_free_object,
+ .dumb_create = cirrus_dumb_create,
+ .dumb_map_offset = cirrus_dumb_mmap_offset,
+ .dumb_destroy = cirrus_dumb_destroy,
+};
+
+static struct pci_driver cirrus_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = cirrus_pci_probe,
+ .remove = cirrus_pci_remove,
+};
+
+static int __init cirrus_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && cirrus_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (cirrus_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &cirrus_pci_driver);
+}
+
+static void __exit cirrus_exit(void)
+{
+ drm_pci_exit(&driver, &cirrus_pci_driver);
+}
+
+module_init(cirrus_init);
+module_exit(cirrus_exit);
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
new file mode 100644
index 000000000000..21bdfa8836f7
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#ifndef __CIRRUS_DRV_H__
+#define __CIRRUS_DRV_H__
+
+#include <video/vga.h>
+
+#include <drm/drm_fb_helper.h>
+
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#define DRIVER_AUTHOR "Matthew Garrett"
+
+#define DRIVER_NAME "cirrus"
+#define DRIVER_DESC "qemu Cirrus emulation"
+#define DRIVER_DATE "20110418"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define CIRRUSFB_CONN_LIMIT 1
+
+#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg))
+#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg))
+#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg))
+#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg))
+
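+/* register offsets below are relative to the legacy VGA I/O base (0x3c0) as exposed through the MMIO BAR */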
+#define SEQ_INDEX 4
+#define SEQ_DATA 5
+
+#define WREG_SEQ(reg, v) \
+ do { \
+ WREG8(SEQ_INDEX, reg); \
+ WREG8(SEQ_DATA, v); \
+ } while (0) \
+
+#define CRT_INDEX 0x14
+#define CRT_DATA 0x15
+
+#define WREG_CRT(reg, v) \
+ do { \
+ WREG8(CRT_INDEX, reg); \
+ WREG8(CRT_DATA, v); \
+ } while (0) \
+
+#define GFX_INDEX 0xe
+#define GFX_DATA 0xf
+
+#define WREG_GFX(reg, v) \
+ do { \
+ WREG8(GFX_INDEX, reg); \
+ WREG8(GFX_DATA, v); \
+ } while (0) \
+
+/*
+ * Cirrus has a "hidden" DAC register that can be accessed by writing to
+ * the pixel mask register to reset the state, then reading from the register
+ * four times. The next write will then pass to the DAC
+ */
+#define VGA_DAC_MASK 0x6
+
+#define WREG_HDR(v) \
+ do { \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ RREG8(VGA_DAC_MASK); \
+ WREG8(VGA_DAC_MASK, v); \
+ } while (0) \
+
+
+#define CIRRUS_MAX_FB_HEIGHT 4096
+#define CIRRUS_MAX_FB_WIDTH 4096
+
+#define CIRRUS_DPMS_CLEARED (-1)
+
+#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base)
+#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base)
+#define to_cirrus_framebuffer(x) container_of(x, struct cirrus_framebuffer, base)
+
+struct cirrus_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ int last_dpms;
+ bool enabled;
+};
+
+struct cirrus_fbdev;
+struct cirrus_mode_info {
+ bool mode_config_initialized;
+ struct cirrus_crtc *crtc;
+ /* pointer to fbdev info structure */
+ struct cirrus_fbdev *gfbdev;
+};
+
+struct cirrus_encoder {
+ struct drm_encoder base;
+ int last_dpms;
+};
+
+struct cirrus_connector {
+ struct drm_connector base;
+};
+
+struct cirrus_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct cirrus_mc {
+ resource_size_t vram_size;
+ resource_size_t vram_base;
+};
+
+struct cirrus_device {
+ struct drm_device *dev;
+ unsigned long flags;
+
+ resource_size_t rmmio_base;
+ resource_size_t rmmio_size;
+ void __iomem *rmmio;
+
+ struct cirrus_mc mc;
+ struct cirrus_mode_info mode_info;
+
+ int num_crtc;
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ atomic_t validate_sequence;
+ } ttm;
+
+};
+
+
+struct cirrus_fbdev {
+ struct drm_fb_helper helper;
+ struct cirrus_framebuffer gfb;
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
+};
+
+struct cirrus_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem)
+
+static inline struct cirrus_bo *
+cirrus_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct cirrus_bo, bo);
+}
+
+
+#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+ /* cirrus_mode.c */
+void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+
+ /* cirrus_main.c */
+int cirrus_device_init(struct cirrus_device *cdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev,
+ uint32_t flags);
+void cirrus_device_fini(struct cirrus_device *cdev);
+int cirrus_gem_init_object(struct drm_gem_object *obj);
+void cirrus_gem_free_object(struct drm_gem_object *obj);
+int cirrus_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+int cirrus_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+int cirrus_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int cirrus_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
+
+int cirrus_framebuffer_init(struct drm_device *dev,
+ struct cirrus_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+ /* cirrus_display.c */
+int cirrus_modeset_init(struct cirrus_device *cdev);
+void cirrus_modeset_fini(struct cirrus_device *cdev);
+
+ /* cirrus_fbdev.c */
+int cirrus_fbdev_init(struct cirrus_device *cdev);
+void cirrus_fbdev_fini(struct cirrus_device *cdev);
+
+
+
+ /* cirrus_irq.c */
+void cirrus_driver_irq_preinstall(struct drm_device *dev);
+int cirrus_driver_irq_postinstall(struct drm_device *dev);
+void cirrus_driver_irq_uninstall(struct drm_device *dev);
+irqreturn_t cirrus_driver_irq_handler(DRM_IRQ_ARGS);
+
+ /* cirrus_kms.c */
+int cirrus_driver_load(struct drm_device *dev, unsigned long flags);
+int cirrus_driver_unload(struct drm_device *dev);
+extern struct drm_ioctl_desc cirrus_ioctls[];
+extern int cirrus_max_ioctl;
+
+int cirrus_mm_init(struct cirrus_device *cirrus);
+void cirrus_mm_fini(struct cirrus_device *cirrus);
+void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
+int cirrus_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct cirrus_bo **pcirrusbo);
+int cirrus_mmap(struct file *filp, struct vm_area_struct *vma);
+int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait);
+void cirrus_bo_unreserve(struct cirrus_bo *bo);
+int cirrus_bo_push_sysram(struct cirrus_bo *bo);
+int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+#endif /* __CIRRUS_DRV_H__ */
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
new file mode 100644
index 000000000000..9a276a536992
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_fb_helper.h"
+
+#include <linux/fb.h>
+
+#include "cirrus_drv.h"
+
+static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct cirrus_bo *bo;
+ int src_offset, dst_offset;
+ int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
+ int ret;
+ bool unmap = false;
+
+ obj = afbdev->gfb.obj;
+ bo = gem_to_cirrus_bo(obj);
+
+ ret = cirrus_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("failed to reserve fb bo\n");
+ return;
+ }
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ cirrus_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
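+ /* copy each touched scanline from the system-memory shadow into the kmapped VRAM buffer */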
+ for (i = y; i < y + height; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * afbdev->gfb.base.pitches[0] + (x * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+
+ }
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ cirrus_bo_unreserve(bo);
+}
+
+static void cirrus_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_fillrect(info, rect);
+ cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void cirrus_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_copyarea(info, area);
+ cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void cirrus_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct cirrus_fbdev *afbdev = info->par;
+ sys_imageblit(info, image);
+ cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+
+static struct fb_ops cirrusfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = cirrus_fillrect,
+ .fb_copyarea = cirrus_copyarea,
+ .fb_imageblit = cirrus_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 bpp, depth;
+ u32 size;
+ struct drm_gem_object *gobj;
+
+ int ret = 0;
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ if (bpp > 24)
+ return -EINVAL;
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = cirrus_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int cirrusfb_create(struct cirrus_fbdev *gfbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = gfbdev->helper.dev;
+ struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct device *device = &dev->pdev->dev;
+ void *sysram;
+ struct drm_gem_object *gobj = NULL;
+ struct cirrus_bo *bo = NULL;
+ int size, ret;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+
+ bo = gem_to_cirrus_bo(gobj);
+
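+ /* allocate a system-memory shadow of the framebuffer; fbdev drawing lands here and is flushed to VRAM by cirrus_dirty_update() */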
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+ info = framebuffer_alloc(0, device);
+ if (info == NULL)
+ return -ENOMEM;
+
+ info->par = gfbdev;
+
+ ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ gfbdev->sysram = sysram;
+ gfbdev->size = size;
+
+ fb = &gfbdev->gfb.base;
+ if (!fb) {
+ DRM_INFO("fb is NULL\n");
+ return -EINVAL;
+ }
+
+ /* setup helper */
+ gfbdev->helper.fb = fb;
+ gfbdev->helper.fbdev = info;
+
+ strcpy(info->fix.id, "cirrusdrmfb");
+
+
+ info->flags = FBINFO_DEFAULT;
+ info->fbops = &cirrusfb_ops;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+ info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = cdev->mc.vram_size;
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+
+ info->fix.mmio_start = 0;
+ info->fix.mmio_len = 0;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
+ DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
+ DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
+ DRM_INFO("fb depth is %d\n", fb->depth);
+ DRM_INFO(" pitch is %d\n", fb->pitches[0]);
+
+ return 0;
+out_iounmap:
+ return ret;
+}
+
+static int cirrus_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size
+ *sizes)
+{
+ struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = cirrusfb_create(gfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static int cirrus_fbdev_destroy(struct drm_device *dev,
+ struct cirrus_fbdev *gfbdev)
+{
+ struct fb_info *info;
+ struct cirrus_framebuffer *gfb = &gfbdev->gfb;
+
+ if (gfbdev->helper.fbdev) {
+ info = gfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (gfb->obj) {
+ drm_gem_object_unreference_unlocked(gfb->obj);
+ gfb->obj = NULL;
+ }
+
+ vfree(gfbdev->sysram);
+ drm_fb_helper_fini(&gfbdev->helper);
+ drm_framebuffer_cleanup(&gfb->base);
+
+ return 0;
+}
+
+static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
+ .gamma_set = cirrus_crtc_fb_gamma_set,
+ .gamma_get = cirrus_crtc_fb_gamma_get,
+ .fb_probe = cirrus_fb_find_or_create_single,
+};
+
+int cirrus_fbdev_init(struct cirrus_device *cdev)
+{
+ struct cirrus_fbdev *gfbdev;
+ int ret;
+ int bpp_sel = 24;
+
+ /*bpp_sel = 8;*/
+ gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
+ if (!gfbdev)
+ return -ENOMEM;
+
+ cdev->mode_info.gfbdev = gfbdev;
+ gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
+ cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
+ if (ret) {
+ kfree(gfbdev);
+ return ret;
+ }
+ drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
+ drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
+
+ return 0;
+}
+
+void cirrus_fbdev_fini(struct cirrus_device *cdev)
+{
+ if (!cdev->mode_info.gfbdev)
+ return;
+
+ cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
+ kfree(cdev->mode_info.gfbdev);
+ cdev->mode_info.gfbdev = NULL;
+}
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
new file mode 100644
index 000000000000..e3c122578417
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "cirrus_drv.h"
+
+
+static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
+ if (cirrus_fb->obj)
+ drm_gem_object_unreference_unlocked(cirrus_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static int cirrus_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
+ .destroy = cirrus_user_framebuffer_destroy,
+ .create_handle = cirrus_user_framebuffer_create_handle,
+};
+
+int cirrus_framebuffer_init(struct drm_device *dev,
+ struct cirrus_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ return 0;
+}
+
+static struct drm_framebuffer *
+cirrus_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct cirrus_framebuffer *cirrus_fb;
+ int ret;
+ u32 bpp, depth;
+
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ /* cirrus can't handle > 24bpp framebuffers at all */
+ if (bpp > 24)
+ return ERR_PTR(-EINVAL);
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
+ if (!cirrus_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(cirrus_fb);
+ return ERR_PTR(ret);
+ }
+ return &cirrus_fb->base;
+}
+
+static const struct drm_mode_config_funcs cirrus_mode_funcs = {
+ .fb_create = cirrus_user_framebuffer_create,
+};
+
+/* Unmap the MMIO registers and release the reserved VRAM region */
+static void cirrus_vram_fini(struct cirrus_device *cdev)
+{
+ iounmap(cdev->rmmio);
+ cdev->rmmio = NULL;
+ if (cdev->mc.vram_base)
+ release_mem_region(cdev->mc.vram_base, cdev->mc.vram_size);
+}
+
+/* Reserve the VRAM aperture (BAR 0) and record its base and size */
+static int cirrus_vram_init(struct cirrus_device *cdev)
+{
+ /* BAR 0 is VRAM */
+ cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
+ /* We have 4MB of VRAM */
+ cdev->mc.vram_size = 4 * 1024 * 1024;
+
+ if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
+ "cirrusdrmfb_vram")) {
+ DRM_ERROR("can't reserve VRAM\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Our emulated hardware has two sets of memory. One is video RAM and can
+ * simply be used as a linear framebuffer - the other provides mmio access
+ * to the display registers. The latter can also be accessed via IO port
+ * access, but we map the range and use mmio to program them instead
+ */
+
+int cirrus_device_init(struct cirrus_device *cdev,
+ struct drm_device *ddev,
+ struct pci_dev *pdev, uint32_t flags)
+{
+ int ret;
+
+ cdev->dev = ddev;
+ cdev->flags = flags;
+
+ /* Hardcode the number of CRTCs to 1 */
+ cdev->num_crtc = 1;
+
+ /* BAR 0 is the framebuffer, BAR 1 contains registers */
+ cdev->rmmio_base = pci_resource_start(cdev->dev->pdev, 1);
+ cdev->rmmio_size = pci_resource_len(cdev->dev->pdev, 1);
+
+ if (!request_mem_region(cdev->rmmio_base, cdev->rmmio_size,
+ "cirrusdrmfb_mmio")) {
+ DRM_ERROR("can't reserve mmio registers\n");
+ return -ENOMEM;
+ }
+
+ cdev->rmmio = ioremap(cdev->rmmio_base, cdev->rmmio_size);
+
+ if (cdev->rmmio == NULL)
+ return -ENOMEM;
+
+ ret = cirrus_vram_init(cdev);
+ if (ret) {
+ release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+ return ret;
+ }
+
+ return 0;
+}
+
+void cirrus_device_fini(struct cirrus_device *cdev)
+{
+ release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
+ cirrus_vram_fini(cdev);
+}
+
+/*
+ * Functions here will be called by the core once it's bound the driver to
+ * a PCI device
+ */
+
+int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct cirrus_device *cdev;
+ int r;
+
+ cdev = kzalloc(sizeof(struct cirrus_device), GFP_KERNEL);
+ if (cdev == NULL)
+ return -ENOMEM;
+ dev->dev_private = (void *)cdev;
+
+ r = cirrus_device_init(cdev, dev, dev->pdev, flags);
+ if (r) {
+ dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
+ goto out;
+ }
+
+ r = cirrus_mm_init(cdev);
+ if (r)
+ dev_err(&dev->pdev->dev, "fatal err on mm init\n");
+
+ r = cirrus_modeset_init(cdev);
+ if (r)
+ dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+
+ dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
+out:
+ if (r)
+ cirrus_driver_unload(dev);
+ return r;
+}
+
+int cirrus_driver_unload(struct drm_device *dev)
+{
+ struct cirrus_device *cdev = dev->dev_private;
+
+ if (cdev == NULL)
+ return 0;
+ cirrus_modeset_fini(cdev);
+ cirrus_mm_fini(cdev);
+ cirrus_device_fini(cdev);
+ kfree(cdev);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int cirrus_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct cirrus_bo *cirrusbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = cirrus_bo_create(dev, size, 0, 0, &cirrusbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &cirrusbo->gem;
+ return 0;
+}
+
+int cirrus_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = cirrus_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+int cirrus_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+int cirrus_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
+ return 0;
+}
+
+void cirrus_bo_unref(struct cirrus_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+
+}
+
+void cirrus_gem_free_object(struct drm_gem_object *obj)
+{
+ struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
+
+ if (!cirrus_bo)
+ return;
+ cirrus_bo_unref(&cirrus_bo);
+}
+
+
+static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
+{
+ return bo->bo.addr_space_offset;
+}
+
+int
+cirrus_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct cirrus_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_cirrus_bo(obj);
+ *offset = cirrus_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+}
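Taken together, cirrus_dumb_create() and cirrus_dumb_mmap_offset() back the generic dumb-buffer ioctls. A minimal userspace sketch of the resulting flow, assuming fd is an already-open DRM node and with error handling omitted:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

void *map_dumb_buffer(int fd, uint32_t width, uint32_t height, uint32_t bpp)
{
	struct drm_mode_create_dumb create;
	struct drm_mode_map_dumb map;

	memset(&create, 0, sizeof(create));
	create.width = width;
	create.height = height;
	create.bpp = bpp;
	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);	/* ends up in cirrus_dumb_create() */

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);	/* ends up in cirrus_dumb_mmap_offset() */

	return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.offset);
}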
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
new file mode 100644
index 000000000000..100f6308c509
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -0,0 +1,629 @@
+
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ *
+ * Portions of this code derived from cirrusfb.c:
+ * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
+ *
+ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include <video/cirrus.h>
+
+#include "cirrus_drv.h"
+
+#define CIRRUS_LUT_SIZE 256
+
+#define PALETTE_INDEX 0x8
+#define PALETTE_DATA 0x9
+
+/*
+ * This file contains setup code for the CRTC.
+ */
+
+static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ /* VGA registers */
+ WREG8(PALETTE_INDEX, i);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_r[i]);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_g[i]);
+ WREG8(PALETTE_DATA, cirrus_crtc->lut_b[i]);
+ }
+}
+
+/*
+ * The DRM core requires DPMS functions. Blanking on this hardware only
+ * needs a couple of sequencer and graphics controller register tweaks,
+ * so the implementation below is minimal.
+ */
+
+static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ u8 sr01, gr0e;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ sr01 = 0x00;
+ gr0e = 0x00;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ sr01 = 0x20;
+ gr0e = 0x02;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ sr01 = 0x20;
+ gr0e = 0x04;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ sr01 = 0x20;
+ gr0e = 0x06;
+ break;
+ default:
+ return;
+ }
+
+ WREG8(SEQ_INDEX, 0x1);
+ sr01 |= RREG8(SEQ_DATA) & ~0x20;
+ WREG_SEQ(0x1, sr01);
+
+ WREG8(GFX_INDEX, 0xe);
+ gr0e |= RREG8(GFX_DATA) & ~0x06;
+ WREG_GFX(0xe, gr0e);
+}
+
+/*
+ * The core passes the desired mode to the CRTC code to see whether any
+ * CRTC-specific modifications need to be made to it. We're in a position
+ * to just pass that straight through, so this does nothing
+ */
+static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
+{
+ struct cirrus_device *cdev = crtc->dev->dev_private;
+ u32 addr;
+ u8 tmp;
+
+ addr = offset >> 2;
+ WREG_CRT(0x0c, (u8)((addr >> 8) & 0xff));
+ WREG_CRT(0x0d, (u8)(addr & 0xff));
+
+ WREG8(CRT_INDEX, 0x1b);
+ tmp = RREG8(CRT_DATA);
+ tmp &= 0xf2;
+ tmp |= (addr >> 16) & 0x01;
+ tmp |= (addr >> 15) & 0x0c;
+ WREG_CRT(0x1b, tmp);
+ WREG8(CRT_INDEX, 0x1d);
+ tmp = RREG8(CRT_DATA);
+ tmp &= 0x7f;
+ tmp |= (addr >> 12) & 0x80;
+ WREG_CRT(0x1d, tmp);
+}
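A worked example of the start-address packing above, with numbers chosen purely for illustration: with offset = 0x300000 bytes, addr = offset >> 2 = 0xC0000, which has only bits 18 and 19 set, so the extended bits land as follows:

/* Worked example, offset = 0x300000 (illustrative):
 *   addr = 0x300000 >> 2            = 0xC0000
 *   CR0C = (addr >> 8) & 0xff       = 0x00
 *   CR0D = addr & 0xff              = 0x00
 *   CR1B |= (addr >> 16) & 0x01     -> 0x00  (bit 16 clear)
 *   CR1B |= (addr >> 15) & 0x0c     -> 0x08  (addr bit 18 -> CR1B[3])
 *   CR1D |= (addr >> 12) & 0x80     -> 0x80  (addr bit 19 -> CR1D[7])
 */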
+
+/* cirrus is different - we will force-move buffers out of VRAM once they are no longer displayed */
+static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct cirrus_device *cdev = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct cirrus_framebuffer *cirrus_fb;
+ struct cirrus_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ cirrus_fb = to_cirrus_framebuffer(fb);
+ obj = cirrus_fb->obj;
+ bo = gem_to_cirrus_bo(obj);
+ ret = cirrus_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ cirrus_bo_push_sysram(bo);
+ cirrus_bo_unreserve(bo);
+ }
+
+ cirrus_fb = to_cirrus_framebuffer(crtc->fb);
+ obj = cirrus_fb->obj;
+ bo = gem_to_cirrus_bo(obj);
+
+ ret = cirrus_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ cirrus_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&cdev->mode_info.gfbdev->gfb == cirrus_fb) {
+ /* if we are pushing the console framebuffer in, kmap it so fbcon can draw to it */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+ }
+ cirrus_bo_unreserve(bo);
+
+ cirrus_set_start_address(crtc, (u32)gpu_addr);
+ return 0;
+}
+
+static int cirrus_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+/*
+ * The meat of this driver. The core passes us a mode and we have to program
+ * it. The modesetting here is the bare minimum required to satisfy the qemu
+ * emulation of this hardware, and running this against a real device is
+ * likely to result in an inadequately programmed mode. We've already had
+ * the opportunity to modify the mode, so whatever we receive here should
+ * be something that can be correctly programmed and displayed
+ */
+static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct cirrus_device *cdev = dev->dev_private;
+ int hsyncstart, hsyncend, htotal, hdispend;
+ int vtotal, vdispend;
+ int tmp;
+ int sr07 = 0, hdr = 0;
+
+ htotal = mode->htotal / 8;
+ hsyncend = mode->hsync_end / 8;
+ hsyncstart = mode->hsync_start / 8;
+ hdispend = mode->hdisplay / 8;
+
+ vtotal = mode->vtotal;
+ vdispend = mode->vdisplay;
+
+ vdispend -= 1;
+ vtotal -= 2;
+
+ htotal -= 5;
+ hdispend -= 1;
+ hsyncstart += 1;
+ hsyncend += 1;
+
+ WREG_CRT(VGA_CRTC_V_SYNC_END, 0x20);
+ WREG_CRT(VGA_CRTC_H_TOTAL, htotal);
+ WREG_CRT(VGA_CRTC_H_DISP, hdispend);
+ WREG_CRT(VGA_CRTC_H_SYNC_START, hsyncstart);
+ WREG_CRT(VGA_CRTC_H_SYNC_END, hsyncend);
+ WREG_CRT(VGA_CRTC_V_TOTAL, vtotal & 0xff);
+ WREG_CRT(VGA_CRTC_V_DISP_END, vdispend & 0xff);
+
+ tmp = 0x40;
+ if ((vdispend + 1) & 512)
+ tmp |= 0x20;
+ WREG_CRT(VGA_CRTC_MAX_SCAN, tmp);
+
+ /*
+ * Overflow bits for values that don't fit in the standard registers
+ */
+ tmp = 16;
+ if (vtotal & 256)
+ tmp |= 1;
+ if (vdispend & 256)
+ tmp |= 2;
+ if ((vdispend + 1) & 256)
+ tmp |= 8;
+ if (vtotal & 512)
+ tmp |= 32;
+ if (vdispend & 512)
+ tmp |= 64;
+ WREG_CRT(VGA_CRTC_OVERFLOW, tmp);
+
+ tmp = 0;
+
+ /* More overflow bits */
+
+ if ((htotal + 5) & 64)
+ tmp |= 16;
+ if ((htotal + 5) & 128)
+ tmp |= 32;
+ if (vtotal & 256)
+ tmp |= 64;
+ if (vtotal & 512)
+ tmp |= 128;
+
+ WREG_CRT(CL_CRT1A, tmp);
+
+ /* Disable Hercules/CGA compatibility */
+ WREG_CRT(VGA_CRTC_MODE, 0x03);
+
+ WREG8(SEQ_INDEX, 0x7);
+ sr07 = RREG8(SEQ_DATA);
+ sr07 &= 0xe0;
+ hdr = 0;
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ sr07 |= 0x11;
+ break;
+ case 16:
+ sr07 |= 0xc1;
+ hdr = 0xc0;
+ break;
+ case 24:
+ sr07 |= 0x15;
+ hdr = 0xc5;
+ break;
+ case 32:
+ sr07 |= 0x19;
+ hdr = 0xc5;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ WREG_SEQ(0x7, sr07);
+
+ /* Program the pitch */
+ tmp = crtc->fb->pitches[0] / 8;
+ WREG_CRT(VGA_CRTC_OFFSET, tmp);
+
+ /* Enable extended blanking and pitch bits, and enable full memory */
+ tmp = 0x22;
+ tmp |= (crtc->fb->pitches[0] >> 7) & 0x10;
+ tmp |= (crtc->fb->pitches[0] >> 6) & 0x40;
+ WREG_CRT(0x1b, tmp);
+
+ /* Enable high-colour modes */
+ WREG_GFX(VGA_GFX_MODE, 0x40);
+
+ /* And set graphics mode */
+ WREG_GFX(VGA_GFX_MISC, 0x01);
+
+ WREG_HDR(hdr);
+ cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
+ return 0;
+}
+
+/*
+ * This is called before a mode is programmed. A typical use might be to
+ * blank the display (DPMS off) while programming, to hide intermediate
+ * stages, but that's not relevant to us.
+ */
+static void cirrus_crtc_prepare(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void cirrus_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We only apply
+ * them in 8-bit mode, so we can't perform smooth fades in deeper modes,
+ * but providing the function is still a requirement.
+ */
+static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+ int i;
+
+ if (size != CIRRUS_LUT_SIZE)
+ return;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ cirrus_crtc->lut_r[i] = red[i];
+ cirrus_crtc->lut_g[i] = green[i];
+ cirrus_crtc->lut_b[i] = blue[i];
+ }
+ cirrus_crtc_load_lut(crtc);
+}
+
+/* Simple cleanup function */
+static void cirrus_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(cirrus_crtc);
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs cirrus_crtc_funcs = {
+ .gamma_set = cirrus_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = cirrus_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
+ .dpms = cirrus_crtc_dpms,
+ .mode_fixup = cirrus_crtc_mode_fixup,
+ .mode_set = cirrus_crtc_mode_set,
+ .mode_set_base = cirrus_crtc_mode_set_base,
+ .prepare = cirrus_crtc_prepare,
+ .commit = cirrus_crtc_commit,
+ .load_lut = cirrus_crtc_load_lut,
+};
+
+/* CRTC setup */
+static void cirrus_crtc_init(struct drm_device *dev)
+{
+ struct cirrus_device *cdev = dev->dev_private;
+ struct cirrus_crtc *cirrus_crtc;
+ int i;
+
+ cirrus_crtc = kzalloc(sizeof(struct cirrus_crtc) +
+ (CIRRUSFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+
+ if (cirrus_crtc == NULL)
+ return;
+
+ drm_crtc_init(dev, &cirrus_crtc->base, &cirrus_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&cirrus_crtc->base, CIRRUS_LUT_SIZE);
+ cdev->mode_info.crtc = cirrus_crtc;
+
+ for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
+ cirrus_crtc->lut_r[i] = i;
+ cirrus_crtc->lut_g[i] = i;
+ cirrus_crtc->lut_b[i] = i;
+ }
+
+ drm_crtc_helper_add(&cirrus_crtc->base, &cirrus_helper_funcs);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void cirrus_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ cirrus_crtc->lut_r[regno] = red;
+ cirrus_crtc->lut_g[regno] = green;
+ cirrus_crtc->lut_b[regno] = blue;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
+
+ *red = cirrus_crtc->lut_r[regno];
+ *green = cirrus_crtc->lut_g[regno];
+ *blue = cirrus_crtc->lut_b[regno];
+}
+
+
+static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void cirrus_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+ return;
+}
+
+static void cirrus_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void cirrus_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+void cirrus_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(cirrus_encoder);
+}
+
+static const struct drm_encoder_helper_funcs cirrus_encoder_helper_funcs = {
+ .dpms = cirrus_encoder_dpms,
+ .mode_fixup = cirrus_encoder_mode_fixup,
+ .mode_set = cirrus_encoder_mode_set,
+ .prepare = cirrus_encoder_prepare,
+ .commit = cirrus_encoder_commit,
+};
+
+static const struct drm_encoder_funcs cirrus_encoder_encoder_funcs = {
+ .destroy = cirrus_encoder_destroy,
+};
+
+static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct cirrus_encoder *cirrus_encoder;
+
+ cirrus_encoder = kzalloc(sizeof(struct cirrus_encoder), GFP_KERNEL);
+ if (!cirrus_encoder)
+ return NULL;
+
+ encoder = &cirrus_encoder->base;
+ encoder->possible_crtcs = 0x1;
+
+ drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs);
+
+ return encoder;
+}
+
+
+int cirrus_vga_get_modes(struct drm_connector *connector)
+{
+ /* Just add a static list of modes */
+ drm_add_modes_noedid(connector, 640, 480);
+ drm_add_modes_noedid(connector, 800, 600);
+ drm_add_modes_noedid(connector, 1024, 768);
+ drm_add_modes_noedid(connector, 1280, 1024);
+
+ return 4;
+}
+
+static int cirrus_vga_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* Any mode we've added is valid */
+ return MODE_OK;
+}
+
+struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
+ *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* return the first (and only) attached encoder, if any */
+ if (enc_id) {
+ obj =
+ drm_mode_object_find(connector->dev, enc_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static enum drm_connector_status cirrus_vga_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void cirrus_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
+ .get_modes = cirrus_vga_get_modes,
+ .mode_valid = cirrus_vga_mode_valid,
+ .best_encoder = cirrus_connector_best_encoder,
+};
+
+struct drm_connector_funcs cirrus_vga_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = cirrus_vga_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = cirrus_connector_destroy,
+};
+
+static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct cirrus_connector *cirrus_connector;
+
+ cirrus_connector = kzalloc(sizeof(struct cirrus_connector), GFP_KERNEL);
+ if (!cirrus_connector)
+ return NULL;
+
+ connector = &cirrus_connector->base;
+
+ drm_connector_init(dev, connector,
+ &cirrus_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
+
+ return connector;
+}
+
+
+int cirrus_modeset_init(struct cirrus_device *cdev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ drm_mode_config_init(cdev->dev);
+ cdev->mode_info.mode_config_initialized = true;
+
+ cdev->dev->mode_config.max_width = CIRRUS_MAX_FB_WIDTH;
+ cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT;
+
+ cdev->dev->mode_config.fb_base = cdev->mc.vram_base;
+ cdev->dev->mode_config.preferred_depth = 24;
+ /* don't prefer a shadow framebuffer on a virtual GPU */
+ cdev->dev->mode_config.prefer_shadow = 0;
+
+ cirrus_crtc_init(cdev->dev);
+
+ encoder = cirrus_encoder_init(cdev->dev);
+ if (!encoder) {
+ DRM_ERROR("cirrus_encoder_init failed\n");
+ return -1;
+ }
+
+ connector = cirrus_vga_init(cdev->dev);
+ if (!connector) {
+ DRM_ERROR("cirrus_vga_init failed\n");
+ return -1;
+ }
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ret = cirrus_fbdev_init(cdev);
+ if (ret) {
+ DRM_ERROR("cirrus_fbdev_init failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void cirrus_modeset_fini(struct cirrus_device *cdev)
+{
+ cirrus_fbdev_fini(cdev);
+
+ if (cdev->mode_info.mode_config_initialized) {
+ drm_mode_config_cleanup(cdev->dev);
+ cdev->mode_info.mode_config_initialized = false;
+ }
+}
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
new file mode 100644
index 000000000000..ae2f08ff3808
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -0,0 +1,453 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "cirrus_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct cirrus_device *
+cirrus_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct cirrus_device, ttm.bdev);
+}
+
+static int
+cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &cirrus->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &cirrus_ttm_mem_global_init;
+ global_ref->release = &cirrus_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ cirrus->ttm.bo_global_ref.mem_glob =
+ cirrus->ttm.mem_global_ref.object;
+ global_ref = &cirrus->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&cirrus->ttm.mem_global_ref);
+ return r;
+ }
+ return 0;
+}
+
+void
+cirrus_ttm_global_release(struct cirrus_device *cirrus)
+{
+ if (cirrus->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&cirrus->ttm.mem_global_ref);
+ cirrus->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct cirrus_bo *bo;
+
+ bo = container_of(tbo, struct cirrus_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &cirrus_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+cirrus_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct cirrus_bo *cirrusbo = cirrus_bo(bo);
+
+ if (!cirrus_ttm_bo_is_cirrus_bo(bo))
+ return;
+
+ cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_SYSTEM);
+ *pl = cirrusbo->placement;
+}
+
+static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct cirrus_device *cirrus = cirrus_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(cirrus->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int cirrus_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ int r;
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ return r;
+}
+
+
+static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func cirrus_tt_backend_func = {
+ .destroy = &cirrus_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &cirrus_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver cirrus_bo_driver = {
+ .ttm_tt_create = cirrus_ttm_tt_create,
+ .ttm_tt_populate = cirrus_ttm_tt_populate,
+ .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
+ .init_mem_type = cirrus_bo_init_mem_type,
+ .evict_flags = cirrus_bo_evict_flags,
+ .move = cirrus_bo_move,
+ .verify_access = cirrus_bo_verify_access,
+ .io_mem_reserve = &cirrus_ttm_io_mem_reserve,
+ .io_mem_free = &cirrus_ttm_io_mem_free,
+};
+
+int cirrus_mm_init(struct cirrus_device *cirrus)
+{
+ int ret;
+ struct drm_device *dev = cirrus->dev;
+ struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
+
+ ret = cirrus_ttm_global_init(cirrus);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&cirrus->ttm.bdev,
+ cirrus->ttm.bo_global_ref.ref.object,
+ &cirrus_bo_driver, DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ cirrus->mc.vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
+ cirrus->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+
+ return 0;
+}
+
+void cirrus_mm_fini(struct cirrus_device *cirrus)
+{
+ struct drm_device *dev = cirrus->dev;
+ ttm_bo_device_release(&cirrus->ttm.bdev);
+
+ cirrus_ttm_global_release(cirrus);
+
+ if (cirrus->fb_mtrr >= 0) {
+ drm_mtrr_del(cirrus->fb_mtrr,
+ pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+ cirrus->fb_mtrr = -1;
+ }
+}
+
+void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
+{
+ u32 c = 0;
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+void cirrus_bo_unreserve(struct cirrus_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+int cirrus_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct cirrus_bo **pcirrusbo)
+{
+ struct cirrus_device *cirrus = dev->dev_private;
+ struct cirrus_bo *cirrusbo;
+ size_t acc_size;
+ int ret;
+
+ cirrusbo = kzalloc(sizeof(struct cirrus_bo), GFP_KERNEL);
+ if (!cirrusbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &cirrusbo->gem, size);
+ if (ret) {
+ kfree(cirrusbo);
+ return ret;
+ }
+
+ cirrusbo->gem.driver_private = NULL;
+ cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+
+ cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&cirrus->ttm.bdev, size,
+ sizeof(struct cirrus_bo));
+
+ ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
+ ttm_bo_type_device, &cirrusbo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ cirrus_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pcirrusbo = cirrusbo;
+ return 0;
+}
+
+static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = cirrus_bo_gpu_offset(bo);
+ return 0;
+ }
+
+ cirrus_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = cirrus_bo_gpu_offset(bo);
+ return 0;
+}
+
+int cirrus_bo_unpin(struct cirrus_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int cirrus_bo_push_sysram(struct cirrus_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct cirrus_device *cirrus;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ cirrus = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev);
+}
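cirrus_mmap() is the driver's fops->mmap hook; the actual file_operations table lives in cirrus_drv.c, which is part of this series but not shown in this hunk. Purely as an illustration of where it plugs in - the surrounding entries are assumptions typical of KMS drivers of this vintage:

static const struct file_operations cirrus_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.mmap		= cirrus_mmap,		/* routes GEM offsets into TTM */
	.poll		= drm_poll,
	.fasync		= drm_fasync,
};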
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ee63a123235c..a177d0abb8bb 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -384,6 +384,8 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
if (ret)
goto out;
+ crtc->base.properties = &crtc->properties;
+
list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
dev->mode_config.num_crtc++;
@@ -483,6 +485,7 @@ int drm_connector_init(struct drm_device *dev,
if (ret)
goto out;
+ connector->base.properties = &connector->properties;
connector->dev = dev;
connector->funcs = funcs;
connector->connector_type = connector_type;
@@ -1424,11 +1427,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
}
connector = obj_to_connector(obj);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- props_count++;
- }
- }
+ props_count = connector->properties.count;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
@@ -1481,21 +1480,19 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
copied = 0;
prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] != 0) {
- if (put_user(connector->property_ids[i],
- prop_ptr + copied)) {
- ret = -EFAULT;
- goto out;
- }
+ for (i = 0; i < connector->properties.count; i++) {
+ if (put_user(connector->properties.ids[i],
+ prop_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
- if (put_user(connector->property_values[i],
- prop_values + copied)) {
- ret = -EFAULT;
- goto out;
- }
- copied++;
+ if (put_user(connector->properties.values[i],
+ prop_values + copied)) {
+ ret = -EFAULT;
+ goto out;
}
+ copied++;
}
}
out_resp->count_props = props_count;
@@ -2819,60 +2816,78 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
}
EXPORT_SYMBOL(drm_property_destroy);
-int drm_connector_attach_property(struct drm_connector *connector,
+void drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val)
{
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == 0) {
- connector->property_ids[i] = property->base.id;
- connector->property_values[i] = init_val;
- break;
- }
- }
-
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ drm_object_attach_property(&connector->base, property, init_val);
}
EXPORT_SYMBOL(drm_connector_attach_property);
int drm_connector_property_set_value(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
+ return drm_object_property_set_value(&connector->base, property, value);
+}
+EXPORT_SYMBOL(drm_connector_property_set_value);
+
+int drm_connector_property_get_value(struct drm_connector *connector,
+ struct drm_property *property, uint64_t *val)
+{
+ return drm_object_property_get_value(&connector->base, property, val);
+}
+EXPORT_SYMBOL(drm_connector_property_get_value);
+
+void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val)
+{
+ int count = obj->properties->count;
+
+ if (count == DRM_OBJECT_MAX_PROPERTY) {
+ WARN(1, "Failed to attach object property (type: 0x%x). Please "
+ "increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
+ "you see this message on the same object type.\n",
+ obj->type);
+ return;
+ }
+
+ obj->properties->ids[count] = property->base.id;
+ obj->properties->values[count] = init_val;
+ obj->properties->count++;
+}
+EXPORT_SYMBOL(drm_object_attach_property);
+
+int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property, uint64_t val)
+{
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- connector->property_values[i] = value;
- break;
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ obj->properties->values[i] = val;
+ return 0;
}
}
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
-EXPORT_SYMBOL(drm_connector_property_set_value);
+EXPORT_SYMBOL(drm_object_property_set_value);
-int drm_connector_property_get_value(struct drm_connector *connector,
+int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == property->base.id) {
- *val = connector->property_values[i];
- break;
+ for (i = 0; i < obj->properties->count; i++) {
+ if (obj->properties->ids[i] == property->base.id) {
+ *val = obj->properties->values[i];
+ return 0;
}
}
- if (i == DRM_CONNECTOR_MAX_PROPERTY)
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
-EXPORT_SYMBOL(drm_connector_property_get_value);
+EXPORT_SYMBOL(drm_object_property_get_value);
int drm_mode_getproperty_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
@@ -3074,75 +3089,178 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+static bool drm_property_change_is_valid(struct drm_property *property,
+ __u64 value)
+{
+ if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ return false;
+ if (property->flags & DRM_MODE_PROP_RANGE) {
+ if (value < property->values[0] || value > property->values[1])
+ return false;
+ return true;
+ } else {
+ int i;
+ for (i = 0; i < property->num_values; i++)
+ if (property->values[i] == value)
+ return true;
+ return false;
+ }
+}
+
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_connector_set_property *out_resp = data;
- struct drm_mode_object *obj;
- struct drm_property *property;
- struct drm_connector *connector;
+ struct drm_mode_connector_set_property *conn_set_prop = data;
+ struct drm_mode_obj_set_property obj_set_prop = {
+ .value = conn_set_prop->value,
+ .prop_id = conn_set_prop->prop_id,
+ .obj_id = conn_set_prop->connector_id,
+ .obj_type = DRM_MODE_OBJECT_CONNECTOR
+ };
+
+ /* It does all the locking and checking we need */
+ return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
+}
+
+static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
int ret = -EINVAL;
+ struct drm_connector *connector = obj_to_connector(obj);
+
+ /* Do DPMS ourselves */
+ if (property == connector->dev->mode_config.dpms_property) {
+ if (connector->funcs->dpms)
+ (*connector->funcs->dpms)(connector, (int)value);
+ ret = 0;
+ } else if (connector->funcs->set_property)
+ ret = connector->funcs->set_property(connector, property, value);
+
+ /* store the property value if successful */
+ if (!ret)
+ drm_connector_property_set_value(connector, property, value);
+ return ret;
+}
+
+static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t value)
+{
+ int ret = -EINVAL;
+ struct drm_crtc *crtc = obj_to_crtc(obj);
+
+ if (crtc->funcs->set_property)
+ ret = crtc->funcs->set_property(crtc, property, value);
+ if (!ret)
+ drm_object_property_set_value(obj, property, value);
+
+ return ret;
+}
+
+int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_obj_get_properties *arg = data;
+ struct drm_mode_object *obj;
+ int ret = 0;
int i;
+ int copied = 0;
+ int props_count = 0;
+ uint32_t __user *props_ptr;
+ uint64_t __user *prop_values_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
- obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
+ obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!obj->properties) {
+ ret = -EINVAL;
goto out;
}
- connector = obj_to_connector(obj);
- for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
- if (connector->property_ids[i] == out_resp->prop_id)
- break;
+ props_count = obj->properties->count;
+
+ /* This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it. */
+ if ((arg->count_props >= props_count) && props_count) {
+ copied = 0;
+ props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
+ prop_values_ptr = (uint64_t __user *)(unsigned long)
+ (arg->prop_values_ptr);
+ for (i = 0; i < props_count; i++) {
+ if (put_user(obj->properties->ids[i],
+ props_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (put_user(obj->properties->values[i],
+ prop_values_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
}
+ arg->count_props = props_count;
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_obj_set_property *arg = data;
+ struct drm_mode_object *arg_obj;
+ struct drm_mode_object *prop_obj;
+ struct drm_property *property;
+ int ret = -EINVAL;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
- if (i == DRM_CONNECTOR_MAX_PROPERTY) {
+ mutex_lock(&dev->mode_config.mutex);
+
+ arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
+ if (!arg_obj)
+ goto out;
+ if (!arg_obj->properties)
goto out;
- }
- obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
- if (!obj) {
+ for (i = 0; i < arg_obj->properties->count; i++)
+ if (arg_obj->properties->ids[i] == arg->prop_id)
+ break;
+
+ if (i == arg_obj->properties->count)
goto out;
- }
- property = obj_to_property(obj);
- if (property->flags & DRM_MODE_PROP_IMMUTABLE)
+ prop_obj = drm_mode_object_find(dev, arg->prop_id,
+ DRM_MODE_OBJECT_PROPERTY);
+ if (!prop_obj)
goto out;
+ property = obj_to_property(prop_obj);
- if (property->flags & DRM_MODE_PROP_RANGE) {
- if (out_resp->value < property->values[0])
- goto out;
+ if (!drm_property_change_is_valid(property, arg->value))
+ goto out;
- if (out_resp->value > property->values[1])
- goto out;
- } else {
- int found = 0;
- for (i = 0; i < property->num_values; i++) {
- if (property->values[i] == out_resp->value) {
- found = 1;
- break;
- }
- }
- if (!found) {
- goto out;
- }
+ switch (arg_obj->type) {
+ case DRM_MODE_OBJECT_CONNECTOR:
+ ret = drm_mode_connector_set_obj_prop(arg_obj, property,
+ arg->value);
+ break;
+ case DRM_MODE_OBJECT_CRTC:
+ ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
+ break;
}
- /* Do DPMS ourselves */
- if (property == connector->dev->mode_config.dpms_property) {
- if (connector->funcs->dpms)
- (*connector->funcs->dpms)(connector, (int) out_resp->value);
- ret = 0;
- } else if (connector->funcs->set_property)
- ret = connector->funcs->set_property(connector, property, out_resp->value);
-
- /* store the property value if successful */
- if (!ret)
- drm_connector_property_set_value(connector, property, out_resp->value);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
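For driver authors, the net effect of the drm_crtc.c changes is that properties can now hang off any mode object, not just connectors. A hypothetical sketch of a driver attaching a range property to a CRTC and reacting to it - the property name, the helper function, and the "foo" prefix are invented for illustration:

static struct drm_property *foo_zoom_prop;	/* illustrative name */

static void foo_attach_crtc_props(struct drm_device *dev, struct drm_crtc *crtc)
{
	foo_zoom_prop = drm_property_create_range(dev, 0, "zoom", 0, 100);
	drm_object_attach_property(&crtc->base, foo_zoom_prop, 0);
}

/* Hooked up as .set_property in the driver's drm_crtc_funcs; on success
 * drm_mode_crtc_set_obj_prop() stores the new value for later queries. */
static int foo_crtc_set_property(struct drm_crtc *crtc,
				 struct drm_property *property, uint64_t value)
{
	if (property == foo_zoom_prop)
		return foo_program_zoom(crtc, value);	/* hypothetical helper */
	return -EINVAL;
}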
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6116e3b75393..8a9d0792e4ec 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -163,7 +163,9 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
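The two new ioctls follow the usual two-pass pattern noted in drm_mode_obj_get_properties_ioctl() above: the first call only reports the count, the second fills caller-allocated arrays. A hedged userspace sketch, with error handling omitted and crtc_id standing in for any mode object id:

struct drm_mode_obj_get_properties arg;

memset(&arg, 0, sizeof(arg));
arg.obj_id = crtc_id;
arg.obj_type = DRM_MODE_OBJECT_CRTC;
ioctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg);	/* pass 1: learn count_props */

uint32_t *ids = calloc(arg.count_props, sizeof(*ids));
uint64_t *values = calloc(arg.count_props, sizeof(*values));
arg.props_ptr = (uintptr_t)ids;
arg.prop_values_ptr = (uintptr_t)values;
ioctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg);	/* pass 2: fill the arrays */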
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index fc6ded8f318b..1ab29a7345c5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -626,7 +626,7 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
drm_gem_object_reference(obj);
mutex_lock(&obj->dev->struct_mutex);
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(obj->dev, vma);
mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
@@ -637,7 +637,7 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
struct drm_device *dev = obj->dev;
mutex_lock(&dev->struct_mutex);
- drm_vm_close_locked(vma);
+ drm_vm_close_locked(obj->dev, vma);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
}
@@ -710,7 +710,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
*/
drm_gem_object_reference(obj);
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
out_unlock:
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index cf85155da2a0..64a62c697313 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -283,6 +283,10 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CAP_DUMB_PREFER_SHADOW:
req->value = dev->mode_config.prefer_shadow;
break;
+ case DRM_CAP_PRIME:
+ req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
+ req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
+ break;
default:
return -EINVAL;
}
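Userspace can probe the new capability before attempting any PRIME import or export; a minimal sketch using the existing GET_CAP ioctl:

struct drm_get_cap cap = { .capability = DRM_CAP_PRIME };

if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0) {
	if (cap.value & DRM_PRIME_CAP_IMPORT)
		printf("PRIME import supported\n");
	if (cap.value & DRM_PRIME_CAP_EXPORT)
		printf("PRIME export supported\n");
}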
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 149561818349..961ee08927fe 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -406,10 +406,9 @@ static const struct vm_operations_struct drm_vm_sg_ops = {
* Create a new drm_vma_entry structure as the \p vma private data entry and
* add it to drm_device::vmalist.
*/
-void drm_vm_open_locked(struct vm_area_struct *vma)
+void drm_vm_open_locked(struct drm_device *dev,
+ struct vm_area_struct *vma)
{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *vma_entry;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -430,14 +429,13 @@ static void drm_vm_open(struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
mutex_lock(&dev->struct_mutex);
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
mutex_unlock(&dev->struct_mutex);
}
-void drm_vm_close_locked(struct vm_area_struct *vma)
+void drm_vm_close_locked(struct drm_device *dev,
+ struct vm_area_struct *vma)
{
- struct drm_file *priv = vma->vm_file->private_data;
- struct drm_device *dev = priv->minor->dev;
struct drm_vma_entry *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -467,7 +465,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
struct drm_device *dev = priv->minor->dev;
mutex_lock(&dev->struct_mutex);
- drm_vm_close_locked(vma);
+ drm_vm_close_locked(dev, vma);
mutex_unlock(&dev->struct_mutex);
}
@@ -519,7 +517,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_flags |= VM_DONTEXPAND;
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
return 0;
}
@@ -670,7 +668,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED; /* Don't swap */
vma->vm_flags |= VM_DONTEXPAND;
- drm_vm_open_locked(vma);
+ drm_vm_open_locked(dev, vma);
return 0;
}
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index dd7d6b57996f..abfa2a93f0d0 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -13,7 +13,6 @@ gma500_gfx-y += gem_glue.o \
intel_i2c.o \
intel_gmbus.o \
mmu.o \
- opregion.o \
power.o \
psb_drv.o \
psb_intel_display.o \
@@ -25,6 +24,8 @@ gma500_gfx-y += gem_glue.o \
psb_device.o \
mid_bios.o
+gma500_gfx-$(CONFIG_ACPI) += opregion.o
+
gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
cdv_intel_crt.o \
cdv_intel_display.o \
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index c10f02068d11..8e393303f53a 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -255,7 +255,7 @@ static int cdv_save_display_registers(struct drm_device *dev)
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
- dev_info(dev->dev, "Saving GPU registers.\n");
+ dev_dbg(dev->dev, "Saving GPU registers.\n");
pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);
@@ -485,10 +485,68 @@ static void cdv_hotplug_enable(struct drm_device *dev, bool on)
}
}
+/* Cedarview */
+static const struct psb_offset cdv_regmap[2] = {
+ {
+ .fp0 = FPA0,
+ .fp1 = FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = DPLL_A,
+ .dpll_md = DPLL_A_MD,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .base = DSPABASE,
+ .surf = DSPASURF,
+ .addr = DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .dpll_md = DPLL_B_MD,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .base = DSPBBASE,
+ .surf = DSPBSURF,
+ .addr = DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ }
+};
+
static int cdv_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
+
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->regmap = cdv_regmap;
cdv_get_core_freq(dev);
psb_intel_opregion_init(dev);
psb_intel_init_bios(dev);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 2fab77854971..c3e9a0f701df 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -218,8 +218,7 @@ static int
cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
struct cdv_intel_clock_t *clock, bool is_lvds)
{
- struct psb_intel_crtc *psb_crtc =
- to_psb_intel_crtc(crtc);
+ struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_crtc->pipe;
u32 m, n_vco, p;
int ret = 0;
@@ -503,14 +502,12 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
int ret = 0;
@@ -532,9 +529,9 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
start = psbfb->gtt->offset;
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -556,15 +553,15 @@ static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
ret = -EINVAL;
goto psb_intel_pipe_set_base_exit;
}
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
dev_dbg(dev->dev,
"Writing base %08lX %08lX %d %d\n", start, offset, x, y);
- REG_WRITE(dspbase, offset);
- REG_READ(dspbase);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
+ REG_WRITE(map->base, offset);
+ REG_READ(map->base);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
psb_intel_pipe_cleaner:
/* If there was a previous display we can now unpin it */
@@ -721,8 +718,7 @@ static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc
static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int palreg = PALETTE_A;
int i;
@@ -758,7 +754,7 @@ static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
gma_power_end(dev);
} else {
for (i = 0; i < 256; i++) {
- dev_priv->regs.psb.save_palette_a[i] =
+ dev_priv->regs.pipe[0].palette[i] =
((psb_intel_crtc->lut_r[i] +
psb_intel_crtc->lut_adj[i]) << 16) |
((psb_intel_crtc->lut_g[i] +
@@ -779,13 +775,10 @@ static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int pipestat_reg = (pipe == 0) ? PIPEASTAT : PIPEBSTAT;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
/* XXX: When our outputs are all unaware of DPMS modes other than off
@@ -803,44 +796,44 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
psb_intel_crtc->active = true;
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Jim Bish - switch plan and pipe per scott */
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
udelay(150);
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
- temp = REG_READ(pipestat_reg);
+ temp = REG_READ(map->status);
temp &= ~(0xFFFF);
temp |= PIPE_FIFO_UNDERRUN;
- REG_WRITE(pipestat_reg, temp);
- REG_READ(pipestat_reg);
+ REG_WRITE(map->status, temp);
+ REG_READ(map->status);
cdv_intel_update_watermark(dev, crtc);
cdv_intel_crtc_load_lut(crtc);
@@ -870,10 +863,10 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
cdv_intel_wait_for_vblank(dev);
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
}
/* Wait for vblank for the disable to take effect. */
@@ -882,19 +875,19 @@ static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
udelay(150);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
}
/* Wait for the clocks to turn off. */
@@ -953,19 +946,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
struct cdv_intel_clock_t clock;
u32 dpll = 0, dspcntr, pipeconf;
@@ -1036,7 +1017,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
/* dpll |= (2 << 11); */
/* setup pipeconf */
- pipeconf = REG_READ(pipeconf_reg);
+ pipeconf = REG_READ(map->conf);
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -1049,8 +1030,8 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
dspcntr |= DISPLAY_PLANE_ENABLE;
pipeconf |= PIPEACONF_ENABLE;
- REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
+ REG_READ(map->dpll);
cdv_dpll_set_clock_cdv(dev, crtc, &clock, is_lvds);
@@ -1094,48 +1075,48 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
- REG_WRITE(dpll_reg,
- (REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll,
+ (REG_READ(map->dpll) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150); /* 42 usec w/o calibration, 110 with. rounded up. */
- if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
+ if (!(REG_READ(map->dpll) & DPLL_LOCK)) {
dev_err(dev->dev, "Failed to get DPLL lock\n");
return -EBUSY;
}
{
int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
- REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+ REG_WRITE(map->dpll_md, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
}
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
- REG_WRITE(dspsize_reg,
+ REG_WRITE(map->size,
((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
- REG_WRITE(dsppos_reg, 0);
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->pos, 0);
+ REG_WRITE(map->src,
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, pipeconf);
+ REG_READ(map->conf);
cdv_intel_wait_for_vblank(dev);
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
/* Flush the plane changes */
{
@@ -1156,11 +1137,10 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
static void cdv_intel_crtc_save(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -1169,25 +1149,25 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
return;
}
- crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
- crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
- crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
- crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
- crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
- crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
- crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
- crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
- crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
- crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
- crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
- crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
- crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+ crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+ crtc_state->savePIPECONF = REG_READ(map->conf);
+ crtc_state->savePIPESRC = REG_READ(map->src);
+ crtc_state->saveFP0 = REG_READ(map->fp0);
+ crtc_state->saveFP1 = REG_READ(map->fp1);
+ crtc_state->saveDPLL = REG_READ(map->dpll);
+ crtc_state->saveHTOTAL = REG_READ(map->htotal);
+ crtc_state->saveHBLANK = REG_READ(map->hblank);
+ crtc_state->saveHSYNC = REG_READ(map->hsync);
+ crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+ crtc_state->saveVBLANK = REG_READ(map->vblank);
+ crtc_state->saveVSYNC = REG_READ(map->vsync);
+ crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
/*NOTE: DSPSIZE DSPPOS only for psb*/
- crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
- crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+ crtc_state->saveDSPSIZE = REG_READ(map->size);
+ crtc_state->saveDSPPOS = REG_READ(map->pos);
- crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+ crtc_state->saveDSPBASE = REG_READ(map->base);
DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
crtc_state->saveDSPCNTR,
@@ -1208,7 +1188,7 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
crtc_state->saveDSPBASE
);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
}
@@ -1219,12 +1199,10 @@ static void cdv_intel_crtc_save(struct drm_crtc *crtc)
static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private * dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -1235,23 +1213,23 @@ static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
DRM_DEBUG(
"current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
- REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
- REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
- REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
- REG_READ(pipeA ? FPA0 : FPB0),
- REG_READ(pipeA ? FPA1 : FPB1),
- REG_READ(pipeA ? DPLL_A : DPLL_B),
- REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
- REG_READ(pipeA ? HBLANK_A : HBLANK_B),
- REG_READ(pipeA ? HSYNC_A : HSYNC_B),
- REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
- REG_READ(pipeA ? VBLANK_A : VBLANK_B),
- REG_READ(pipeA ? VSYNC_A : VSYNC_B),
- REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
- REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
- REG_READ(pipeA ? DSPAPOS : DSPBPOS),
- REG_READ(pipeA ? DSPABASE : DSPBBASE)
- );
+ REG_READ(map->cntr),
+ REG_READ(map->conf),
+ REG_READ(map->src),
+ REG_READ(map->fp0),
+ REG_READ(map->fp1),
+ REG_READ(map->dpll),
+ REG_READ(map->htotal),
+ REG_READ(map->hblank),
+ REG_READ(map->hsync),
+ REG_READ(map->vtotal),
+ REG_READ(map->vblank),
+ REG_READ(map->vsync),
+ REG_READ(map->stride),
+ REG_READ(map->size),
+ REG_READ(map->pos),
+ REG_READ(map->base)
+ );
DRM_DEBUG(
"saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
@@ -1271,51 +1249,51 @@ static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
crtc_state->saveDSPSIZE,
crtc_state->saveDSPPOS,
crtc_state->saveDSPBASE
- );
+ );
if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
- REG_WRITE(pipeA ? DPLL_A : DPLL_B,
- crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_WRITE(map->dpll,
+ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
DRM_DEBUG("write dpll: %x\n",
- REG_READ(pipeA ? DPLL_A : DPLL_B));
+ REG_READ(map->dpll));
udelay(150);
}
- REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
- REG_READ(pipeA ? FPA0 : FPB0);
+ REG_WRITE(map->fp0, crtc_state->saveFP0);
+ REG_READ(map->fp0);
- REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
- REG_READ(pipeA ? FPA1 : FPB1);
+ REG_WRITE(map->fp1, crtc_state->saveFP1);
+ REG_READ(map->fp1);
- REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_WRITE(map->dpll, crtc_state->saveDPLL);
+ REG_READ(map->dpll);
udelay(150);
- REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
- REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
- REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
- REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
- REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
- REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
- REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+ REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+ REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+ REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+ REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+ REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+ REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+ REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
- REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
- REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+ REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+ REG_WRITE(map->pos, crtc_state->saveDSPPOS);
- REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
- REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+ REG_WRITE(map->src, crtc_state->savePIPESRC);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
+ REG_WRITE(map->conf, crtc_state->savePIPECONF);
cdv_intel_wait_for_vblank(dev);
- REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+ REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
cdv_intel_wait_for_vblank(dev);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
}
@@ -1490,35 +1468,30 @@ static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
static int cdv_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
u32 fp;
struct cdv_intel_clock_t clock;
bool is_lvds;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
if (gma_power_begin(dev, false)) {
- dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ dpll = REG_READ(map->dpll);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+ fp = REG_READ(map->fp0);
else
- fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+ fp = REG_READ(map->fp1);
is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
gma_power_end(dev);
} else {
- dpll = (pipe == 0) ?
- dev_priv->regs.psb.saveDPLL_A :
- dev_priv->regs.psb.saveDPLL_B;
-
+ dpll = p->dpll;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA0 :
- dev_priv->regs.psb.saveFPB0;
+ fp = p->fp0;
else
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA1 :
- dev_priv->regs.psb.saveFPB1;
+ fp = p->fp1;
is_lvds = (pipe == 1) &&
(dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN);
@@ -1576,32 +1549,26 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
{
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
struct drm_display_mode *mode;
int htot;
int hsync;
int vtot;
int vsync;
- struct drm_psb_private *dev_priv = dev->dev_private;
if (gma_power_begin(dev, false)) {
- htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
- hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
- vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
- vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ htot = REG_READ(map->htotal);
+ hsync = REG_READ(map->hsync);
+ vtot = REG_READ(map->vtotal);
+ vsync = REG_READ(map->vsync);
gma_power_end(dev);
} else {
- htot = (pipe == 0) ?
- dev_priv->regs.psb.saveHTOTAL_A :
- dev_priv->regs.psb.saveHTOTAL_B;
- hsync = (pipe == 0) ?
- dev_priv->regs.psb.saveHSYNC_A :
- dev_priv->regs.psb.saveHSYNC_B;
- vtot = (pipe == 0) ?
- dev_priv->regs.psb.saveVTOTAL_A :
- dev_priv->regs.psb.saveVTOTAL_B;
- vsync = (pipe == 0) ?
- dev_priv->regs.psb.saveVSYNC_A :
- dev_priv->regs.psb.saveVSYNC_B;
+ htot = p->htotal;
+ hsync = p->hsync;
+ vtot = p->vtotal;
+ vsync = p->vsync;
}
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
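The cdv hunks above all follow one pattern: every per-pipe register choice that used to be written as (pipe == 0) ? REG_A : REG_B is replaced by a single lookup through dev_priv->regmap[pipe], a per-pipe table of register offsets. A minimal standalone sketch of that lookup pattern follows; the struct members mirror the psb_offset fields used in the diff, but the offset values, the two-pipe count and the helper names are placeholders, not the driver's real definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative subset of the per-pipe offset table. */
struct psb_offset_sketch {
	uint32_t dpll;
	uint32_t conf;
	uint32_t htotal;
	uint32_t vtotal;
};

/* One entry per pipe replaces scattered (pipe == 0) ? REG_A : REG_B ternaries. */
static const struct psb_offset_sketch regmap_sketch[2] = {
	{ .dpll = 0x6014, .conf = 0x70008, .htotal = 0x60000, .vtotal = 0x6000c },
	{ .dpll = 0x6018, .conf = 0x71008, .htotal = 0x61000, .vtotal = 0x6100c },
};

/* Stand-in for REG_READ(); just echoes the offset. */
static uint32_t reg_read(uint32_t reg)
{
	return reg;
}

int main(void)
{
	for (int pipe = 0; pipe < 2; pipe++) {
		const struct psb_offset_sketch *map = &regmap_sketch[pipe];

		/* Every per-pipe access now goes through one lookup. */
		printf("pipe %d: dpll=%#x conf=%#x\n", pipe,
		       (unsigned)reg_read(map->dpll),
		       (unsigned)reg_read(map->conf));
	}
	return 0;
}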
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index f47f883ff9ef..8d77224afc34 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -476,7 +476,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- dev_info(dev->dev, "allocated %dx%d fb\n",
+ dev_dbg(dev->dev, "allocated %dx%d fb\n",
psbfb->base.width, psbfb->base.height);
mutex_unlock(&dev->struct_mutex);
@@ -800,15 +800,20 @@ void psb_modeset_init(struct drm_device *dev)
if (dev_priv->ops->errata)
dev_priv->ops->errata(dev);
+
+ dev_priv->modeset = true;
}
void psb_modeset_cleanup(struct drm_device *dev)
{
- mutex_lock(&dev->struct_mutex);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (dev_priv->modeset) {
+ mutex_lock(&dev->struct_mutex);
- drm_kms_helper_poll_fini(dev);
- psb_fbdev_fini(dev);
- drm_mode_config_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
+ psb_fbdev_fini(dev);
+ drm_mode_config_cleanup(dev);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
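The framebuffer.c change makes teardown conditional: psb_modeset_init() now records that it ran by setting dev_priv->modeset, and psb_modeset_cleanup() bails out when that flag was never set, so a failed or skipped init no longer leads to cleanup of state that does not exist. A tiny standalone sketch of that init/teardown guard, with made-up names:

#include <stdbool.h>
#include <stdio.h>

struct demo_priv {
	bool modeset;		/* set only once init has really run */
};

static void demo_modeset_init(struct demo_priv *p)
{
	/* ... set up mode-setting state ... */
	p->modeset = true;
}

static void demo_modeset_cleanup(struct demo_priv *p)
{
	if (!p->modeset)	/* init never happened: nothing to undo */
		return;
	printf("cleaning up\n");
}

int main(void)
{
	struct demo_priv p = { .modeset = false };

	demo_modeset_cleanup(&p);	/* safe no-op before init */
	demo_modeset_init(&p);
	demo_modeset_cleanup(&p);	/* real teardown */
	return 0;
}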
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 4cd33df5f93c..04a371aceb34 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -416,7 +416,6 @@ int psb_gtt_init(struct drm_device *dev, int resume)
unsigned long stolen_size, vram_stolen_size;
unsigned i, num_pages;
unsigned pfn_base;
- uint32_t dvmt_mode = 0;
struct psb_gtt *pg;
int ret = 0;
@@ -489,13 +488,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
stolen_size = vram_stolen_size;
- printk(KERN_INFO "Stolen memory information\n");
- printk(KERN_INFO " base in RAM: 0x%x\n", dev_priv->stolen_base);
- printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
- vram_stolen_size/1024);
- dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
- printk(KERN_INFO " the correct size should be: %dM(dvmt mode=%d)\n",
- (dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
+ dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
+ dev_priv->stolen_base, vram_stolen_size / 1024);
if (resume && (gtt_pages != pg->gtt_pages) &&
(stolen_size != pg->stolen_size)) {
@@ -532,7 +526,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
num_pages = vram_stolen_size >> PAGE_SHIFT;
- printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+ dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
num_pages, pfn_base << PAGE_SHIFT, 0);
for (i = 0; i < num_pages; ++i) {
pte = psb_gtt_mask_pte(pfn_base + i, 0);
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 479e4497d26c..973d7f6d66b7 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -490,26 +490,8 @@ bool psb_intel_init_bios(struct drm_device *dev)
void psb_intel_destroy_bios(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct drm_display_mode *sdvo_lvds_vbt_mode =
- dev_priv->sdvo_lvds_vbt_mode;
- struct drm_display_mode *lfp_lvds_vbt_mode =
- dev_priv->lfp_lvds_vbt_mode;
- struct bdb_lvds_backlight *lvds_bl =
- dev_priv->lvds_bl;
-
- /*free sdvo panel mode*/
- if (sdvo_lvds_vbt_mode) {
- dev_priv->sdvo_lvds_vbt_mode = NULL;
- kfree(sdvo_lvds_vbt_mode);
- }
-
- if (lfp_lvds_vbt_mode) {
- dev_priv->lfp_lvds_vbt_mode = NULL;
- kfree(lfp_lvds_vbt_mode);
- }
- if (lvds_bl) {
- dev_priv->lvds_bl = NULL;
- kfree(lvds_bl);
- }
+ kfree(dev_priv->sdvo_lvds_vbt_mode);
+ kfree(dev_priv->lfp_lvds_vbt_mode);
+ kfree(dev_priv->lvds_bl);
}
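The intel_bios.c hunk relies on kfree(NULL) being a defined no-op, so the local copies, NULL checks and pointer-clearing steps collapse into three unconditional kfree() calls. The same idiom in plain C, using free() (likewise a no-op on NULL); the struct and field names are only illustrative:

#include <stdlib.h>

struct vbt_sketch {
	char *sdvo_lvds_vbt_mode;
	char *lfp_lvds_vbt_mode;
	char *lvds_bl;
};

static void destroy_sketch(struct vbt_sketch *s)
{
	/* free(NULL) is defined to do nothing, just like kfree(NULL),
	 * so no per-pointer NULL checks are needed. */
	free(s->sdvo_lvds_vbt_mode);
	free(s->lfp_lvds_vbt_mode);
	free(s->lvds_bl);
}

int main(void)
{
	struct vbt_sketch s = { .sdvo_lvds_vbt_mode = malloc(16) };

	destroy_sketch(&s);	/* two of the three pointers are NULL */
	return 0;
}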
diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
index 717f4db28c3c..2d8e741e06d7 100644
--- a/drivers/gpu/drm/gma500/mdfld_device.c
+++ b/drivers/gpu/drm/gma500/mdfld_device.c
@@ -163,142 +163,30 @@ struct backlight_device *mdfld_get_backlight_device(void)
*
* Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
*/
-static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
+static int mdfld_save_display_registers(struct drm_device *dev, int pipenum)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct medfield_state *regs = &dev_priv->regs.mdfld;
+ struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
+ const struct psb_offset *map = &dev_priv->regmap[pipenum];
int i;
+ u32 *mipi_val;
/* register */
- u32 dpll_reg = MRST_DPLL_A;
- u32 fp_reg = MRST_FPA0;
- u32 pipeconf_reg = PIPEACONF;
- u32 htot_reg = HTOTAL_A;
- u32 hblank_reg = HBLANK_A;
- u32 hsync_reg = HSYNC_A;
- u32 vtot_reg = VTOTAL_A;
- u32 vblank_reg = VBLANK_A;
- u32 vsync_reg = VSYNC_A;
- u32 pipesrc_reg = PIPEASRC;
- u32 dspstride_reg = DSPASTRIDE;
- u32 dsplinoff_reg = DSPALINOFF;
- u32 dsptileoff_reg = DSPATILEOFF;
- u32 dspsize_reg = DSPASIZE;
- u32 dsppos_reg = DSPAPOS;
- u32 dspsurf_reg = DSPASURF;
u32 mipi_reg = MIPI;
- u32 dspcntr_reg = DSPACNTR;
- u32 dspstatus_reg = PIPEASTAT;
- u32 palette_reg = PALETTE_A;
-
- /* pointer to values */
- u32 *dpll_val = &regs->saveDPLL_A;
- u32 *fp_val = &regs->saveFPA0;
- u32 *pipeconf_val = &regs->savePIPEACONF;
- u32 *htot_val = &regs->saveHTOTAL_A;
- u32 *hblank_val = &regs->saveHBLANK_A;
- u32 *hsync_val = &regs->saveHSYNC_A;
- u32 *vtot_val = &regs->saveVTOTAL_A;
- u32 *vblank_val = &regs->saveVBLANK_A;
- u32 *vsync_val = &regs->saveVSYNC_A;
- u32 *pipesrc_val = &regs->savePIPEASRC;
- u32 *dspstride_val = &regs->saveDSPASTRIDE;
- u32 *dsplinoff_val = &regs->saveDSPALINOFF;
- u32 *dsptileoff_val = &regs->saveDSPATILEOFF;
- u32 *dspsize_val = &regs->saveDSPASIZE;
- u32 *dsppos_val = &regs->saveDSPAPOS;
- u32 *dspsurf_val = &regs->saveDSPASURF;
- u32 *mipi_val = &regs->saveMIPI;
- u32 *dspcntr_val = &regs->saveDSPACNTR;
- u32 *dspstatus_val = &regs->saveDSPASTATUS;
- u32 *palette_val = regs->save_palette_a;
-
- switch (pipe) {
+
+ switch (pipenum) {
case 0:
+ mipi_val = &regs->saveMIPI;
break;
case 1:
- /* regester */
- dpll_reg = MDFLD_DPLL_B;
- fp_reg = MDFLD_DPLL_DIV0;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- pipesrc_reg = PIPEBSRC;
- dspstride_reg = DSPBSTRIDE;
- dsplinoff_reg = DSPBLINOFF;
- dsptileoff_reg = DSPBTILEOFF;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- dspsurf_reg = DSPBSURF;
- dspcntr_reg = DSPBCNTR;
- dspstatus_reg = PIPEBSTAT;
- palette_reg = PALETTE_B;
-
- /* values */
- dpll_val = &regs->saveDPLL_B;
- fp_val = &regs->saveFPB0;
- pipeconf_val = &regs->savePIPEBCONF;
- htot_val = &regs->saveHTOTAL_B;
- hblank_val = &regs->saveHBLANK_B;
- hsync_val = &regs->saveHSYNC_B;
- vtot_val = &regs->saveVTOTAL_B;
- vblank_val = &regs->saveVBLANK_B;
- vsync_val = &regs->saveVSYNC_B;
- pipesrc_val = &regs->savePIPEBSRC;
- dspstride_val = &regs->saveDSPBSTRIDE;
- dsplinoff_val = &regs->saveDSPBLINOFF;
- dsptileoff_val = &regs->saveDSPBTILEOFF;
- dspsize_val = &regs->saveDSPBSIZE;
- dsppos_val = &regs->saveDSPBPOS;
- dspsurf_val = &regs->saveDSPBSURF;
- dspcntr_val = &regs->saveDSPBCNTR;
- dspstatus_val = &regs->saveDSPBSTATUS;
- palette_val = regs->save_palette_b;
+ mipi_val = &regs->saveMIPI;
break;
case 2:
/* register */
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- pipesrc_reg = PIPECSRC;
- dspstride_reg = DSPCSTRIDE;
- dsplinoff_reg = DSPCLINOFF;
- dsptileoff_reg = DSPCTILEOFF;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- dspsurf_reg = DSPCSURF;
mipi_reg = MIPI_C;
- dspcntr_reg = DSPCCNTR;
- dspstatus_reg = PIPECSTAT;
- palette_reg = PALETTE_C;
-
/* pointer to values */
- pipeconf_val = &regs->savePIPECCONF;
- htot_val = &regs->saveHTOTAL_C;
- hblank_val = &regs->saveHBLANK_C;
- hsync_val = &regs->saveHSYNC_C;
- vtot_val = &regs->saveVTOTAL_C;
- vblank_val = &regs->saveVBLANK_C;
- vsync_val = &regs->saveVSYNC_C;
- pipesrc_val = &regs->savePIPECSRC;
- dspstride_val = &regs->saveDSPCSTRIDE;
- dsplinoff_val = &regs->saveDSPCLINOFF;
- dsptileoff_val = &regs->saveDSPCTILEOFF;
- dspsize_val = &regs->saveDSPCSIZE;
- dsppos_val = &regs->saveDSPCPOS;
- dspsurf_val = &regs->saveDSPCSURF;
mipi_val = &regs->saveMIPI_C;
- dspcntr_val = &regs->saveDSPCCNTR;
- dspstatus_val = &regs->saveDSPCSTATUS;
- palette_val = regs->save_palette_c;
break;
default:
DRM_ERROR("%s, invalid pipe number.\n", __func__);
@@ -306,30 +194,30 @@ static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
}
/* Pipe & plane A info */
- *dpll_val = PSB_RVDC32(dpll_reg);
- *fp_val = PSB_RVDC32(fp_reg);
- *pipeconf_val = PSB_RVDC32(pipeconf_reg);
- *htot_val = PSB_RVDC32(htot_reg);
- *hblank_val = PSB_RVDC32(hblank_reg);
- *hsync_val = PSB_RVDC32(hsync_reg);
- *vtot_val = PSB_RVDC32(vtot_reg);
- *vblank_val = PSB_RVDC32(vblank_reg);
- *vsync_val = PSB_RVDC32(vsync_reg);
- *pipesrc_val = PSB_RVDC32(pipesrc_reg);
- *dspstride_val = PSB_RVDC32(dspstride_reg);
- *dsplinoff_val = PSB_RVDC32(dsplinoff_reg);
- *dsptileoff_val = PSB_RVDC32(dsptileoff_reg);
- *dspsize_val = PSB_RVDC32(dspsize_reg);
- *dsppos_val = PSB_RVDC32(dsppos_reg);
- *dspsurf_val = PSB_RVDC32(dspsurf_reg);
- *dspcntr_val = PSB_RVDC32(dspcntr_reg);
- *dspstatus_val = PSB_RVDC32(dspstatus_reg);
+ pipe->dpll = PSB_RVDC32(map->dpll);
+ pipe->fp0 = PSB_RVDC32(map->fp0);
+ pipe->conf = PSB_RVDC32(map->conf);
+ pipe->htotal = PSB_RVDC32(map->htotal);
+ pipe->hblank = PSB_RVDC32(map->hblank);
+ pipe->hsync = PSB_RVDC32(map->hsync);
+ pipe->vtotal = PSB_RVDC32(map->vtotal);
+ pipe->vblank = PSB_RVDC32(map->vblank);
+ pipe->vsync = PSB_RVDC32(map->vsync);
+ pipe->src = PSB_RVDC32(map->src);
+ pipe->stride = PSB_RVDC32(map->stride);
+ pipe->linoff = PSB_RVDC32(map->linoff);
+ pipe->tileoff = PSB_RVDC32(map->tileoff);
+ pipe->size = PSB_RVDC32(map->size);
+ pipe->pos = PSB_RVDC32(map->pos);
+ pipe->surf = PSB_RVDC32(map->surf);
+ pipe->cntr = PSB_RVDC32(map->cntr);
+ pipe->status = PSB_RVDC32(map->status);
/*save palette (gamma) */
for (i = 0; i < 256; i++)
- palette_val[i] = PSB_RVDC32(palette_reg + (i << 2));
+ pipe->palette[i] = PSB_RVDC32(map->palette + (i << 2));
- if (pipe == 1) {
+ if (pipenum == 1) {
regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
@@ -349,7 +237,7 @@ static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
*
* Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
*/
-static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
+static int mdfld_restore_display_registers(struct drm_device *dev, int pipenum)
{
/* To get panel out of ULPS mode. */
u32 temp = 0;
@@ -357,142 +245,30 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
struct drm_psb_private *dev_priv = dev->dev_private;
struct mdfld_dsi_config *dsi_config = NULL;
struct medfield_state *regs = &dev_priv->regs.mdfld;
- u32 i = 0;
- u32 dpll = 0;
+ struct psb_pipe *pipe = &dev_priv->regs.pipe[pipenum];
+ const struct psb_offset *map = &dev_priv->regmap[pipenum];
+ u32 i;
+ u32 dpll;
u32 timeout = 0;
- /* regester */
- u32 dpll_reg = MRST_DPLL_A;
- u32 fp_reg = MRST_FPA0;
- u32 pipeconf_reg = PIPEACONF;
- u32 htot_reg = HTOTAL_A;
- u32 hblank_reg = HBLANK_A;
- u32 hsync_reg = HSYNC_A;
- u32 vtot_reg = VTOTAL_A;
- u32 vblank_reg = VBLANK_A;
- u32 vsync_reg = VSYNC_A;
- u32 pipesrc_reg = PIPEASRC;
- u32 dspstride_reg = DSPASTRIDE;
- u32 dsplinoff_reg = DSPALINOFF;
- u32 dsptileoff_reg = DSPATILEOFF;
- u32 dspsize_reg = DSPASIZE;
- u32 dsppos_reg = DSPAPOS;
- u32 dspsurf_reg = DSPASURF;
- u32 dspstatus_reg = PIPEASTAT;
+ /* register */
u32 mipi_reg = MIPI;
- u32 dspcntr_reg = DSPACNTR;
- u32 palette_reg = PALETTE_A;
/* values */
- u32 dpll_val = regs->saveDPLL_A & ~DPLL_VCO_ENABLE;
- u32 fp_val = regs->saveFPA0;
- u32 pipeconf_val = regs->savePIPEACONF;
- u32 htot_val = regs->saveHTOTAL_A;
- u32 hblank_val = regs->saveHBLANK_A;
- u32 hsync_val = regs->saveHSYNC_A;
- u32 vtot_val = regs->saveVTOTAL_A;
- u32 vblank_val = regs->saveVBLANK_A;
- u32 vsync_val = regs->saveVSYNC_A;
- u32 pipesrc_val = regs->savePIPEASRC;
- u32 dspstride_val = regs->saveDSPASTRIDE;
- u32 dsplinoff_val = regs->saveDSPALINOFF;
- u32 dsptileoff_val = regs->saveDSPATILEOFF;
- u32 dspsize_val = regs->saveDSPASIZE;
- u32 dsppos_val = regs->saveDSPAPOS;
- u32 dspsurf_val = regs->saveDSPASURF;
- u32 dspstatus_val = regs->saveDSPASTATUS;
+ u32 dpll_val = pipe->dpll;
u32 mipi_val = regs->saveMIPI;
- u32 dspcntr_val = regs->saveDSPACNTR;
- u32 *palette_val = regs->save_palette_a;
- switch (pipe) {
+ switch (pipenum) {
case 0:
+ dpll_val &= ~DPLL_VCO_ENABLE;
dsi_config = dev_priv->dsi_configs[0];
break;
case 1:
- /* regester */
- dpll_reg = MDFLD_DPLL_B;
- fp_reg = MDFLD_DPLL_DIV0;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- pipesrc_reg = PIPEBSRC;
- dspstride_reg = DSPBSTRIDE;
- dsplinoff_reg = DSPBLINOFF;
- dsptileoff_reg = DSPBTILEOFF;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- dspsurf_reg = DSPBSURF;
- dspcntr_reg = DSPBCNTR;
- dspstatus_reg = PIPEBSTAT;
- palette_reg = PALETTE_B;
-
- /* values */
- dpll_val = regs->saveDPLL_B & ~DPLL_VCO_ENABLE;
- fp_val = regs->saveFPB0;
- pipeconf_val = regs->savePIPEBCONF;
- htot_val = regs->saveHTOTAL_B;
- hblank_val = regs->saveHBLANK_B;
- hsync_val = regs->saveHSYNC_B;
- vtot_val = regs->saveVTOTAL_B;
- vblank_val = regs->saveVBLANK_B;
- vsync_val = regs->saveVSYNC_B;
- pipesrc_val = regs->savePIPEBSRC;
- dspstride_val = regs->saveDSPBSTRIDE;
- dsplinoff_val = regs->saveDSPBLINOFF;
- dsptileoff_val = regs->saveDSPBTILEOFF;
- dspsize_val = regs->saveDSPBSIZE;
- dsppos_val = regs->saveDSPBPOS;
- dspsurf_val = regs->saveDSPBSURF;
- dspcntr_val = regs->saveDSPBCNTR;
- dspstatus_val = regs->saveDSPBSTATUS;
- palette_val = regs->save_palette_b;
+ dpll_val &= ~DPLL_VCO_ENABLE;
break;
case 2:
- /* regester */
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- pipesrc_reg = PIPECSRC;
- dspstride_reg = DSPCSTRIDE;
- dsplinoff_reg = DSPCLINOFF;
- dsptileoff_reg = DSPCTILEOFF;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- dspsurf_reg = DSPCSURF;
mipi_reg = MIPI_C;
- dspcntr_reg = DSPCCNTR;
- dspstatus_reg = PIPECSTAT;
- palette_reg = PALETTE_C;
-
- /* values */
- pipeconf_val = regs->savePIPECCONF;
- htot_val = regs->saveHTOTAL_C;
- hblank_val = regs->saveHBLANK_C;
- hsync_val = regs->saveHSYNC_C;
- vtot_val = regs->saveVTOTAL_C;
- vblank_val = regs->saveVBLANK_C;
- vsync_val = regs->saveVSYNC_C;
- pipesrc_val = regs->savePIPECSRC;
- dspstride_val = regs->saveDSPCSTRIDE;
- dsplinoff_val = regs->saveDSPCLINOFF;
- dsptileoff_val = regs->saveDSPCTILEOFF;
- dspsize_val = regs->saveDSPCSIZE;
- dsppos_val = regs->saveDSPCPOS;
- dspsurf_val = regs->saveDSPCSURF;
mipi_val = regs->saveMIPI_C;
- dspcntr_val = regs->saveDSPCCNTR;
- dspstatus_val = regs->saveDSPCSTATUS;
- palette_val = regs->save_palette_c;
-
dsi_config = dev_priv->dsi_configs[1];
break;
default:
@@ -503,14 +279,14 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
/*make sure VGA plane is off. it initializes to on after reset!*/
PSB_WVDC32(0x80000000, VGACNTRL);
- if (pipe == 1) {
- PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, dpll_reg);
- PSB_RVDC32(dpll_reg);
+ if (pipenum == 1) {
+ PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, map->dpll);
+ PSB_RVDC32(map->dpll);
- PSB_WVDC32(fp_val, fp_reg);
+ PSB_WVDC32(pipe->fp0, map->fp0);
} else {
- dpll = PSB_RVDC32(dpll_reg);
+ dpll = PSB_RVDC32(map->dpll);
if (!(dpll & DPLL_VCO_ENABLE)) {
@@ -518,23 +294,23 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
before enabling the VCO */
if (dpll & MDFLD_PWR_GATE_EN) {
dpll &= ~MDFLD_PWR_GATE_EN;
- PSB_WVDC32(dpll, dpll_reg);
+ PSB_WVDC32(dpll, map->dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
- PSB_WVDC32(fp_val, fp_reg);
- PSB_WVDC32(dpll_val, dpll_reg);
+ PSB_WVDC32(pipe->fp0, map->fp0);
+ PSB_WVDC32(dpll_val, map->dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
dpll_val |= DPLL_VCO_ENABLE;
- PSB_WVDC32(dpll_val, dpll_reg);
- PSB_RVDC32(dpll_reg);
+ PSB_WVDC32(dpll_val, map->dpll);
+ PSB_RVDC32(map->dpll);
/* wait for DSI PLL to lock */
while (timeout < 20000 &&
- !(PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ !(PSB_RVDC32(map->conf) & PIPECONF_DSIPLL_LOCK)) {
udelay(150);
timeout++;
}
@@ -547,28 +323,28 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
}
}
/* Restore mode */
- PSB_WVDC32(htot_val, htot_reg);
- PSB_WVDC32(hblank_val, hblank_reg);
- PSB_WVDC32(hsync_val, hsync_reg);
- PSB_WVDC32(vtot_val, vtot_reg);
- PSB_WVDC32(vblank_val, vblank_reg);
- PSB_WVDC32(vsync_val, vsync_reg);
- PSB_WVDC32(pipesrc_val, pipesrc_reg);
- PSB_WVDC32(dspstatus_val, dspstatus_reg);
+ PSB_WVDC32(pipe->htotal, map->htotal);
+ PSB_WVDC32(pipe->hblank, map->hblank);
+ PSB_WVDC32(pipe->hsync, map->hsync);
+ PSB_WVDC32(pipe->vtotal, map->vtotal);
+ PSB_WVDC32(pipe->vblank, map->vblank);
+ PSB_WVDC32(pipe->vsync, map->vsync);
+ PSB_WVDC32(pipe->src, map->src);
+ PSB_WVDC32(pipe->status, map->status);
/*set up the plane*/
- PSB_WVDC32(dspstride_val, dspstride_reg);
- PSB_WVDC32(dsplinoff_val, dsplinoff_reg);
- PSB_WVDC32(dsptileoff_val, dsptileoff_reg);
- PSB_WVDC32(dspsize_val, dspsize_reg);
- PSB_WVDC32(dsppos_val, dsppos_reg);
- PSB_WVDC32(dspsurf_val, dspsurf_reg);
-
- if (pipe == 1) {
+ PSB_WVDC32(pipe->stride, map->stride);
+ PSB_WVDC32(pipe->linoff, map->linoff);
+ PSB_WVDC32(pipe->tileoff, map->tileoff);
+ PSB_WVDC32(pipe->size, map->size);
+ PSB_WVDC32(pipe->pos, map->pos);
+ PSB_WVDC32(pipe->surf, map->surf);
+
+ if (pipenum == 1) {
/* restore palette (gamma) */
/*DRM_UDELAY(50000); */
for (i = 0; i < 256; i++)
- PSB_WVDC32(palette_val[i], palette_reg + (i << 2));
+ PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL);
PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
@@ -578,7 +354,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
/*TODO: resume pipe*/
/*enable the plane*/
- PSB_WVDC32(dspcntr_val & ~DISPLAY_PLANE_ENABLE, dspcntr_reg);
+ PSB_WVDC32(pipe->cntr & ~DISPLAY_PLANE_ENABLE, map->cntr);
return 0;
}
@@ -588,7 +364,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
/*setup MIPI adapter + MIPI IP registers*/
if (dsi_config)
- mdfld_dsi_controller_init(dsi_config, pipe);
+ mdfld_dsi_controller_init(dsi_config, pipenum);
if (in_atomic() || in_interrupt())
mdelay(20);
@@ -596,7 +372,7 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
msleep(20);
/*enable the plane*/
- PSB_WVDC32(dspcntr_val, dspcntr_reg);
+ PSB_WVDC32(pipe->cntr, map->cntr);
if (in_atomic() || in_interrupt())
mdelay(20);
@@ -625,12 +401,12 @@ static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
mdelay(1);
/*enable the pipe*/
- PSB_WVDC32(pipeconf_val, pipeconf_reg);
+ PSB_WVDC32(pipe->conf, map->conf);
/* restore palette (gamma) */
/*DRM_UDELAY(50000); */
for (i = 0; i < 256; i++)
- PSB_WVDC32(palette_val[i], palette_reg + (i << 2));
+ PSB_WVDC32(pipe->palette[i], map->palette + (i << 2));
return 0;
}
@@ -667,6 +443,87 @@ static int mdfld_power_up(struct drm_device *dev)
return 0;
}
+/* Medfield */
+static const struct psb_offset mdfld_regmap[3] = {
+ {
+ .fp0 = MRST_FPA0,
+ .fp1 = MRST_FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .surf = DSPASURF,
+ .addr = MRST_DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = MDFLD_DPLL_DIV0,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = MDFLD_DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .surf = DSPBSURF,
+ .addr = MRST_DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ },
+ {
+ .fp0 = MRST_FPA0, /* This is what the old code did ?? */
+ .cntr = DSPCCNTR,
+ .conf = PIPECCONF,
+ .src = PIPECSRC,
+ /* No DPLL_C */
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_C,
+ .hblank = HBLANK_C,
+ .hsync = HSYNC_C,
+ .vtotal = VTOTAL_C,
+ .vblank = VBLANK_C,
+ .vsync = VSYNC_C,
+ .stride = DSPCSTRIDE,
+ .size = DSPCSIZE,
+ .pos = DSPCPOS,
+ .surf = DSPCSURF,
+ .addr = MDFLD_DSPCBASE,
+ .status = PIPECSTAT,
+ .linoff = DSPCLINOFF,
+ .tileoff = DSPCTILEOFF,
+ .palette = PALETTE_C,
+ },
+};
+
+static int mdfld_chip_setup(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->regmap = mdfld_regmap;
+ return mid_chip_setup(dev);
+}
+
const struct psb_ops mdfld_chip_ops = {
.name = "mdfld",
.accel_2d = 0,
@@ -676,7 +533,7 @@ const struct psb_ops mdfld_chip_ops = {
.hdmi_mask = (1 << 1),
.sgx_offset = MRST_SGX_OFFSET,
- .chip_setup = mid_chip_setup,
+ .chip_setup = mdfld_chip_setup,
.crtc_helper = &mdfld_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
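In mdfld_device.c the three per-pipe switch arms that built parallel lists of register offsets and save-value pointers are replaced by two indexed structures: dev_priv->regmap[pipenum] supplies the offsets and dev_priv->regs.pipe[pipenum] receives the saved values, so save and restore become straight field-for-field copies. A standalone sketch of that shape, with made-up offsets and a trimmed field list:

#include <stdint.h>
#include <stdio.h>

/* Offsets (where to read) and saved values (what was read) live in
 * parallel per-pipe structures. All numbers here are placeholders. */
struct map_sketch  { uint32_t conf, htotal, vtotal; };
struct pipe_sketch { uint32_t conf, htotal, vtotal; };

static const struct map_sketch regmap_sketch[3] = {
	{ 0x70008, 0x60000, 0x6000c },
	{ 0x71008, 0x61000, 0x6100c },
	{ 0x72008, 0x62000, 0x6200c },
};

static uint32_t mmio_read(uint32_t reg)		/* stand-in for PSB_RVDC32 */
{
	return reg ^ 0x5a5a;
}

static void mmio_write(uint32_t val, uint32_t reg)	/* stand-in for PSB_WVDC32 */
{
	(void)val;
	(void)reg;
}

static void save_pipe(int pipenum, struct pipe_sketch *pipe)
{
	const struct map_sketch *map = &regmap_sketch[pipenum];

	pipe->conf   = mmio_read(map->conf);
	pipe->htotal = mmio_read(map->htotal);
	pipe->vtotal = mmio_read(map->vtotal);
}

static void restore_pipe(int pipenum, const struct pipe_sketch *pipe)
{
	const struct map_sketch *map = &regmap_sketch[pipenum];

	mmio_write(pipe->htotal, map->htotal);
	mmio_write(pipe->vtotal, map->vtotal);
	mmio_write(pipe->conf, map->conf);
}

int main(void)
{
	struct pipe_sketch saved[3];

	for (int i = 0; i < 3; i++)
		save_pipe(i, &saved[i]);
	for (int i = 0; i < 3; i++)
		restore_pipe(i, &saved[i]);
	printf("pipe 1 saved conf = %#x\n", (unsigned)saved[1].conf);
	return 0;
}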
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index baa0e14165e0..489ffd2c66e5 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -605,6 +605,8 @@ int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
struct mdfld_dsi_config *dsi_config =
mdfld_dsi_get_config(dsi_connector);
struct drm_device *dev = dsi_config->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 mipi_val = 0;
if (!dsi_connector) {
@@ -632,21 +634,13 @@ int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
/*init regs*/
- if (pipe == 0) {
- pkg_sender->dpll_reg = MRST_DPLL_A;
- pkg_sender->dspcntr_reg = DSPACNTR;
- pkg_sender->pipeconf_reg = PIPEACONF;
- pkg_sender->dsplinoff_reg = DSPALINOFF;
- pkg_sender->dspsurf_reg = DSPASURF;
- pkg_sender->pipestat_reg = PIPEASTAT;
- } else if (pipe == 2) {
- pkg_sender->dpll_reg = MRST_DPLL_A;
- pkg_sender->dspcntr_reg = DSPCCNTR;
- pkg_sender->pipeconf_reg = PIPECCONF;
- pkg_sender->dsplinoff_reg = DSPCLINOFF;
- pkg_sender->dspsurf_reg = DSPCSURF;
- pkg_sender->pipestat_reg = PIPECSTAT;
- }
+ /* FIXME: should just copy the regmap ptr ? */
+ pkg_sender->dpll_reg = map->dpll;
+ pkg_sender->dspcntr_reg = map->cntr;
+ pkg_sender->pipeconf_reg = map->conf;
+ pkg_sender->dsplinoff_reg = map->linoff;
+ pkg_sender->dspsurf_reg = map->surf;
+ pkg_sender->pipestat_reg = map->status;
pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe);
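The pkg sender hunk swaps the pipe 0/2 if/else for reads from the same per-pipe map, and the added FIXME asks whether the sender should simply keep the regmap pointer instead of copying six offsets. A small sketch of the two options, with placeholder offsets and illustrative struct names:

#include <stdint.h>
#include <stdio.h>

struct map_sketch { uint32_t dpll, cntr, conf; };	/* trimmed field list */

static const struct map_sketch regmap_sketch[3] = {
	{ 0x0f014, 0x70180, 0x70008 },
	{ 0x0f018, 0x71180, 0x71008 },
	{ 0x0f014, 0x72180, 0x72008 },
};

/* What the patch does: copy the individual offsets into the sender. */
struct sender_copy { uint32_t dpll_reg, dspcntr_reg, pipeconf_reg; };

/* What the FIXME suggests: keep one pointer to the per-pipe map. */
struct sender_ptr { const struct map_sketch *map; };

int main(void)
{
	int pipe = 2;
	const struct map_sketch *map = &regmap_sketch[pipe];

	struct sender_copy a = {
		.dpll_reg = map->dpll,
		.dspcntr_reg = map->cntr,
		.pipeconf_reg = map->conf,
	};
	struct sender_ptr b = { .map = map };

	printf("copied conf=%#x, via pointer conf=%#x\n",
	       (unsigned)a.pipeconf_reg, (unsigned)b.map->conf);
	return 0;
}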
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index a35a2921bdf7..3f3cd619c79f 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -50,17 +50,14 @@ struct mrst_clock_t {
void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int count, temp;
- u32 pipeconf_reg = PIPEACONF;
switch (pipe) {
case 0:
- break;
case 1:
- pipeconf_reg = PIPEBCONF;
- break;
case 2:
- pipeconf_reg = PIPECCONF;
break;
default:
DRM_ERROR("Illegal Pipe Number.\n");
@@ -73,7 +70,7 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
/* Wait for the pipe disable to take effect. */
for (count = 0; count < COUNT_MAX; count++) {
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_PIPE_STATE) == 0)
break;
}
@@ -81,17 +78,14 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int count, temp;
- u32 pipeconf_reg = PIPEACONF;
switch (pipe) {
case 0:
- break;
case 1:
- pipeconf_reg = PIPEBCONF;
- break;
case 2:
- pipeconf_reg = PIPECCONF;
break;
default:
DRM_ERROR("Illegal Pipe Number.\n");
@@ -104,7 +98,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
/* Wait for the pipe enable to take effect. */
for (count = 0; count < COUNT_MAX; count++) {
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_PIPE_STATE) == 1)
break;
}
@@ -189,15 +183,12 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_i915_master_private *master_priv; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dsplinoff = DSPALINOFF;
- int dspsurf = DSPASURF;
- int dspstride = DSPASTRIDE;
- int dspcntr_reg = DSPACNTR;
u32 dspcntr;
int ret;
@@ -215,23 +206,7 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (ret)
return ret;
- switch (pipe) {
- case 0:
- dsplinoff = DSPALINOFF;
- break;
- case 1:
- dsplinoff = DSPBLINOFF;
- dspsurf = DSPBSURF;
- dspstride = DSPBSTRIDE;
- dspcntr_reg = DSPBCNTR;
- break;
- case 2:
- dsplinoff = DSPCLINOFF;
- dspsurf = DSPCSURF;
- dspstride = DSPCSTRIDE;
- dspcntr_reg = DSPCCNTR;
- break;
- default:
+ if (pipe > 2) {
DRM_ERROR("Illegal Pipe Number.\n");
return -EINVAL;
}
@@ -242,8 +217,8 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
start = psbfb->gtt->offset;
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -261,14 +236,14 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
}
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
start, offset, x, y);
- REG_WRITE(dsplinoff, offset);
- REG_READ(dsplinoff);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
+ REG_WRITE(map->linoff, offset);
+ REG_READ(map->linoff);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
gma_power_end(dev);
@@ -281,78 +256,56 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
*/
void mdfld_disable_crtc(struct drm_device *dev, int pipe)
{
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int dspbase_reg = MRST_DSPABASE;
- int pipeconf_reg = PIPEACONF;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
dev_dbg(dev->dev, "pipe = %d\n", pipe);
- switch (pipe) {
- case 0:
- break;
- case 1:
- dpll_reg = MDFLD_DPLL_B;
- dspcntr_reg = DSPBCNTR;
- dspbase_reg = DSPBSURF;
- pipeconf_reg = PIPEBCONF;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- dspbase_reg = MDFLD_DSPCBASE;
- pipeconf_reg = PIPECCONF;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return;
- }
-
if (pipe != 1)
mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* FIXME_JLIU7 MDFLD_PO revisit */
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
temp &= ~PIPEACONF_ENABLE;
temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
- REG_WRITE(pipeconf_reg, temp);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp);
+ REG_READ(map->conf);
/* Wait for the pipe disable to take effect. */
mdfldWaitForPipeDisable(dev, pipe);
}
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if (temp & DPLL_VCO_ENABLE) {
if ((pipe != 1 &&
!((REG_READ(PIPEACONF) | REG_READ(PIPECCONF))
& PIPEACONF_ENABLE)) || pipe == 1) {
temp &= ~(DPLL_VCO_ENABLE);
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to turn off. */
/* FIXME_MDFLD PO may need more delay */
udelay(500);
if (!(temp & MDFLD_PWR_GATE_EN)) {
/* gating power of DPLL */
- REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
+ REG_WRITE(map->dpll, temp | MDFLD_PWR_GATE_EN);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(5000);
}
@@ -373,41 +326,15 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int dspbase_reg = MRST_DSPABASE;
- int pipeconf_reg = PIPEACONF;
- u32 pipestat_reg = PIPEASTAT;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 pipeconf = dev_priv->pipeconf[pipe];
u32 temp;
int timeout = 0;
dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe);
-/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */
-/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */
-
- switch (pipe) {
- case 0:
- break;
- case 1:
- dpll_reg = DPLL_B;
- dspcntr_reg = DSPBCNTR;
- dspbase_reg = MRST_DSPBBASE;
- pipeconf_reg = PIPEBCONF;
- dpll_reg = MDFLD_DPLL_B;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- dspbase_reg = MDFLD_DSPCBASE;
- pipeconf_reg = PIPECCONF;
- pipestat_reg = PIPECSTAT;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return;
- }
+ /* Note: the old code used the pipe A status register for pipe B, but
that appears to be a bug */
if (!gma_power_begin(dev, true))
return;
@@ -420,25 +347,25 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
/* When ungating power of the DPLL, wait 0.5us
before enabling the VCO */
if (temp & MDFLD_PWR_GATE_EN) {
temp &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(dpll_reg, temp);
+ REG_WRITE(map->dpll, temp);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/**
* wait for DSI PLL to lock
@@ -446,25 +373,25 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
* since both MIPI pipes share the same PLL.
*/
while ((pipe != 2) && (timeout < 20000) &&
- !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
udelay(150);
timeout++;
}
}
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0) {
- REG_WRITE(pipeconf_reg, pipeconf);
+ REG_WRITE(map->conf, pipeconf);
/* Wait for the pipe enable to take effect. */
mdfldWaitForPipeEnable(dev, pipe);
@@ -473,39 +400,39 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
/*workaround for sighting 3741701 Random X blank display*/
/*perform w/a in video mode only on pipe A or C*/
if (pipe == 0 || pipe == 2) {
- REG_WRITE(pipestat_reg, REG_READ(pipestat_reg));
+ REG_WRITE(map->status, REG_READ(map->status));
msleep(100);
- if (PIPE_VBLANK_STATUS & REG_READ(pipestat_reg))
+ if (PIPE_VBLANK_STATUS & REG_READ(map->status))
dev_dbg(dev->dev, "OK");
else {
dev_dbg(dev->dev, "STUCK!!!!");
/*shutdown controller*/
- temp = REG_READ(dspcntr_reg);
- REG_WRITE(dspcntr_reg,
+ temp = REG_READ(map->cntr);
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
/*mdfld_dsi_dpi_shut_down(dev, pipe);*/
REG_WRITE(0xb048, 1);
msleep(100);
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
temp &= ~PIPEACONF_ENABLE;
- REG_WRITE(pipeconf_reg, temp);
+ REG_WRITE(map->conf, temp);
msleep(100); /*wait for pipe disable*/
REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0);
msleep(100);
REG_WRITE(0xb004, REG_READ(0xb004));
/* try to bring the controller back up again*/
REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1);
- temp = REG_READ(dspcntr_reg);
- REG_WRITE(dspcntr_reg,
+ temp = REG_READ(map->cntr);
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
/*mdfld_dsi_dpi_turn_on(dev, pipe);*/
REG_WRITE(0xb048, 2);
msleep(100);
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
temp |= PIPEACONF_ENABLE;
- REG_WRITE(pipeconf_reg, temp);
+ REG_WRITE(map->conf, temp);
}
}
@@ -529,35 +456,35 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
temp &= ~PIPEACONF_ENABLE;
temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
- REG_WRITE(pipeconf_reg, temp);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp);
+ REG_READ(map->conf);
/* Wait for the pipe disable to take effect. */
mdfldWaitForPipeDisable(dev, pipe);
}
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if (temp & DPLL_VCO_ENABLE) {
if ((pipe != 1 && !((REG_READ(PIPEACONF)
| REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
|| pipe == 1) {
temp &= ~(DPLL_VCO_ENABLE);
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to turn off. */
/* FIXME_MDFLD PO may need more delay */
udelay(500);
@@ -764,21 +691,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = psb_intel_crtc->pipe;
- int fp_reg = MRST_FPA0;
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int pipeconf_reg = PIPEACONF;
- int htot_reg = HTOTAL_A;
- int hblank_reg = HBLANK_A;
- int hsync_reg = HSYNC_A;
- int vtot_reg = VTOTAL_A;
- int vblank_reg = VBLANK_A;
- int vsync_reg = VSYNC_A;
- int dspsize_reg = DSPASIZE;
- int dsppos_reg = DSPAPOS;
- int pipesrc_reg = PIPEASRC;
- u32 *pipeconf = &dev_priv->pipeconf[pipe];
- u32 *dspcntr = &dev_priv->dspcntr[pipe];
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
clk_tmp = 0;
@@ -806,45 +719,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
}
#endif
- switch (pipe) {
- case 0:
- break;
- case 1:
- fp_reg = FPB0;
- dpll_reg = DPLL_B;
- dspcntr_reg = DSPBCNTR;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- pipesrc_reg = PIPEBSRC;
- fp_reg = MDFLD_DPLL_DIV0;
- dpll_reg = MDFLD_DPLL_B;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- pipesrc_reg = PIPECSRC;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number.\n");
- return 0;
- }
-
ret = check_fb(crtc->fb);
if (ret)
return ret;
@@ -929,21 +803,21 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
* contained within the displayable area of the screen image
* (frame buffer).
*/
- REG_WRITE(dspsize_reg, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
+ REG_WRITE(map->size, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
| (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
/* Set the CRTC with encoder mode. */
- REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16)
+ REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16)
| (mode->crtc_vdisplay - 1));
} else {
- REG_WRITE(dspsize_reg,
+ REG_WRITE(map->size,
((mode->crtc_vdisplay - 1) << 16) |
(mode->crtc_hdisplay - 1));
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->src,
((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1));
}
- REG_WRITE(dsppos_reg, 0);
+ REG_WRITE(map->pos, 0);
if (psb_intel_encoder)
drm_connector_property_get_value(connector,
@@ -961,34 +835,34 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
offsetY = (adjusted_mode->crtc_vdisplay -
mode->crtc_vdisplay) / 2;
- REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start -
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start -
offsetX - 1) |
((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start -
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start -
offsetX - 1) |
((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start -
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start -
offsetY - 1) |
((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start -
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start -
offsetY - 1) |
((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
} else {
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
}
@@ -1000,12 +874,12 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
}
/* setup pipeconf */
- *pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
+ dev_priv->pipeconf[pipe] = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
/* Set up the display plane register */
- *dspcntr = REG_READ(dspcntr_reg);
- *dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
- *dspcntr |= DISPLAY_PLANE_ENABLE;
+ dev_priv->dspcntr[pipe] = REG_READ(map->cntr);
+ dev_priv->dspcntr[pipe] |= pipe << DISPPLANE_SEL_PIPE_POS;
+ dev_priv->dspcntr[pipe] |= DISPLAY_PLANE_ENABLE;
if (is_mipi2)
goto mrst_crtc_mode_set_exit;
@@ -1070,21 +944,21 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
clock.p1, m_conv);
}
- dpll = REG_READ(dpll_reg);
+ dpll = REG_READ(map->dpll);
if (dpll & DPLL_VCO_ENABLE) {
dpll &= ~DPLL_VCO_ENABLE;
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
/* reset M1, N1 & P1 */
- REG_WRITE(fp_reg, 0);
+ REG_WRITE(map->fp0, 0);
dpll &= ~MDFLD_P1_MASK;
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->dpll, dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
@@ -1093,7 +967,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
* enable the VCO */
if (dpll & MDFLD_PWR_GATE_EN) {
dpll &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->dpll, dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
}
@@ -1134,18 +1008,18 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
fp = 0x000000c1;
}
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll);
/* FIXME_MDFLD PO - change 500 to 1 after PO */
udelay(500);
dpll |= DPLL_VCO_ENABLE;
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* wait for DSI PLL to lock */
while (timeout < 20000 &&
- !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+ !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) {
udelay(150);
timeout++;
}
@@ -1155,11 +1029,11 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi);
- REG_WRITE(pipeconf_reg, *pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, dev_priv->pipeconf[pipe]);
+ REG_READ(map->conf);
/* Wait for the pipe enable to take effect. */
- REG_WRITE(dspcntr_reg, *dspcntr);
+ REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
psb_intel_wait_for_vblank(dev);
mrst_crtc_mode_set_exit:
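mdfldWaitForPipeEnable()/mdfldWaitForPipeDisable() above keep their shape after the conversion: poll map->conf for the pipe-state bit, bounded by COUNT_MAX so a wedged pipe cannot hang the caller. A standalone sketch of that bounded-poll idiom; the bit position, the loop bound and the register stub are invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define COUNT_MAX_SKETCH 100		/* made-up bound, not the driver's */
#define PIPE_STATE_BIT   (1u << 30)	/* placeholder for PIPEACONF_PIPE_STATE */

static uint32_t fake_conf;		/* stands in for the pipe config register */

static uint32_t reg_read(void)
{
	return fake_conf;
}

/* Wait until the pipe-state bit matches the wanted value, but give up
 * after a fixed number of polls instead of spinning forever. */
static bool wait_for_pipe_state(bool enabled)
{
	for (int count = 0; count < COUNT_MAX_SKETCH; count++) {
		bool state = (reg_read() & PIPE_STATE_BIT) != 0;

		if (state == enabled)
			return true;
		/* the real driver delays between polls */
	}
	return false;
}

int main(void)
{
	fake_conf = PIPE_STATE_BIT;
	printf("enable seen:  %d\n", wait_for_pipe_state(true));
	fake_conf = 0;
	printf("disable seen: %d\n", wait_for_pipe_state(false));
	return 0;
}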
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index a39b0d0d680f..f821c835ca90 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -162,12 +162,10 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
if (!gma_power_begin(dev, true))
@@ -181,32 +179,32 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
psb_intel_crtc_load_lut(crtc);
@@ -223,28 +221,28 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Disable the VGA plane that we never use */
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
}
/* Wait for the pipe disable to take effect. */
psb_intel_wait_for_vblank(dev);
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
}
/* Wait for the clocks to turn off. */
@@ -292,17 +290,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = psb_intel_crtc->pipe;
- int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
- int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
struct oaktrail_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
@@ -350,7 +338,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
if (oaktrail_panel_fitter_pipe(dev) == pipe)
REG_WRITE(PFIT_CONTROL, 0);
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->src,
((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1));
@@ -369,34 +357,34 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
offsetY = (adjusted_mode->crtc_vdisplay -
mode->crtc_vdisplay) / 2;
- REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg,
+ REG_WRITE(map->hblank,
(adjusted_mode->crtc_hblank_start - offsetX - 1) |
((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
- REG_WRITE(hsync_reg,
+ REG_WRITE(map->hsync,
(adjusted_mode->crtc_hsync_start - offsetX - 1) |
((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
- REG_WRITE(vblank_reg,
+ REG_WRITE(map->vblank,
(adjusted_mode->crtc_vblank_start - offsetY - 1) |
((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
- REG_WRITE(vsync_reg,
+ REG_WRITE(map->vsync,
(adjusted_mode->crtc_vsync_start - offsetY - 1) |
((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
} else {
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
}
@@ -408,10 +396,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
}
/* setup pipeconf */
- pipeconf = REG_READ(pipeconf_reg);
+ pipeconf = REG_READ(map->conf);
/* Set up the display plane register */
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr |= DISPPLANE_GAMMA_ENABLE;
if (pipe == 0)
@@ -467,30 +455,30 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
mrstPrintPll("chosen", &clock);
if (dpll & DPLL_VCO_ENABLE) {
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Check the DPLLA lock bit PIPEACONF[29] */
udelay(150);
}
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
/* write it again -- the BIOS does, after all */
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, pipeconf);
+ REG_READ(map->conf);
psb_intel_wait_for_vblank(dev);
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
psb_intel_wait_for_vblank(dev);
oaktrail_crtc_mode_set_exit:
@@ -509,15 +497,13 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
int ret = 0;
@@ -533,9 +519,9 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
start = psbfb->gtt->offset;
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -557,12 +543,12 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
ret = -EINVAL;
goto pipe_set_base_exit;
}
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
- REG_WRITE(dspbase, offset);
- REG_READ(dspbase);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
+ REG_WRITE(map->base, offset);
+ REG_READ(map->base);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
pipe_set_base_exit:
gma_power_end(dev);
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 0bb74cc3ecf8..7a8ff8e2dfc0 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -187,6 +187,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_save_area *regs = &dev_priv->regs;
+ struct psb_pipe *p = &regs->pipe[0];
int i;
u32 pp_stat;
@@ -201,24 +202,24 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
regs->psb.saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
/* Pipe & plane A info */
- regs->psb.savePIPEACONF = PSB_RVDC32(PIPEACONF);
- regs->psb.savePIPEASRC = PSB_RVDC32(PIPEASRC);
- regs->psb.saveFPA0 = PSB_RVDC32(MRST_FPA0);
- regs->psb.saveFPA1 = PSB_RVDC32(MRST_FPA1);
- regs->psb.saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
- regs->psb.saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
- regs->psb.saveHBLANK_A = PSB_RVDC32(HBLANK_A);
- regs->psb.saveHSYNC_A = PSB_RVDC32(HSYNC_A);
- regs->psb.saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
- regs->psb.saveVBLANK_A = PSB_RVDC32(VBLANK_A);
- regs->psb.saveVSYNC_A = PSB_RVDC32(VSYNC_A);
+ p->conf = PSB_RVDC32(PIPEACONF);
+ p->src = PSB_RVDC32(PIPEASRC);
+ p->fp0 = PSB_RVDC32(MRST_FPA0);
+ p->fp1 = PSB_RVDC32(MRST_FPA1);
+ p->dpll = PSB_RVDC32(MRST_DPLL_A);
+ p->htotal = PSB_RVDC32(HTOTAL_A);
+ p->hblank = PSB_RVDC32(HBLANK_A);
+ p->hsync = PSB_RVDC32(HSYNC_A);
+ p->vtotal = PSB_RVDC32(VTOTAL_A);
+ p->vblank = PSB_RVDC32(VBLANK_A);
+ p->vsync = PSB_RVDC32(VSYNC_A);
regs->psb.saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
- regs->psb.saveDSPACNTR = PSB_RVDC32(DSPACNTR);
- regs->psb.saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
- regs->psb.saveDSPAADDR = PSB_RVDC32(DSPABASE);
- regs->psb.saveDSPASURF = PSB_RVDC32(DSPASURF);
- regs->psb.saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
- regs->psb.saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
+ p->cntr = PSB_RVDC32(DSPACNTR);
+ p->stride = PSB_RVDC32(DSPASTRIDE);
+ p->addr = PSB_RVDC32(DSPABASE);
+ p->surf = PSB_RVDC32(DSPASURF);
+ p->linoff = PSB_RVDC32(DSPALINOFF);
+ p->tileoff = PSB_RVDC32(DSPATILEOFF);
/* Save cursor regs */
regs->psb.saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
@@ -227,7 +228,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
/* Save palette (gamma) */
for (i = 0; i < 256; i++)
- regs->psb.save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
+ p->palette[i] = PSB_RVDC32(PALETTE_A + (i << 2));
if (dev_priv->hdmi_priv)
oaktrail_hdmi_save(dev);
@@ -300,6 +301,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_save_area *regs = &dev_priv->regs;
+ struct psb_pipe *p = &regs->pipe[0];
u32 pp_stat;
int i;
@@ -317,21 +319,21 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
PSB_WVDC32(0x80000000, VGACNTRL);
/* set the plls */
- PSB_WVDC32(regs->psb.saveFPA0, MRST_FPA0);
- PSB_WVDC32(regs->psb.saveFPA1, MRST_FPA1);
+ PSB_WVDC32(p->fp0, MRST_FPA0);
+ PSB_WVDC32(p->fp1, MRST_FPA1);
/* Actually enable it */
- PSB_WVDC32(regs->psb.saveDPLL_A, MRST_DPLL_A);
+ PSB_WVDC32(p->dpll, MRST_DPLL_A);
DRM_UDELAY(150);
/* Restore mode */
- PSB_WVDC32(regs->psb.saveHTOTAL_A, HTOTAL_A);
- PSB_WVDC32(regs->psb.saveHBLANK_A, HBLANK_A);
- PSB_WVDC32(regs->psb.saveHSYNC_A, HSYNC_A);
- PSB_WVDC32(regs->psb.saveVTOTAL_A, VTOTAL_A);
- PSB_WVDC32(regs->psb.saveVBLANK_A, VBLANK_A);
- PSB_WVDC32(regs->psb.saveVSYNC_A, VSYNC_A);
- PSB_WVDC32(regs->psb.savePIPEASRC, PIPEASRC);
+ PSB_WVDC32(p->htotal, HTOTAL_A);
+ PSB_WVDC32(p->hblank, HBLANK_A);
+ PSB_WVDC32(p->hsync, HSYNC_A);
+ PSB_WVDC32(p->vtotal, VTOTAL_A);
+ PSB_WVDC32(p->vblank, VBLANK_A);
+ PSB_WVDC32(p->vsync, VSYNC_A);
+ PSB_WVDC32(p->src, PIPEASRC);
PSB_WVDC32(regs->psb.saveBCLRPAT_A, BCLRPAT_A);
/* Restore performance mode*/
@@ -339,16 +341,16 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
/* Enable the pipe*/
if (dev_priv->iLVDS_enable)
- PSB_WVDC32(regs->psb.savePIPEACONF, PIPEACONF);
+ PSB_WVDC32(p->conf, PIPEACONF);
/* Set up the plane*/
- PSB_WVDC32(regs->psb.saveDSPALINOFF, DSPALINOFF);
- PSB_WVDC32(regs->psb.saveDSPASTRIDE, DSPASTRIDE);
- PSB_WVDC32(regs->psb.saveDSPATILEOFF, DSPATILEOFF);
+ PSB_WVDC32(p->linoff, DSPALINOFF);
+ PSB_WVDC32(p->stride, DSPASTRIDE);
+ PSB_WVDC32(p->tileoff, DSPATILEOFF);
/* Enable the plane */
- PSB_WVDC32(regs->psb.saveDSPACNTR, DSPACNTR);
- PSB_WVDC32(regs->psb.saveDSPASURF, DSPASURF);
+ PSB_WVDC32(p->cntr, DSPACNTR);
+ PSB_WVDC32(p->surf, DSPASURF);
/* Enable Cursor A */
PSB_WVDC32(regs->psb.saveDSPACURSOR_CTRL, CURACNTR);
@@ -357,7 +359,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
/* Restore palette (gamma) */
for (i = 0; i < 256; i++)
- PSB_WVDC32(regs->psb.save_palette_a[i], PALETTE_A + (i << 2));
+ PSB_WVDC32(p->palette[i], PALETTE_A + (i << 2));
if (dev_priv->hdmi_priv)
oaktrail_hdmi_restore(dev);
@@ -454,11 +456,65 @@ static int oaktrail_power_up(struct drm_device *dev)
return 0;
}
+/* Oaktrail */
+static const struct psb_offset oaktrail_regmap[2] = {
+ {
+ .fp0 = MRST_FPA0,
+ .fp1 = MRST_FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = MRST_DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .surf = DSPASURF,
+ .addr = MRST_DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .surf = DSPBSURF,
+ .addr = DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ },
+};
static int oaktrail_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
int ret;
+
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+
+ dev_priv->regmap = oaktrail_regmap;
ret = mid_chip_setup(dev);
if (ret < 0)
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 25956601191f..c10899c953b9 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -434,6 +434,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct psb_state *regs = &dev_priv->regs.psb;
+ struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
int i;
/* dpll */
@@ -444,14 +445,14 @@ void oaktrail_hdmi_save(struct drm_device *dev)
hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
/* pipe B */
- regs->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
- regs->savePIPEBSRC = PSB_RVDC32(PIPEBSRC);
- regs->saveHTOTAL_B = PSB_RVDC32(HTOTAL_B);
- regs->saveHBLANK_B = PSB_RVDC32(HBLANK_B);
- regs->saveHSYNC_B = PSB_RVDC32(HSYNC_B);
- regs->saveVTOTAL_B = PSB_RVDC32(VTOTAL_B);
- regs->saveVBLANK_B = PSB_RVDC32(VBLANK_B);
- regs->saveVSYNC_B = PSB_RVDC32(VSYNC_B);
+ pipeb->conf = PSB_RVDC32(PIPEBCONF);
+ pipeb->src = PSB_RVDC32(PIPEBSRC);
+ pipeb->htotal = PSB_RVDC32(HTOTAL_B);
+ pipeb->hblank = PSB_RVDC32(HBLANK_B);
+ pipeb->hsync = PSB_RVDC32(HSYNC_B);
+ pipeb->vtotal = PSB_RVDC32(VTOTAL_B);
+ pipeb->vblank = PSB_RVDC32(VBLANK_B);
+ pipeb->vsync = PSB_RVDC32(VSYNC_B);
hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
@@ -463,12 +464,12 @@ void oaktrail_hdmi_save(struct drm_device *dev)
hdmi_dev->savePCH_VSYNC_B = PSB_RVDC32(PCH_VSYNC_B);
/* plane */
- regs->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
- regs->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
- regs->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
- regs->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
- regs->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
- regs->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
+ pipeb->cntr = PSB_RVDC32(DSPBCNTR);
+ pipeb->stride = PSB_RVDC32(DSPBSTRIDE);
+ pipeb->addr = PSB_RVDC32(DSPBBASE);
+ pipeb->surf = PSB_RVDC32(DSPBSURF);
+ pipeb->linoff = PSB_RVDC32(DSPBLINOFF);
+ pipeb->tileoff = PSB_RVDC32(DSPBTILEOFF);
/* cursor B */
regs->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
@@ -477,7 +478,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
/* save palette */
for (i = 0; i < 256; i++)
- regs->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
+ pipeb->palette[i] = PSB_RVDC32(PALETTE_B + (i << 2));
}
/* restore HDMI register state */
@@ -486,6 +487,7 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct psb_state *regs = &dev_priv->regs.psb;
+ struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
int i;
/* dpll */
@@ -497,13 +499,13 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
DRM_UDELAY(150);
/* pipe */
- PSB_WVDC32(regs->savePIPEBSRC, PIPEBSRC);
- PSB_WVDC32(regs->saveHTOTAL_B, HTOTAL_B);
- PSB_WVDC32(regs->saveHBLANK_B, HBLANK_B);
- PSB_WVDC32(regs->saveHSYNC_B, HSYNC_B);
- PSB_WVDC32(regs->saveVTOTAL_B, VTOTAL_B);
- PSB_WVDC32(regs->saveVBLANK_B, VBLANK_B);
- PSB_WVDC32(regs->saveVSYNC_B, VSYNC_B);
+ PSB_WVDC32(pipeb->src, PIPEBSRC);
+ PSB_WVDC32(pipeb->htotal, HTOTAL_B);
+ PSB_WVDC32(pipeb->hblank, HBLANK_B);
+ PSB_WVDC32(pipeb->hsync, HSYNC_B);
+ PSB_WVDC32(pipeb->vtotal, VTOTAL_B);
+ PSB_WVDC32(pipeb->vblank, VBLANK_B);
+ PSB_WVDC32(pipeb->vsync, VSYNC_B);
PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
@@ -513,15 +515,15 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B, PCH_VSYNC_B);
- PSB_WVDC32(regs->savePIPEBCONF, PIPEBCONF);
+ PSB_WVDC32(pipeb->conf, PIPEBCONF);
PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
/* plane */
- PSB_WVDC32(regs->saveDSPBLINOFF, DSPBLINOFF);
- PSB_WVDC32(regs->saveDSPBSTRIDE, DSPBSTRIDE);
- PSB_WVDC32(regs->saveDSPBTILEOFF, DSPBTILEOFF);
- PSB_WVDC32(regs->saveDSPBCNTR, DSPBCNTR);
- PSB_WVDC32(regs->saveDSPBSURF, DSPBSURF);
+ PSB_WVDC32(pipeb->linoff, DSPBLINOFF);
+ PSB_WVDC32(pipeb->stride, DSPBSTRIDE);
+ PSB_WVDC32(pipeb->tileoff, DSPBTILEOFF);
+ PSB_WVDC32(pipeb->cntr, DSPBCNTR);
+ PSB_WVDC32(pipeb->surf, DSPBSURF);
/* cursor B */
PSB_WVDC32(regs->saveDSPBCURSOR_CTRL, CURBCNTR);
@@ -530,5 +532,5 @@ void oaktrail_hdmi_restore(struct drm_device *dev)
/* restore palette */
for (i = 0; i < 256; i++)
- PSB_WVDC32(regs->save_palette_b[i], PALETTE_B + (i << 2));
+ PSB_WVDC32(pipeb->palette[i], PALETTE_B + (i << 2));
}
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index 05661bfeac75..4f186eca3a30 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -21,10 +21,8 @@
* DEALINGS IN THE SOFTWARE.
*
*/
-#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <linux/acpi_io.h>
-#endif
#include "psb_drv.h"
#include "psb_intel_reg.h"
@@ -151,7 +149,6 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
struct drm_psb_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
struct backlight_device *bd = dev_priv->backlight_device;
- u32 max;
DRM_DEBUG_DRIVER("asle set backlight %x\n", bclp);
@@ -165,11 +162,12 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
if (bclp > 255)
return ASLE_BACKLIGHT_FAILED;
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- max = bd->props.max_brightness;
- bd->props.brightness = bclp * max / 255;
- backlight_update_status(bd);
-#endif
+ if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
+ int max = bd->props.max_brightness;
+ bd->props.brightness = bclp * max / 255;
+ backlight_update_status(bd);
+ }
+
asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID;
return 0;
@@ -311,11 +309,7 @@ int psb_intel_opregion_setup(struct drm_device *dev)
return -ENOTSUPP;
}
DRM_DEBUG("OpRegion detected at 0x%8x\n", opregion_phy);
-#ifdef CONFIG_ACPI
base = acpi_os_ioremap(opregion_phy, 8*1024);
-#else
- base = ioremap(opregion_phy, 8*1024);
-#endif
if (!base)
return -ENOMEM;
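Illustrative aside (not part of the patch): the backlight hunk above swaps an #ifdef block for the config_enabled() idiom, which keeps the conditional code visible to the compiler: it is parsed and type-checked in every configuration and simply optimised away when the option is off. A minimal sketch of the same pattern follows; the helper name is hypothetical.

/* Hypothetical helper, assuming the usual psb_drv.h / <linux/backlight.h> context. */
static void example_set_backlight(struct backlight_device *bd, u32 bclp)
{
	if (config_enabled(CONFIG_BACKLIGHT_CLASS_DEVICE)) {
		/* Compiled in every configuration, discarded when the option is off */
		bd->props.brightness = bclp * bd->props.max_brightness / 255;
		backlight_update_status(bd);
	}
}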
diff --git a/drivers/gpu/drm/gma500/opregion.h b/drivers/gpu/drm/gma500/opregion.h
index a392ea8908b7..72dc6b921265 100644
--- a/drivers/gpu/drm/gma500/opregion.h
+++ b/drivers/gpu/drm/gma500/opregion.h
@@ -22,8 +22,28 @@
*
*/
+#if defined(CONFIG_ACPI)
extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
-extern void psb_intel_opregion_enable_asle(struct drm_device *dev);
extern void psb_intel_opregion_init(struct drm_device *dev);
extern void psb_intel_opregion_fini(struct drm_device *dev);
extern int psb_intel_opregion_setup(struct drm_device *dev);
+
+#else
+
+extern inline void psb_intel_opregion_asle_intr(struct drm_device *dev)
+{
+}
+
+extern inline void psb_intel_opregion_init(struct drm_device *dev)
+{
+}
+
+extern inline void psb_intel_opregion_fini(struct drm_device *dev)
+{
+}
+
+extern inline int psb_intel_opregion_setup(struct drm_device *dev)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index e95cddbceb60..1726c04dedda 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -289,8 +289,62 @@ static void psb_get_core_freq(struct drm_device *dev)
}
}
+/* Poulsbo */
+static const struct psb_offset psb_regmap[2] = {
+ {
+ .fp0 = FPA0,
+ .fp1 = FPA1,
+ .cntr = DSPACNTR,
+ .conf = PIPEACONF,
+ .src = PIPEASRC,
+ .dpll = DPLL_A,
+ .htotal = HTOTAL_A,
+ .hblank = HBLANK_A,
+ .hsync = HSYNC_A,
+ .vtotal = VTOTAL_A,
+ .vblank = VBLANK_A,
+ .vsync = VSYNC_A,
+ .stride = DSPASTRIDE,
+ .size = DSPASIZE,
+ .pos = DSPAPOS,
+ .base = DSPABASE,
+ .surf = DSPASURF,
+ .addr = DSPABASE,
+ .status = PIPEASTAT,
+ .linoff = DSPALINOFF,
+ .tileoff = DSPATILEOFF,
+ .palette = PALETTE_A,
+ },
+ {
+ .fp0 = FPB0,
+ .fp1 = FPB1,
+ .cntr = DSPBCNTR,
+ .conf = PIPEBCONF,
+ .src = PIPEBSRC,
+ .dpll = DPLL_B,
+ .htotal = HTOTAL_B,
+ .hblank = HBLANK_B,
+ .hsync = HSYNC_B,
+ .vtotal = VTOTAL_B,
+ .vblank = VBLANK_B,
+ .vsync = VSYNC_B,
+ .stride = DSPBSTRIDE,
+ .size = DSPBSIZE,
+ .pos = DSPBPOS,
+ .base = DSPBBASE,
+ .surf = DSPBSURF,
+ .addr = DSPBBASE,
+ .status = PIPEBSTAT,
+ .linoff = DSPBLINOFF,
+ .tileoff = DSPBTILEOFF,
+ .palette = PALETTE_B,
+ }
+};
+
static int psb_chip_setup(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ dev_priv->regmap = psb_regmap;
psb_get_core_freq(dev);
gma_intel_setup_gmbus(dev);
psb_intel_opregion_init(dev);
@@ -298,8 +352,17 @@ static int psb_chip_setup(struct drm_device *dev)
return 0;
}
+/* Not exactly an erratum, more an irritation */
+static void psb_chip_errata(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ psb_lid_timer_init(dev_priv);
+}
+
static void psb_chip_teardown(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}
@@ -313,6 +376,7 @@ const struct psb_ops psb_chip_ops = {
.sgx_offset = PSB_SGX_OFFSET,
.chip_setup = psb_chip_setup,
.chip_teardown = psb_chip_teardown,
+ .errata = psb_chip_errata,
.crtc_helper = &psb_intel_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
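Illustrative aside (not part of the patch): the lid-timer setup above moves out of the common load path and behind the new .errata hook in psb_chip_ops. The call site is not shown in this excerpt; presumably the shared code invokes the hook once chip setup has run, along the lines of the hypothetical helper below.

/* Assumed call site in the shared load path (an assumption, not taken from this diff). */
static void example_run_errata(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->ops->errata)
		dev_priv->ops->errata(dev);
}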
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 0e85978877e8..0c995ba0f2ec 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -79,6 +79,14 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
{ 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
{ 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
{ 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bea, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0beb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bec, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bee, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0bef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
#endif
{ 0, }
};
@@ -144,10 +152,6 @@ static void psb_lastclose(struct drm_device *dev)
return;
}
-static void psb_do_takedown(struct drm_device *dev)
-{
-}
-
static int psb_do_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -172,24 +176,6 @@ static int psb_do_init(struct drm_device *dev)
dev_priv->gatt_free_offset = pg->mmu_gatt_start +
(stolen_gtt << PAGE_SHIFT) * 1024;
- if (1 || drm_debug) {
- uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
- uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
- DRM_INFO("SGX core id = 0x%08x\n", core_id);
- DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
- (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
- _PSB_CC_REVISION_MAJOR_SHIFT,
- (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
- _PSB_CC_REVISION_MINOR_SHIFT);
- DRM_INFO
- ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
- (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
- _PSB_CC_REVISION_MAINTENANCE_SHIFT,
- (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
- _PSB_CC_REVISION_DESIGNER_SHIFT);
- }
-
-
spin_lock_init(&dev_priv->irqmask_lock);
spin_lock_init(&dev_priv->lock_2d);
@@ -204,7 +190,6 @@ static int psb_do_init(struct drm_device *dev)
PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
return 0;
out_err:
- psb_do_takedown(dev);
return ret;
}
@@ -214,17 +199,16 @@ static int psb_driver_unload(struct drm_device *dev)
/* Kill vblank etc here */
- gma_backlight_exit(dev);
- psb_modeset_cleanup(dev);
if (dev_priv) {
- psb_intel_opregion_fini(dev);
- psb_lid_timer_takedown(dev_priv);
+ if (dev_priv->backlight_device)
+ gma_backlight_exit(dev);
+ psb_modeset_cleanup(dev);
if (dev_priv->ops->chip_teardown)
dev_priv->ops->chip_teardown(dev);
- psb_do_takedown(dev);
+ psb_intel_opregion_fini(dev);
if (dev_priv->pf_pd) {
psb_mmu_free_pagedir(dev_priv->pf_pd);
@@ -258,15 +242,13 @@ static int psb_driver_unload(struct drm_device *dev)
dev_priv->sgx_reg = NULL;
}
+ /* Destroy VBT data */
+ psb_intel_destroy_bios(dev);
+
kfree(dev_priv);
dev->dev_private = NULL;
-
- /*destroy VBT data*/
- psb_intel_destroy_bios(dev);
}
-
gma_power_uninit(dev);
-
return 0;
}
@@ -290,11 +272,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
pci_set_master(dev->pdev);
- if (!IS_PSB(dev)) {
- if (pci_enable_msi(dev->pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
- }
-
dev_priv->num_pipe = dev_priv->ops->pipes;
resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
@@ -351,8 +328,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
acpi_video_register();
- if (dev_priv->opregion.lid_state)
- psb_lid_timer_init(dev_priv);
ret = drm_vblank_init(dev, dev_priv->num_pipe);
if (ret)
@@ -371,8 +346,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
- drm_irq_install(dev);
+
+ drm_irq_install(dev);
dev->vblank_disable_allowed = 1;
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 270a27bc936a..a1b0c0bce972 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -281,50 +281,72 @@ struct intel_gmbus {
};
/*
+ * Register offset maps
+ */
+
+struct psb_offset {
+ u32 fp0;
+ u32 fp1;
+ u32 cntr;
+ u32 conf;
+ u32 src;
+ u32 dpll;
+ u32 dpll_md;
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 surf;
+ u32 addr;
+ u32 base;
+ u32 status;
+ u32 linoff;
+ u32 tileoff;
+ u32 palette;
+};
+
+/*
* Register save state. This is used to hold the context when the
* device is powered off. In the case of Oaktrail this can (but does not
 * yet) include screen blank. Operations occurring during the save
* update the register cache instead.
*/
+
+/*
+ * Common status for pipes.
+ */
+struct psb_pipe {
+ u32 fp0;
+ u32 fp1;
+ u32 cntr;
+ u32 conf;
+ u32 src;
+ u32 dpll;
+ u32 dpll_md;
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 base;
+ u32 surf;
+ u32 addr;
+ u32 status;
+ u32 linoff;
+ u32 tileoff;
+ u32 palette[256];
+};
+
struct psb_state {
- uint32_t saveDSPACNTR;
- uint32_t saveDSPBCNTR;
- uint32_t savePIPEACONF;
- uint32_t savePIPEBCONF;
- uint32_t savePIPEASRC;
- uint32_t savePIPEBSRC;
- uint32_t saveFPA0;
- uint32_t saveFPA1;
- uint32_t saveDPLL_A;
- uint32_t saveDPLL_A_MD;
- uint32_t saveHTOTAL_A;
- uint32_t saveHBLANK_A;
- uint32_t saveHSYNC_A;
- uint32_t saveVTOTAL_A;
- uint32_t saveVBLANK_A;
- uint32_t saveVSYNC_A;
- uint32_t saveDSPASTRIDE;
- uint32_t saveDSPASIZE;
- uint32_t saveDSPAPOS;
- uint32_t saveDSPABASE;
- uint32_t saveDSPASURF;
- uint32_t saveDSPASTATUS;
- uint32_t saveFPB0;
- uint32_t saveFPB1;
- uint32_t saveDPLL_B;
- uint32_t saveDPLL_B_MD;
- uint32_t saveHTOTAL_B;
- uint32_t saveHBLANK_B;
- uint32_t saveHSYNC_B;
- uint32_t saveVTOTAL_B;
- uint32_t saveVBLANK_B;
- uint32_t saveVSYNC_B;
- uint32_t saveDSPBSTRIDE;
- uint32_t saveDSPBSIZE;
- uint32_t saveDSPBPOS;
- uint32_t saveDSPBBASE;
- uint32_t saveDSPBSURF;
- uint32_t saveDSPBSTATUS;
uint32_t saveVCLK_DIVISOR_VGA0;
uint32_t saveVCLK_DIVISOR_VGA1;
uint32_t saveVCLK_POST_DIV;
@@ -339,14 +361,8 @@ struct psb_state {
uint32_t savePP_CONTROL;
uint32_t savePP_CYCLE;
uint32_t savePFIT_CONTROL;
- uint32_t savePaletteA[256];
- uint32_t savePaletteB[256];
uint32_t saveCLOCKGATING;
uint32_t saveDSPARB;
- uint32_t saveDSPATILEOFF;
- uint32_t saveDSPBTILEOFF;
- uint32_t saveDSPAADDR;
- uint32_t saveDSPBADDR;
uint32_t savePFIT_AUTO_RATIOS;
uint32_t savePFIT_PGM_RATIOS;
uint32_t savePP_ON_DELAYS;
@@ -354,8 +370,6 @@ struct psb_state {
uint32_t savePP_DIVISOR;
uint32_t saveBCLRPAT_A;
uint32_t saveBCLRPAT_B;
- uint32_t saveDSPALINOFF;
- uint32_t saveDSPBLINOFF;
uint32_t savePERF_MODE;
uint32_t saveDSPFW1;
uint32_t saveDSPFW2;
@@ -370,8 +384,6 @@ struct psb_state {
uint32_t saveDSPBCURSOR_BASE;
uint32_t saveDSPACURSOR_POS;
uint32_t saveDSPBCURSOR_POS;
- uint32_t save_palette_a[256];
- uint32_t save_palette_b[256];
uint32_t saveOV_OVADD;
uint32_t saveOV_OGAMC0;
uint32_t saveOV_OGAMC1;
@@ -394,64 +406,7 @@ struct psb_state {
};
struct medfield_state {
- uint32_t saveDPLL_A;
- uint32_t saveFPA0;
- uint32_t savePIPEACONF;
- uint32_t saveHTOTAL_A;
- uint32_t saveHBLANK_A;
- uint32_t saveHSYNC_A;
- uint32_t saveVTOTAL_A;
- uint32_t saveVBLANK_A;
- uint32_t saveVSYNC_A;
- uint32_t savePIPEASRC;
- uint32_t saveDSPASTRIDE;
- uint32_t saveDSPALINOFF;
- uint32_t saveDSPATILEOFF;
- uint32_t saveDSPASIZE;
- uint32_t saveDSPAPOS;
- uint32_t saveDSPASURF;
- uint32_t saveDSPACNTR;
- uint32_t saveDSPASTATUS;
- uint32_t save_palette_a[256];
uint32_t saveMIPI;
-
- uint32_t saveDPLL_B;
- uint32_t saveFPB0;
- uint32_t savePIPEBCONF;
- uint32_t saveHTOTAL_B;
- uint32_t saveHBLANK_B;
- uint32_t saveHSYNC_B;
- uint32_t saveVTOTAL_B;
- uint32_t saveVBLANK_B;
- uint32_t saveVSYNC_B;
- uint32_t savePIPEBSRC;
- uint32_t saveDSPBSTRIDE;
- uint32_t saveDSPBLINOFF;
- uint32_t saveDSPBTILEOFF;
- uint32_t saveDSPBSIZE;
- uint32_t saveDSPBPOS;
- uint32_t saveDSPBSURF;
- uint32_t saveDSPBCNTR;
- uint32_t saveDSPBSTATUS;
- uint32_t save_palette_b[256];
-
- uint32_t savePIPECCONF;
- uint32_t saveHTOTAL_C;
- uint32_t saveHBLANK_C;
- uint32_t saveHSYNC_C;
- uint32_t saveVTOTAL_C;
- uint32_t saveVBLANK_C;
- uint32_t saveVSYNC_C;
- uint32_t savePIPECSRC;
- uint32_t saveDSPCSTRIDE;
- uint32_t saveDSPCLINOFF;
- uint32_t saveDSPCTILEOFF;
- uint32_t saveDSPCSIZE;
- uint32_t saveDSPCPOS;
- uint32_t saveDSPCSURF;
- uint32_t saveDSPCCNTR;
- uint32_t saveDSPCSTATUS;
- uint32_t save_palette_c[256];
uint32_t saveMIPI_C;
uint32_t savePFIT_CONTROL;
@@ -480,6 +435,7 @@ struct cdv_state {
};
struct psb_save_area {
+ struct psb_pipe pipe[3];
uint32_t saveBSM;
uint32_t saveVBT;
union {
@@ -498,6 +454,7 @@ struct psb_ops;
struct drm_psb_private {
struct drm_device *dev;
const struct psb_ops *ops;
+ const struct psb_offset *regmap;
struct child_device_config *child_dev;
int child_dev_num;
@@ -550,6 +507,7 @@ struct drm_psb_private {
* Modesetting
*/
struct psb_intel_mode_device mode_dev;
+ bool modeset; /* true if we have done the mode_device setup */
struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
@@ -808,12 +766,6 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
/*
- * intel_opregion.c
- */
-extern int gma_intel_opregion_init(struct drm_device *dev);
-extern int gma_intel_opregion_exit(struct drm_device *dev);
-
-/*
* framebuffer.c
*/
extern int psbfb_probed(struct drm_device *dev);
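Illustrative aside (not part of the patch): the psb_offset table added to this header is what lets every per-pipe register ternary in the CRTC code collapse into a single regmap lookup. A minimal before/after sketch, using only names defined in this header and in psb_intel_reg.h; the helper itself is hypothetical.

/* Hypothetical helper summarising the conversion pattern used throughout
 * this patch (assumes the psb_drv.h / psb_intel_reg.h context). */
static void example_disable_pipe(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;

	/* Old style, repeated at every call site:
	 *	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
	 *	temp = REG_READ(pipeconf_reg);
	 */

	/* New style: one table lookup per pipe register. */
	temp = REG_READ(map->conf);
	if (temp & PIPEACONF_ENABLE) {
		REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
		REG_READ(map->conf);
	}
}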
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 2616558457c8..f3a3160aafdc 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -337,15 +337,12 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_i915_master_private *master_priv; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
- int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
- int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
- int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
u32 dspcntr;
int ret = 0;
@@ -367,9 +364,9 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitches[0]);
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
- dspcntr = REG_READ(dspcntr_reg);
+ dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (crtc->fb->bits_per_pixel) {
@@ -392,18 +389,10 @@ static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
psb_gtt_unpin(psbfb->gtt);
goto psb_intel_pipe_set_base_exit;
}
- REG_WRITE(dspcntr_reg, dspcntr);
-
+ REG_WRITE(map->cntr, dspcntr);
- if (0 /* FIXMEAC - check what PSB needs */) {
- REG_WRITE(dspbase, offset);
- REG_READ(dspbase);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
- } else {
- REG_WRITE(dspbase, start + offset);
- REG_READ(dspbase);
- }
+ REG_WRITE(map->base, start + offset);
+ REG_READ(map->base);
psb_intel_pipe_cleaner:
/* If there was a previous display we can now unpin it */
@@ -424,14 +413,10 @@ psb_intel_pipe_set_base_exit:
static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_i915_master_private *master_priv; */
- /* struct drm_i915_private *dev_priv = dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
int pipe = psb_intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
/* XXX: When our outputs are all unaware of DPMS modes other than off
@@ -442,34 +427,34 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
/* Enable the DPLL */
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
}
/* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
/* Enable the plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp | DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+ REG_WRITE(map->base, REG_READ(map->base));
}
psb_intel_crtc_load_lut(crtc);
@@ -487,29 +472,29 @@ static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable display plane */
- temp = REG_READ(dspcntr_reg);
+ temp = REG_READ(map->cntr);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
+ REG_WRITE(map->cntr,
temp & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
}
/* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
+ temp = REG_READ(map->conf);
if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
}
/* Wait for vblank for the disable to take effect. */
psb_intel_wait_for_vblank(dev);
- temp = REG_READ(dpll_reg);
+ temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
}
/* Wait for the clocks to turn off. */
@@ -589,22 +574,11 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
int pipe = psb_intel_crtc->pipe;
- int fp_reg = (pipe == 0) ? FPA0 : FPB0;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
- int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
- int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
- int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
- int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
- int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
- int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
- int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
- int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
- int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
- int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
- int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
struct psb_intel_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
@@ -690,7 +664,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
/* setup pipeconf */
- pipeconf = REG_READ(pipeconf_reg);
+ pipeconf = REG_READ(map->conf);
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -712,9 +686,9 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
drm_mode_debug_printmodeline(mode);
if (dpll & DPLL_VCO_ENABLE) {
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
udelay(150);
}
@@ -747,45 +721,45 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
REG_READ(LVDS);
}
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
+ REG_WRITE(map->fp0, fp);
+ REG_WRITE(map->dpll, dpll);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
/* write it again -- the BIOS does, after all */
- REG_WRITE(dpll_reg, dpll);
+ REG_WRITE(map->dpll, dpll);
- REG_READ(dpll_reg);
+ REG_READ(map->dpll);
/* Wait for the clocks to stabilize. */
udelay(150);
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+ REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+ REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+ REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+ REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+ REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+ REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
- REG_WRITE(dspsize_reg,
+ REG_WRITE(map->size,
((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
- REG_WRITE(dsppos_reg, 0);
- REG_WRITE(pipesrc_reg,
+ REG_WRITE(map->pos, 0);
+ REG_WRITE(map->src,
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
+ REG_WRITE(map->conf, pipeconf);
+ REG_READ(map->conf);
psb_intel_wait_for_vblank(dev);
- REG_WRITE(dspcntr_reg, dspcntr);
+ REG_WRITE(map->cntr, dspcntr);
/* Flush the plane changes */
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
@@ -799,10 +773,10 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int palreg = PALETTE_A;
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
+ int palreg = map->palette;
int i;
/* The clocks have to be on to load the palette. */
@@ -811,12 +785,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
switch (psb_intel_crtc->pipe) {
case 0:
- break;
case 1:
- palreg = PALETTE_B;
- break;
- case 2:
- palreg = PALETTE_C;
break;
default:
dev_err(dev->dev, "Illegal Pipe Number.\n");
@@ -836,7 +805,7 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
gma_power_end(dev);
} else {
for (i = 0; i < 256; i++) {
- dev_priv->regs.psb.save_palette_a[i] =
+ dev_priv->regs.pipe[0].palette[i] =
((psb_intel_crtc->lut_r[i] +
psb_intel_crtc->lut_adj[i]) << 16) |
((psb_intel_crtc->lut_g[i] +
@@ -854,11 +823,10 @@ void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
static void psb_intel_crtc_save(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -867,27 +835,27 @@ static void psb_intel_crtc_save(struct drm_crtc *crtc)
return;
}
- crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
- crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
- crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
- crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
- crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
- crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
- crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
- crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
- crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
- crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
- crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
- crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
- crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+ crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+ crtc_state->savePIPECONF = REG_READ(map->conf);
+ crtc_state->savePIPESRC = REG_READ(map->src);
+ crtc_state->saveFP0 = REG_READ(map->fp0);
+ crtc_state->saveFP1 = REG_READ(map->fp1);
+ crtc_state->saveDPLL = REG_READ(map->dpll);
+ crtc_state->saveHTOTAL = REG_READ(map->htotal);
+ crtc_state->saveHBLANK = REG_READ(map->hblank);
+ crtc_state->saveHSYNC = REG_READ(map->hsync);
+ crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+ crtc_state->saveVBLANK = REG_READ(map->vblank);
+ crtc_state->saveVSYNC = REG_READ(map->vsync);
+ crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
/*NOTE: DSPSIZE DSPPOS only for psb*/
- crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
- crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+ crtc_state->saveDSPSIZE = REG_READ(map->size);
+ crtc_state->saveDSPPOS = REG_READ(map->pos);
- crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+ crtc_state->saveDSPBASE = REG_READ(map->base);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
}
@@ -898,12 +866,10 @@ static void psb_intel_crtc_save(struct drm_crtc *crtc)
static void psb_intel_crtc_restore(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- /* struct drm_psb_private * dev_priv =
- (struct drm_psb_private *)dev->dev_private; */
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- /* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
- int pipeA = (psb_intel_crtc->pipe == 0);
+ const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
uint32_t paletteReg;
int i;
@@ -913,45 +879,45 @@ static void psb_intel_crtc_restore(struct drm_crtc *crtc)
}
if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
- REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+ REG_WRITE(map->dpll,
crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_READ(map->dpll);
udelay(150);
}
- REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
- REG_READ(pipeA ? FPA0 : FPB0);
+ REG_WRITE(map->fp0, crtc_state->saveFP0);
+ REG_READ(map->fp0);
- REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
- REG_READ(pipeA ? FPA1 : FPB1);
+ REG_WRITE(map->fp1, crtc_state->saveFP1);
+ REG_READ(map->fp1);
- REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
- REG_READ(pipeA ? DPLL_A : DPLL_B);
+ REG_WRITE(map->dpll, crtc_state->saveDPLL);
+ REG_READ(map->dpll);
udelay(150);
- REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
- REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
- REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
- REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
- REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
- REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
- REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+ REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+ REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+ REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+ REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+ REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+ REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+ REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
- REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
- REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+ REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+ REG_WRITE(map->pos, crtc_state->saveDSPPOS);
- REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
- REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+ REG_WRITE(map->src, crtc_state->savePIPESRC);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
+ REG_WRITE(map->conf, crtc_state->savePIPECONF);
psb_intel_wait_for_vblank(dev);
- REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
- REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+ REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
psb_intel_wait_for_vblank(dev);
- paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+ paletteReg = map->palette;
for (i = 0; i < 256; ++i)
REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
}
@@ -1115,34 +1081,30 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct drm_psb_private *dev_priv = dev->dev_private;
int pipe = psb_intel_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
u32 fp;
struct psb_intel_clock_t clock;
bool is_lvds;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
if (gma_power_begin(dev, false)) {
- dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ dpll = REG_READ(map->dpll);
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+ fp = REG_READ(map->fp0);
else
- fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+ fp = REG_READ(map->fp1);
is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
gma_power_end(dev);
} else {
- dpll = (pipe == 0) ?
- dev_priv->regs.psb.saveDPLL_A :
- dev_priv->regs.psb.saveDPLL_B;
+ dpll = p->dpll;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA0 :
- dev_priv->regs.psb.saveFPB0;
+ fp = p->fp0;
else
- fp = (pipe == 0) ?
- dev_priv->regs.psb.saveFPA1 :
- dev_priv->regs.psb.saveFPB1;
+ fp = p->fp1;
is_lvds = (pipe == 1) && (dev_priv->regs.psb.saveLVDS &
LVDS_PORT_EN);
@@ -1202,26 +1164,20 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
int vtot;
int vsync;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
if (gma_power_begin(dev, false)) {
- htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
- hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
- vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
- vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ htot = REG_READ(map->htotal);
+ hsync = REG_READ(map->hsync);
+ vtot = REG_READ(map->vtotal);
+ vsync = REG_READ(map->vsync);
gma_power_end(dev);
} else {
- htot = (pipe == 0) ?
- dev_priv->regs.psb.saveHTOTAL_A :
- dev_priv->regs.psb.saveHTOTAL_B;
- hsync = (pipe == 0) ?
- dev_priv->regs.psb.saveHSYNC_A :
- dev_priv->regs.psb.saveHSYNC_B;
- vtot = (pipe == 0) ?
- dev_priv->regs.psb.saveVTOTAL_A :
- dev_priv->regs.psb.saveVTOTAL_B;
- vsync = (pipe == 0) ?
- dev_priv->regs.psb.saveVSYNC_A :
- dev_priv->regs.psb.saveVSYNC_B;
+ htot = p->htotal;
+ hsync = p->hsync;
+ vtot = p->vtotal;
+ vsync = p->vsync;
}
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 958b4e2d4aed..d39b15be7649 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -2038,8 +2038,7 @@ psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
struct drm_device *dev = connector->base.base.dev;
intel_attach_force_audio_property(&connector->base.base);
- if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
- intel_attach_broadcast_rgb_property(&connector->base.base);
+ intel_attach_broadcast_rgb_property(&connector->base.base);
*/
}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index b0df2949c1b7..f94792626b94 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1271,6 +1271,12 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
+static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+ .set_gpu_state = i915_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = i915_switcheroo_can_switch,
+};
+
static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1293,10 +1299,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_register_dsm_handler();
- ret = vga_switcheroo_register_client(dev->pdev,
- i915_switcheroo_set_state,
- NULL,
- i915_switcheroo_can_switch);
+ ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
if (ret)
goto cleanup_vga_client;
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
new file mode 100644
index 000000000000..d63013497f66
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -0,0 +1,15 @@
+config DRM_MGAG200
+ tristate "Kernel modesetting driver for MGA G200 server engines"
+ depends on DRM && PCI && EXPERIMENTAL
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ help
+ This is a KMS driver for the MGA G200 server chips; it
+ does not support the original MGA G200 or any of the desktop
+ chips. It requires version 0.3.0 of the modesetting userspace
+ driver, and a version of the mga driver that will fail on
+ KMS-enabled devices.
+
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
new file mode 100644
index 000000000000..7db592eedbf1
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -Iinclude/drm
+mgag200-y := mgag200_main.o mgag200_mode.o \
+ mgag200_drv.o mgag200_fb.o mgag200_i2c.o mgag200_ttm.o
+
+obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
new file mode 100644
index 000000000000..3c8e04f54713
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include <linux/console.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "mgag200_drv.h"
+
+#include "drm_pciids.h"
+
+/*
+ * This is the generic driver code. This binds the driver to the drm core,
+ * which then performs further device association and calls our graphics init
+ * functions
+ */
+int mgag200_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, mgag200_modeset, int, 0400);
+
+static struct drm_driver driver;
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_A },
+ { PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
+ { PCI_VENDOR_ID_MATROX, 0x530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EV },
+ { PCI_VENDOR_ID_MATROX, 0x532, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_WB },
+ { PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
+ { PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static int __devinit
+mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static void mga_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ drm_put_dev(dev);
+}
+
+static const struct file_operations mgag200_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = mgag200_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_USE_MTRR,
+ .load = mgag200_driver_load,
+ .unload = mgag200_driver_unload,
+ .fops = &mgag200_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .gem_init_object = mgag200_gem_init_object,
+ .gem_free_object = mgag200_gem_free_object,
+ .dumb_create = mgag200_dumb_create,
+ .dumb_map_offset = mgag200_dumb_mmap_offset,
+ .dumb_destroy = mgag200_dumb_destroy,
+};
+
+static struct pci_driver mgag200_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = mga_pci_probe,
+ .remove = mga_pci_remove,
+};
+
+static int __init mgag200_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && mgag200_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (mgag200_modeset == 0)
+ return -EINVAL;
+ return drm_pci_init(&driver, &mgag200_pci_driver);
+}
+
+static void __exit mgag200_exit(void)
+{
+ drm_pci_exit(&driver, &mgag200_pci_driver);
+}
+
+module_init(mgag200_init);
+module_exit(mgag200_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
new file mode 100644
index 000000000000..6f13b3563234
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#ifndef __MGAG200_DRV_H__
+#define __MGAG200_DRV_H__
+
+#include <video/vga.h>
+
+#include "drm/drm_fb_helper.h"
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include "mgag200_reg.h"
+
+#define DRIVER_AUTHOR "Matthew Garrett"
+
+#define DRIVER_NAME "mgag200"
+#define DRIVER_DESC "MGA G200 SE"
+#define DRIVER_DATE "20110418"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define MGAG200FB_CONN_LIMIT 1
+
+#define RREG8(reg) ioread8(((void __iomem *)mdev->rmmio) + (reg))
+#define WREG8(reg, v) iowrite8(v, ((void __iomem *)mdev->rmmio) + (reg))
+#define RREG32(reg) ioread32(((void __iomem *)mdev->rmmio) + (reg))
+#define WREG32(reg, v) iowrite32(v, ((void __iomem *)mdev->rmmio) + (reg))
+
+#define ATTR_INDEX 0x1fc0
+#define ATTR_DATA 0x1fc1
+
+#define WREG_ATTR(reg, v) \
+ do { \
+ RREG8(0x1fda); \
+ WREG8(ATTR_INDEX, reg); \
+ WREG8(ATTR_DATA, v); \
+ } while (0) \
+
+#define WREG_SEQ(reg, v) \
+ do { \
+ WREG8(MGAREG_SEQ_INDEX, reg); \
+ WREG8(MGAREG_SEQ_DATA, v); \
+ } while (0) \
+
+#define WREG_CRT(reg, v) \
+ do { \
+ WREG8(MGAREG_CRTC_INDEX, reg); \
+ WREG8(MGAREG_CRTC_DATA, v); \
+ } while (0) \
+
+
+#define WREG_ECRT(reg, v) \
+ do { \
+ WREG8(MGAREG_CRTCEXT_INDEX, reg); \
+ WREG8(MGAREG_CRTCEXT_DATA, v); \
+ } while (0) \
+
+#define GFX_INDEX 0x1fce
+#define GFX_DATA 0x1fcf
+
+#define WREG_GFX(reg, v) \
+ do { \
+ WREG8(GFX_INDEX, reg); \
+ WREG8(GFX_DATA, v); \
+ } while (0) \
+
+#define DAC_INDEX 0x3c00
+#define DAC_DATA 0x3c0a
+
+#define WREG_DAC(reg, v) \
+ do { \
+ WREG8(DAC_INDEX, reg); \
+ WREG8(DAC_DATA, v); \
+ } while (0) \
+
+#define MGA_MISC_OUT 0x1fc2
+#define MGA_MISC_IN 0x1fcc
+
+#define MGAG200_MAX_FB_HEIGHT 4096
+#define MGAG200_MAX_FB_WIDTH 4096
+
+#define MATROX_DPMS_CLEARED (-1)
+
+#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
+#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
+#define to_mga_connector(x) container_of(x, struct mga_connector, base)
+#define to_mga_framebuffer(x) container_of(x, struct mga_framebuffer, base)
+
+struct mga_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct mga_fbdev {
+ struct drm_fb_helper helper;
+ struct mga_framebuffer mfb;
+ struct list_head fbdev_list;
+ void *sysram;
+ int size;
+ struct ttm_bo_kmap_obj mapping;
+};
+
+struct mga_crtc {
+ struct drm_crtc base;
+ u8 lut_r[256], lut_g[256], lut_b[256];
+ int last_dpms;
+ bool enabled;
+};
+
+struct mga_mode_info {
+ bool mode_config_initialized;
+ struct mga_crtc *crtc;
+};
+
+struct mga_encoder {
+ struct drm_encoder base;
+ int last_dpms;
+};
+
+
+struct mga_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ struct i2c_algo_bit_data bit;
+ int data, clock;
+};
+
+struct mga_connector {
+ struct drm_connector base;
+ struct mga_i2c_chan *i2c;
+};
+
+
+struct mga_mc {
+ resource_size_t vram_size;
+ resource_size_t vram_base;
+ resource_size_t vram_window;
+};
+
+enum mga_type {
+ G200_SE_A,
+ G200_SE_B,
+ G200_WB,
+ G200_EV,
+ G200_EH,
+ G200_ER,
+};
+
+#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
+
+struct mga_device {
+ struct drm_device *dev;
+ unsigned long flags;
+
+ resource_size_t rmmio_base;
+ resource_size_t rmmio_size;
+ void __iomem *rmmio;
+
+ drm_local_map_t *framebuffer;
+
+ struct mga_mc mc;
+ struct mga_mode_info mode_info;
+
+ struct mga_fbdev *mfbdev;
+
+ bool suspended;
+ int num_crtc;
+ enum mga_type type;
+ int has_sdram;
+ struct drm_display_mode mode;
+
+ int bpp_shifts[4];
+
+ int fb_mtrr;
+
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ atomic_t validate_sequence;
+ } ttm;
+
+ u32 reg_1e24; /* SE model number */
+};
+
+
+struct mgag200_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ u32 placements[3];
+ int pin_count;
+};
+#define gem_to_mga_bo(gobj) container_of((gobj), struct mgag200_bo, gem)
+
+static inline struct mgag200_bo *
+mgag200_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct mgag200_bo, bo);
+}
+ /* mga_crtc.c */
+void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno);
+void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno);
+
+ /* mgag200_mode.c */
+int mgag200_modeset_init(struct mga_device *mdev);
+void mgag200_modeset_fini(struct mga_device *mdev);
+
+ /* mga_fbdev.c */
+int mgag200_fbdev_init(struct mga_device *mdev);
+void mgag200_fbdev_fini(struct mga_device *mdev);
+
+ /* mgag200_main.c */
+int mgag200_framebuffer_init(struct drm_device *dev,
+ struct mga_framebuffer *mfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
+int mgag200_driver_unload(struct drm_device *dev);
+int mgag200_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj);
+int mgag200_gem_init_object(struct drm_gem_object *obj);
+int mgag200_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int mgag200_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
+void mgag200_gem_free_object(struct drm_gem_object *obj);
+int
+mgag200_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset);
+ /* mga_i2c.c */
+struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
+void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+void mgag200_ttm_placement(struct mgag200_bo *bo, int domain);
+
+int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait);
+void mgag200_bo_unreserve(struct mgag200_bo *bo);
+int mgag200_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct mgag200_bo **pastbo);
+int mgag200_mm_init(struct mga_device *mdev);
+void mgag200_mm_fini(struct mga_device *mdev);
+int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
+int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr);
+int mgag200_bo_unpin(struct mgag200_bo *bo);
+int mgag200_bo_push_sysram(struct mgag200_bo *bo);
+#endif /* __MGAG200_DRV_H__ */
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
new file mode 100644
index 000000000000..880d3369760e
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#include <linux/module.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_fb_helper.h"
+
+#include <linux/fb.h>
+
+#include "mgag200_drv.h"
+
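+/*
+ * Copy the rectangle that fbcon just drew from the system-RAM shadow
+ * buffer (mfbdev->sysram) into the VRAM-backed object, kmapping the
+ * object on demand if it is not already mapped.
+ */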
+static void mga_dirty_update(struct mga_fbdev *mfbdev,
+ int x, int y, int width, int height)
+{
+ int i;
+ struct drm_gem_object *obj;
+ struct mgag200_bo *bo;
+ int src_offset, dst_offset;
+ int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
+ int ret;
+ bool unmap = false;
+
+ obj = mfbdev->mfb.obj;
+ bo = gem_to_mga_bo(obj);
+
+ ret = mgag200_bo_reserve(bo, true);
+ if (ret) {
+ DRM_ERROR("failed to reserve fb bo\n");
+ return;
+ }
+
+ if (!bo->kmap.virtual) {
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("failed to kmap fb updates\n");
+ mgag200_bo_unreserve(bo);
+ return;
+ }
+ unmap = true;
+ }
+ for (i = y; i < y + height; i++) {
+ /* assume equal stride for now */
+ src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
+
+ }
+ if (unmap)
+ ttm_bo_kunmap(&bo->kmap);
+
+ mgag200_bo_unreserve(bo);
+}
+
+static void mga_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_fillrect(info, rect);
+ mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
+ rect->height);
+}
+
+static void mga_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_copyarea(info, area);
+ mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
+ area->height);
+}
+
+static void mga_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct mga_fbdev *mfbdev = info->par;
+ sys_imageblit(info, image);
+ mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
+ image->height);
+}
+
+
+static struct fb_ops mgag200fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_fillrect = mga_fillrect,
+ .fb_copyarea = mga_copyarea,
+ .fb_imageblit = mga_imageblit,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int mgag200fb_create_object(struct mga_fbdev *afbdev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **gobj_p)
+{
+ struct drm_device *dev = afbdev->helper.dev;
+ u32 bpp, depth;
+ u32 size;
+ struct drm_gem_object *gobj;
+
+ int ret = 0;
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ size = mode_cmd->pitches[0] * mode_cmd->height;
+ ret = mgag200_gem_create(dev, size, true, &gobj);
+ if (ret)
+ return ret;
+
+ *gobj_p = gobj;
+ return ret;
+}
+
+static int mgag200fb_create(struct mga_fbdev *mfbdev,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = mfbdev->helper.dev;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct mga_device *mdev = dev->dev_private;
+ struct fb_info *info;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *gobj = NULL;
+ struct device *device = &dev->pdev->dev;
+ struct mgag200_bo *bo;
+ int ret;
+ void *sysram;
+ int size;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+ bo = gem_to_mga_bo(gobj);
+
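+ /* fbcon draws into this system-RAM shadow buffer; dirty regions are
+ * copied into the VRAM object by mga_dirty_update() */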
+ sysram = vmalloc(size);
+ if (!sysram)
+ return -ENOMEM;
+
+ info = framebuffer_alloc(0, device);
+ if (info == NULL)
+ return -ENOMEM;
+
+ info->par = mfbdev;
+
+ ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ mfbdev->sysram = sysram;
+ mfbdev->size = size;
+
+ fb = &mfbdev->mfb.base;
+
+ /* setup helper */
+ mfbdev->helper.fb = fb;
+ mfbdev->helper.fbdev = info;
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ DRM_ERROR("%s: can't allocate color map\n", info->fix.id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ strcpy(info->fix.id, "mgadrmfb");
+
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ info->fbops = &mgag200fb_ops;
+
+ /* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
+ info->apertures->ranges[0].size = mdev->mc.vram_size;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
+ sizes->fb_height);
+
+ info->screen_base = sysram;
+ info->screen_size = size;
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ DRM_DEBUG_KMS("allocated %dx%d\n",
+ fb->width, fb->height);
+ return 0;
+out:
+ return ret;
+}
+
+static int mga_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size
+ *sizes)
+{
+ struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = mgag200fb_create(mfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static int mga_fbdev_destroy(struct drm_device *dev,
+ struct mga_fbdev *mfbdev)
+{
+ struct fb_info *info;
+ struct mga_framebuffer *mfb = &mfbdev->mfb;
+
+ if (mfbdev->helper.fbdev) {
+ info = mfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+
+ if (mfb->obj) {
+ drm_gem_object_unreference_unlocked(mfb->obj);
+ mfb->obj = NULL;
+ }
+ drm_fb_helper_fini(&mfbdev->helper);
+ vfree(mfbdev->sysram);
+ drm_framebuffer_cleanup(&mfb->base);
+
+ return 0;
+}
+
+static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
+ .gamma_set = mga_crtc_fb_gamma_set,
+ .gamma_get = mga_crtc_fb_gamma_get,
+ .fb_probe = mga_fb_find_or_create_single,
+};
+
+int mgag200_fbdev_init(struct mga_device *mdev)
+{
+ struct mga_fbdev *mfbdev;
+ int ret;
+
+ mfbdev = kzalloc(sizeof(struct mga_fbdev), GFP_KERNEL);
+ if (!mfbdev)
+ return -ENOMEM;
+
+ mdev->mfbdev = mfbdev;
+ mfbdev->helper.funcs = &mga_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
+ mdev->num_crtc, MGAG200FB_CONN_LIMIT);
+ if (ret) {
+ kfree(mfbdev);
+ return ret;
+ }
+ drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
+ drm_fb_helper_initial_config(&mfbdev->helper, 32);
+
+ return 0;
+}
+
+void mgag200_fbdev_fini(struct mga_device *mdev)
+{
+ if (!mdev->mfbdev)
+ return;
+
+ mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
+ kfree(mdev->mfbdev);
+ mdev->mfbdev = NULL;
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
new file mode 100644
index 000000000000..dd3568a1b6b0
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm.h"
+
+#include "mgag200_drv.h"
+
+static int mga_i2c_read_gpio(struct mga_device *mdev)
+{
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ return RREG8(DAC_DATA);
+}
+
+static void mga_i2c_set_gpio(struct mga_device *mdev, int mask, int val)
+{
+ int tmp;
+
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+ tmp = (RREG8(DAC_DATA) & mask) | val;
+ WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+ WREG_DAC(MGA1064_GEN_IO_DATA, 0);
+}
+
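+/*
+ * The DDC lines are driven open-drain: a "1" is produced by tristating
+ * the pin (clearing its bit in GEN_IO_CTL and letting the pull-up win),
+ * a "0" by enabling the pin as an output that drives low (GEN_IO_DATA
+ * is written as 0).
+ */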
+static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
+{
+ if (state)
+ state = 0;
+ else
+ state = mask;
+ mga_i2c_set_gpio(mdev, ~mask, state);
+}
+
+static void mga_gpio_setsda(void *data, int state)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ mga_i2c_set(mdev, i2c->data, state);
+}
+
+static void mga_gpio_setscl(void *data, int state)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ mga_i2c_set(mdev, i2c->clock, state);
+}
+
+static int mga_gpio_getsda(void *data)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
+}
+
+static int mga_gpio_getscl(void *data)
+{
+ struct mga_i2c_chan *i2c = data;
+ struct mga_device *mdev = i2c->dev->dev_private;
+ return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
+}
+
+struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
+{
+ struct mga_device *mdev = dev->dev_private;
+ struct mga_i2c_chan *i2c;
+ int ret;
+ int data, clock;
+
+ WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
+ WREG_DAC(MGA1064_GEN_IO_CTL, 0);
+
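+ /* GPIO bit masks of the DDC data/clock lines; the wiring differs per chip */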
+ switch (mdev->type) {
+ case G200_SE_A:
+ case G200_SE_B:
+ case G200_EV:
+ case G200_WB:
+ data = 1;
+ clock = 2;
+ break;
+ case G200_EH:
+ case G200_ER:
+ data = 2;
+ clock = 1;
+ break;
+ default:
+ data = 2;
+ clock = 8;
+ break;
+ }
+
+ i2c = kzalloc(sizeof(struct mga_i2c_chan), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+
+ i2c->data = data;
+ i2c->clock = clock;
+ i2c->adapter.owner = THIS_MODULE;
+ i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->dev = dev;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "mga i2c");
+
+ i2c->adapter.algo_data = &i2c->bit;
+
+ i2c->bit.udelay = 10;
+ i2c->bit.timeout = 2;
+ i2c->bit.data = i2c;
+ i2c->bit.setsda = mga_gpio_setsda;
+ i2c->bit.setscl = mga_gpio_setscl;
+ i2c->bit.getsda = mga_gpio_getsda;
+ i2c->bit.getscl = mga_gpio_getscl;
+
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ kfree(i2c);
+ i2c = NULL;
+ }
+ return i2c;
+}
+
+void mgag200_i2c_destroy(struct mga_i2c_chan *i2c)
+{
+ if (!i2c)
+ return;
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
+
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
new file mode 100644
index 000000000000..636a81cd2f37
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+#include "mgag200_drv.h"
+
+static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
+ if (mga_fb->obj)
+ drm_gem_object_unreference_unlocked(mga_fb->obj);
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+static int mga_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs mga_fb_funcs = {
+ .destroy = mga_user_framebuffer_destroy,
+ .create_handle = mga_user_framebuffer_create_handle,
+};
+
+int mgag200_framebuffer_init(struct drm_device *dev,
+ struct mga_framebuffer *gfb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
+ if (ret) {
+ DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
+ return ret;
+ }
+ drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
+ gfb->obj = obj;
+ return 0;
+}
+
+static struct drm_framebuffer *
+mgag200_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct mga_framebuffer *mga_fb;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ if (obj == NULL)
+ return ERR_PTR(-ENOENT);
+
+ mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL);
+ if (!mga_fb) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj);
+ if (ret) {
+ drm_gem_object_unreference_unlocked(obj);
+ kfree(mga_fb);
+ return ERR_PTR(ret);
+ }
+ return &mga_fb->base;
+}
+
+static const struct drm_mode_config_funcs mga_mode_funcs = {
+ .fb_create = mgag200_user_framebuffer_create,
+};
+
+/* Unmap the framebuffer from the core and release the memory */
+static void mga_vram_fini(struct mga_device *mdev)
+{
+ pci_iounmap(mdev->dev->pdev, mdev->rmmio);
+ mdev->rmmio = NULL;
+ if (mdev->mc.vram_base)
+ release_mem_region(mdev->mc.vram_base, mdev->mc.vram_window);
+}
+
+static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
+{
+ int offset;
+ int orig;
+ int test1, test2;
+ int orig1, orig2;
+
+ /* Probe */
+ orig = ioread16(mem);
+ iowrite16(0, mem);
+
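+ /*
+ * Walk the aperture upwards in 16 KiB steps, writing a test pattern at
+ * each offset. Stop as soon as the pattern does not read back (no memory
+ * there) or offset 0 reads back non-zero (the write wrapped around), then
+ * back off 64 KiB and report that as the usable VRAM size.
+ */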
+ for (offset = 0x100000; offset < mdev->mc.vram_window; offset += 0x4000) {
+ orig1 = ioread8(mem + offset);
+ orig2 = ioread8(mem + offset + 0x100);
+
+ iowrite16(0xaa55, mem + offset);
+ iowrite16(0xaa55, mem + offset + 0x100);
+
+ test1 = ioread16(mem + offset);
+ test2 = ioread16(mem);
+
+ iowrite16(orig1, mem + offset);
+ iowrite16(orig2, mem + offset + 0x100);
+
+ if (test1 != 0xaa55) {
+ break;
+ }
+
+ if (test2) {
+ break;
+ }
+ }
+
+ iowrite16(orig, mem);
+ return offset - 65536;
+}
+
+/* Map the framebuffer from the card and configure the core */
+static int mga_vram_init(struct mga_device *mdev)
+{
+ void __iomem *mem;
+ struct apertures_struct *aper = alloc_apertures(1);
+
+ /* BAR 0 is VRAM */
+ mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
+ mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
+
+ aper->ranges[0].base = mdev->mc.vram_base;
+ aper->ranges[0].size = mdev->mc.vram_window;
+ aper->count = 1;
+
+ remove_conflicting_framebuffers(aper, "mgafb", true);
+
+ if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
+ "mgadrmfb_vram")) {
+ DRM_ERROR("can't reserve VRAM\n");
+ return -ENXIO;
+ }
+
+ mem = pci_iomap(mdev->dev->pdev, 0, 0);
+
+ mdev->mc.vram_size = mga_probe_vram(mdev, mem);
+
+ pci_iounmap(mdev->dev->pdev, mem);
+
+ return 0;
+}
+
+static int mgag200_device_init(struct drm_device *dev,
+ uint32_t flags)
+{
+ struct mga_device *mdev = dev->dev_private;
+ int ret, option;
+
+ mdev->type = flags;
+
+ /* Hardcode the number of CRTCs to 1 */
+ mdev->num_crtc = 1;
+
+ pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
+ mdev->has_sdram = !(option & (1 << 14));
+
+ /* BAR 0 is the framebuffer, BAR 1 contains registers */
+ mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
+ mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
+
+ if (!request_mem_region(mdev->rmmio_base, mdev->rmmio_size,
+ "mgadrmfb_mmio")) {
+ DRM_ERROR("can't reserve mmio registers\n");
+ return -ENOMEM;
+ }
+
+ mdev->rmmio = pci_iomap(dev->pdev, 1, 0);
+ if (mdev->rmmio == NULL)
+ return -ENOMEM;
+
+ /* stash G200 SE model number for later use */
+ if (IS_G200_SE(mdev))
+ mdev->reg_1e24 = RREG32(0x1e24);
+
+ ret = mga_vram_init(mdev);
+ if (ret) {
+ release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
+ return ret;
+ }
+
+ mdev->bpp_shifts[0] = 0;
+ mdev->bpp_shifts[1] = 1;
+ mdev->bpp_shifts[2] = 0;
+ mdev->bpp_shifts[3] = 2;
+ return 0;
+}
+
+void mgag200_device_fini(struct mga_device *mdev)
+{
+ release_mem_region(mdev->rmmio_base, mdev->rmmio_size);
+ mga_vram_fini(mdev);
+}
+
+/*
+ * Functions here will be called by the core once it's bound the driver to
+ * a PCI device
+ */
+
+
+int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct mga_device *mdev;
+ int r;
+
+ mdev = kzalloc(sizeof(struct mga_device), GFP_KERNEL);
+ if (mdev == NULL)
+ return -ENOMEM;
+ dev->dev_private = (void *)mdev;
+ mdev->dev = dev;
+
+ r = mgag200_device_init(dev, flags);
+ if (r) {
+ dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
+ goto out;
+ }
+ r = mgag200_mm_init(mdev);
+ if (r)
+ goto out;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.funcs = (void *)&mga_mode_funcs;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ r = mgag200_modeset_init(mdev);
+ if (r)
+ dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+out:
+ if (r)
+ mgag200_driver_unload(dev);
+ return r;
+}
+
+int mgag200_driver_unload(struct drm_device *dev)
+{
+ struct mga_device *mdev = dev->dev_private;
+
+ if (mdev == NULL)
+ return 0;
+ mgag200_modeset_fini(mdev);
+ mgag200_fbdev_fini(mdev);
+ drm_mode_config_cleanup(dev);
+ mgag200_mm_fini(mdev);
+ mgag200_device_fini(mdev);
+ kfree(mdev);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int mgag200_gem_create(struct drm_device *dev,
+ u32 size, bool iskernel,
+ struct drm_gem_object **obj)
+{
+ struct mgag200_bo *astbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = mgag200_bo_create(dev, size, 0, 0, &astbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+ *obj = &astbo->gem;
+ return 0;
+}
+
+int mgag200_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ int ret;
+ struct drm_gem_object *gobj;
+ u32 handle;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = mgag200_gem_create(dev, args->size, false,
+ &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_unreference_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ return 0;
+}
+
+int mgag200_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+int mgag200_gem_init_object(struct drm_gem_object *obj)
+{
+ BUG();
+ return 0;
+}
+
+void mgag200_bo_unref(struct mgag200_bo **bo)
+{
+ struct ttm_buffer_object *tbo;
+
+ if ((*bo) == NULL)
+ return;
+
+ tbo = &((*bo)->bo);
+ ttm_bo_unref(&tbo);
+ if (tbo == NULL)
+ *bo = NULL;
+
+}
+
+void mgag200_gem_free_object(struct drm_gem_object *obj)
+{
+ struct mgag200_bo *mgag200_bo = gem_to_mga_bo(obj);
+
+ if (!mgag200_bo)
+ return;
+ mgag200_bo_unref(&mgag200_bo);
+}
+
+
+static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
+{
+ return bo->bo.addr_space_offset;
+}
+
+int
+mgag200_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct mgag200_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_mga_bo(obj);
+ *offset = mgag200_bo_mmap_offset(bo);
+
+ drm_gem_object_unreference(obj);
+ ret = 0;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
new file mode 100644
index 000000000000..d303061b251e
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -0,0 +1,1533 @@
+/*
+ * Copyright 2010 Matt Turner.
+ * Copyright 2012 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Matt Turner
+ * Dave Airlie
+ */
+
+#include <linux/delay.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc_helper.h"
+
+#include "mgag200_drv.h"
+
+#define MGAG200_LUT_SIZE 256
+
+/*
+ * This file contains setup code for the CRTC.
+ */
+
+static void mga_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ int i;
+
+ if (!crtc->enabled)
+ return;
+
+ WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+
+ for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+ /* VGA registers */
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_b[i]);
+ }
+}
+
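+/*
+ * Spin (with an iteration cap) until the current vsync, if any, ends and
+ * the next one begins, sampling bit 3 of MGAREG_Status.
+ */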
+static inline void mga_wait_vsync(struct mga_device *mdev)
+{
+ unsigned int count = 0;
+ unsigned int status = 0;
+
+ do {
+ status = RREG32(MGAREG_Status);
+ count++;
+ } while ((status & 0x08) && (count < 250000));
+ count = 0;
+ status = 0;
+ do {
+ status = RREG32(MGAREG_Status);
+ count++;
+ } while (!(status & 0x08) && (count < 250000));
+}
+
+static inline void mga_wait_busy(struct mga_device *mdev)
+{
+ unsigned int count = 0;
+ unsigned int status = 0;
+ do {
+ status = RREG8(MGAREG_Status + 2);
+ count++;
+ } while ((status & 0x01) && (count < 500000));
+}
+
+/*
+ * The core passes the desired mode to the CRTC code to see whether any
+ * CRTC-specific modifications need to be made to it. We're in a position
+ * to just pass that straight through, so this does nothing
+ */
+static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
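+/*
+ * Brute-force search of the P, N and M divider values for the combination
+ * whose output (pllreffreq * N / (M * P)) comes closest to the requested
+ * pixel clock, keeping the VCO within its legal range. The result is
+ * rejected if the best match is more than 0.5% off.
+ */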
+static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+
+ m = n = p = 0;
+ vcomax = 320000;
+ vcomin = 160000;
+ pllreffreq = 25000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 8; testp > 0; testp /= 2) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 17; testn < 256; testn++) {
+ for (testm = 1; testm < 32; testm++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm - 1;
+ n = testn - 1;
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
+ if (delta > permitteddelta) {
+ printk(KERN_WARNING "PLL delta too large\n");
+ return 1;
+ }
+
+ WREG_DAC(MGA1064_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_PIX_PLLC_P, p);
+ return 0;
+}
+
+static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ int i, j, tmpcount, vcount;
+ bool pll_locked = false;
+ u8 tmp;
+
+ m = n = p = 0;
+ vcomax = 550000;
+ vcomin = 150000;
+ pllreffreq = 48000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 1; testp < 9; testp++) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 17; testm++) {
+ for (testn = 1; testn < 151; testn++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = (testm - 1) | ((n >> 1) & 0x80);
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
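+ /*
+ * Program the PLL and wait for it to lock, retrying a bounded number of
+ * times. Lock is inferred by watching MGAREG_VCOUNT advance, i.e. scanout
+ * is running again off the new pixel clock.
+ */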
+ for (i = 0; i <= 32 && pll_locked == false; i++) {
+ if (i > 0) {
+ WREG8(MGAREG_CRTC_INDEX, 0x1e);
+ tmp = RREG8(MGAREG_CRTC_DATA);
+ if (tmp < 0xff)
+ WREG8(MGAREG_CRTC_DATA, tmp+1);
+ }
+
+ /* set pixclkdis to 1 */
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+ /* select PLL Set C */
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ /* reset the PLL */
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x04;
+ WREG_DAC(MGA1064_VREF_CTL, tmp);
+
+ udelay(50);
+
+ /* program pixel pll register */
+ WREG_DAC(MGA1064_WB_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_WB_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ /* turn pll on */
+ WREG8(DAC_INDEX, MGA1064_VREF_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x04;
+ WREG_DAC(MGA1064_VREF_CTL, tmp);
+
+ udelay(500);
+
+ /* select the pixel pll */
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
+ tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+ /* reset dotclock rate bit */
+ WREG8(MGAREG_SEQ_INDEX, 1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+ tmp &= ~0x8;
+ WREG8(MGAREG_SEQ_DATA, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ vcount = RREG8(MGAREG_VCOUNT);
+
+ for (j = 0; j < 30 && pll_locked == false; j++) {
+ tmpcount = RREG8(MGAREG_VCOUNT);
+ if (tmpcount < vcount)
+ vcount = 0;
+ if ((tmpcount - vcount) > 2)
+ pll_locked = true;
+ else
+ udelay(5);
+ }
+ }
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+ return 0;
+}
+
+static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ u8 tmp;
+
+ m = n = p = 0;
+ vcomax = 550000;
+ vcomin = 150000;
+ pllreffreq = 50000;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 16; testp > 0; testp--) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testn = 1; testn < 257; testn++) {
+ for (testm = 1; testm < 17; testm++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = testm - 1;
+ p = testp - 1;
+ }
+ }
+ }
+ }
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+ WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_EV_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
+ tmp = RREG8(DAC_DATA);
+ WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3 << 2);
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ return 0;
+}
+
+static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta, permitteddelta;
+ unsigned int testp, testm, testn;
+ unsigned int p, m, n;
+ unsigned int computed;
+ int i, j, tmpcount, vcount;
+ u8 tmp;
+ bool pll_locked = false;
+
+ m = n = p = 0;
+ vcomax = 800000;
+ vcomin = 400000;
+ pllreffreq = 3333;
+
+ delta = 0xffffffff;
+ permitteddelta = clock * 5 / 1000;
+
+ for (testp = 16; testp > 0; testp--) {
+ if (clock * testp > vcomax)
+ continue;
+ if (clock * testp < vcomin)
+ continue;
+
+ for (testm = 1; testm < 33; testm++) {
+ for (testn = 1; testn < 257; testn++) {
+ computed = (pllreffreq * testn) /
+ (testm * testp);
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ n = testn - 1;
+ m = (testm - 1) | ((n >> 1) & 0x80);
+ p = testp - 1;
+ }
+ if ((clock * testp) >= 600000)
+ p |= 0x80;
+ }
+ }
+ }
+ for (i = 0; i <= 32 && pll_locked == false; i++) {
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= 0x3 << 2;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ WREG_DAC(MGA1064_EH_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_EH_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_EH_PIX_PLLC_P, p);
+
+ udelay(500);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
+ tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ vcount = RREG8(MGAREG_VCOUNT);
+
+ for (j = 0; j < 30 && pll_locked == false; j++) {
+ tmpcount = RREG8(MGAREG_VCOUNT);
+ if (tmpcount < vcount)
+ vcount = 0;
+ if ((tmpcount - vcount) > 2)
+ pll_locked = true;
+ else
+ udelay(5);
+ }
+ }
+
+ return 0;
+}
+
+static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
+{
+ unsigned int vcomax, vcomin, pllreffreq;
+ unsigned int delta, tmpdelta;
+ unsigned int testr, testn, testm, testo;
+ unsigned int p, m, n;
+ unsigned int computed;
+ int tmp;
+
+ m = n = p = 0;
+ vcomax = 1488000;
+ vcomin = 1056000;
+ pllreffreq = 48000;
+
+ delta = 0xffffffff;
+
+ for (testr = 0; testr < 4; testr++) {
+ if (delta == 0)
+ break;
+ for (testn = 5; testn < 129; testn++) {
+ if (delta == 0)
+ break;
+ for (testm = 3; testm >= 0; testm--) {
+ if (delta == 0)
+ break;
+ for (testo = 5; testo < 33; testo++) {
+ computed = pllreffreq * (testn + 1) /
+ (testr + 1);
+ if (computed < vcomin)
+ continue;
+ if (computed > vcomax)
+ continue;
+ if (computed > clock)
+ tmpdelta = computed - clock;
+ else
+ tmpdelta = clock - computed;
+ if (tmpdelta < delta) {
+ delta = tmpdelta;
+ m = testm | (testo << 3);
+ n = testn;
+ p = testr | (testr << 3);
+ }
+ }
+ }
+ }
+ }
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
+ WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= MGA1064_REMHEADCTL_CLKDIS;
+ WREG_DAC(MGA1064_REMHEADCTL, tmp);
+
+ tmp = RREG8(MGAREG_MEM_MISC_READ);
+ tmp |= (0x3<<2) | 0xc0;
+ WREG8(MGAREG_MEM_MISC_WRITE, tmp);
+
+ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
+ tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
+ WREG_DAC(MGA1064_PIX_CLK_CTL, tmp);
+
+ udelay(500);
+
+ WREG_DAC(MGA1064_ER_PIX_PLLC_N, n);
+ WREG_DAC(MGA1064_ER_PIX_PLLC_M, m);
+ WREG_DAC(MGA1064_ER_PIX_PLLC_P, p);
+
+ udelay(50);
+
+ return 0;
+}
+
+static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
+{
+ switch (mdev->type) {
+ case G200_SE_A:
+ case G200_SE_B:
+ return mga_g200se_set_plls(mdev, clock);
+ case G200_WB:
+ return mga_g200wb_set_plls(mdev, clock);
+ case G200_EV:
+ return mga_g200ev_set_plls(mdev, clock);
+ case G200_EH:
+ return mga_g200eh_set_plls(mdev, clock);
+ case G200_ER:
+ return mga_g200er_set_plls(mdev, clock);
+ }
+ return 0;
+}
+
+static void mga_g200wb_prepare(struct drm_crtc *crtc)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ u8 tmp;
+ int iter_max;
+
+ /* 1- The first step is to warn the BMC of an upcoming mode change.
+ * We do this by configuring the misc<0> pin as an output. */
+
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x10;
+ WREG_DAC(MGA1064_GEN_IO_CTL, tmp);
+
+ /* we are putting a 1 on the misc<0> line */
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x10;
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+
+ /* 2- The second step is to mask any further scan request.
+ * This is done by asserting the remfreqmsk bit (XSPAREREG<7>).
+ */
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x80;
+ WREG_DAC(MGA1064_SPAREREG, tmp);
+
+ /* 3a- The third step is to verify whether there is an active scan.
+ * We are looking for a 0 on remhsyncsts (XSPAREREG<0>).
+ */
+ iter_max = 300;
+ while (!(tmp & 0x1) && iter_max) {
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ udelay(1000);
+ iter_max--;
+ }
+
+ /* 3b- This step occurs only if the remote is actually scanning.
+ * We wait for the end of the frame, which is a 1 on
+ * remvsyncsts (XSPAREREG<1>).
+ */
+ if (iter_max) {
+ iter_max = 300;
+ while ((tmp & 0x2) && iter_max) {
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ udelay(1000);
+ iter_max--;
+ }
+ }
+}
+
+static void mga_g200wb_commit(struct drm_crtc *crtc)
+{
+ u8 tmp;
+ struct mga_device *mdev = crtc->dev->dev_private;
+
+ /* 1- The first step is to ensure that the vrsten and hrsten are set */
+ WREG8(MGAREG_CRTCEXT_INDEX, 1);
+ tmp = RREG8(MGAREG_CRTCEXT_DATA);
+ WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);
+
+ /* 2- second step is to assert the rstlvl2 */
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+ tmp = RREG8(DAC_DATA);
+ tmp |= 0x8;
+ WREG8(DAC_DATA, tmp);
+
+ /* wait 10 us */
+ udelay(10);
+
+ /* 3- deassert rstlvl2 */
+ tmp &= ~0x08;
+ WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
+ WREG8(DAC_DATA, tmp);
+
+ /* 4- remove mask of scan request */
+ WREG8(DAC_INDEX, MGA1064_SPAREREG);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x80;
+ WREG8(DAC_DATA, tmp);
+
+ /* 5- put back a 0 on the misc<0> line */
+ WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
+ tmp = RREG8(DAC_DATA);
+ tmp &= ~0x10;
+ WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
+}
+
+
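+/*
+ * Latch a new scanout start address: spin until the next vertical retrace
+ * (bit 3 of status register 0x1fda), wait a couple of scanlines, then write
+ * the address in 32-bit word units across CRTC registers 0x0d, 0x0c and 0xaf.
+ */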
+void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ u32 addr;
+ int count;
+
+ while (RREG8(0x1fda) & 0x08);
+ while (!(RREG8(0x1fda) & 0x08));
+
+ count = RREG8(MGAREG_VCOUNT) + 2;
+ while (RREG8(MGAREG_VCOUNT) < count);
+
+ addr = offset >> 2;
+ WREG_CRT(0x0d, (u8)(addr & 0xff));
+ WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
+ WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf);
+}
+
+
+/* this driver is different - we will force-move buffers out of VRAM */
+static int mga_crtc_do_set_base(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y, int atomic)
+{
+ struct mga_device *mdev = crtc->dev->dev_private;
+ struct drm_gem_object *obj;
+ struct mga_framebuffer *mga_fb;
+ struct mgag200_bo *bo;
+ int ret;
+ u64 gpu_addr;
+
+ /* push the previous fb to system ram */
+ if (!atomic && fb) {
+ mga_fb = to_mga_framebuffer(fb);
+ obj = mga_fb->obj;
+ bo = gem_to_mga_bo(obj);
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+ mgag200_bo_push_sysram(bo);
+ mgag200_bo_unreserve(bo);
+ }
+
+ mga_fb = to_mga_framebuffer(crtc->fb);
+ obj = mga_fb->obj;
+ bo = gem_to_mga_bo(obj);
+
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
+ if (ret) {
+ mgag200_bo_unreserve(bo);
+ return ret;
+ }
+
+ if (&mdev->mfbdev->mfb == mga_fb) {
+ /* if this is the fbcon framebuffer being pushed in, kmap it */
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret)
+ DRM_ERROR("failed to kmap fbcon\n");
+
+ }
+ mgag200_bo_unreserve(bo);
+
+ DRM_INFO("mga base %llx\n", gpu_addr);
+
+ mga_set_start_address(crtc, (u32)gpu_addr);
+
+ return 0;
+}
+
+static int mga_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ return mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+}
+
+static int mga_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ int hdisplay, hsyncstart, hsyncend, htotal;
+ int vdisplay, vsyncstart, vsyncend, vtotal;
+ int pitch;
+ int option = 0, option2 = 0;
+ int i;
+ unsigned char misc = 0;
+ unsigned char ext_vga[6];
+ unsigned char ext_vga_index24;
+ unsigned char dac_index90 = 0;
+ u8 bppshift;
+
+ static unsigned char dacvalue[] = {
+ /* 0x00: */ 0, 0, 0, 0, 0, 0, 0x00, 0,
+ /* 0x08: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x10: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x18: */ 0x00, 0, 0xC9, 0xFF, 0xBF, 0x20, 0x1F, 0x20,
+ /* 0x20: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x28: */ 0x00, 0x00, 0x00, 0x00, 0, 0, 0, 0x40,
+ /* 0x30: */ 0x00, 0xB0, 0x00, 0xC2, 0x34, 0x14, 0x02, 0x83,
+ /* 0x38: */ 0x00, 0x93, 0x00, 0x77, 0x00, 0x00, 0x00, 0x3A,
+ /* 0x40: */ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0
+ };
+
+ bppshift = mdev->bpp_shifts[(crtc->fb->bits_per_pixel >> 3) - 1];
+
+ switch (mdev->type) {
+ case G200_SE_A:
+ case G200_SE_B:
+ dacvalue[MGA1064_VREF_CTL] = 0x03;
+ dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_DAC_EN |
+ MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ if (mdev->has_sdram)
+ option = 0x40049120;
+ else
+ option = 0x4004d120;
+ option2 = 0x00008000;
+ break;
+ case G200_WB:
+ dacvalue[MGA1064_VREF_CTL] = 0x07;
+ option = 0x41049120;
+ option2 = 0x0000b000;
+ break;
+ case G200_EV:
+ dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ option = 0x00000120;
+ option2 = 0x0000b000;
+ break;
+ case G200_EH:
+ dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
+ MGA1064_MISC_CTL_DAC_RAM_CS;
+ option = 0x00000120;
+ option2 = 0x0000b000;
+ break;
+ case G200_ER:
+ dac_index90 = 0;
+ break;
+ }
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_8bits;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_15bits;
+ else
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_16bits;
+ break;
+ case 24:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_24bits;
+ break;
+ case 32:
+ dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_32_24bits;
+ break;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ misc |= 0x40;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ misc |= 0x80;
+
+
+ for (i = 0; i < sizeof(dacvalue); i++) {
+ if ((i <= 0x03) ||
+ (i == 0x07) ||
+ (i == 0x0b) ||
+ (i == 0x0f) ||
+ ((i >= 0x13) && (i <= 0x17)) ||
+ (i == 0x1b) ||
+ (i == 0x1c) ||
+ ((i >= 0x1f) && (i <= 0x29)) ||
+ ((i >= 0x30) && (i <= 0x37)))
+ continue;
+ if (IS_G200_SE(mdev) &&
+ ((i == 0x2c) || (i == 0x2d) || (i == 0x2e)))
+ continue;
+ if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH) &&
+ (i >= 0x44) && (i <= 0x4e))
+ continue;
+
+ WREG_DAC(i, dacvalue[i]);
+ }
+
+ if (mdev->type == G200_ER) {
+ WREG_DAC(0x90, dac_index90);
+ }
+
+
+ if (option)
+ pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
+ if (option2)
+ pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
+
+ WREG_SEQ(2, 0xf);
+ WREG_SEQ(3, 0);
+ WREG_SEQ(4, 0xe);
+
+ pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
+ if (crtc->fb->bits_per_pixel == 24)
+ pitch = pitch >> (4 - bppshift);
+ else
+ pitch = pitch >> (4 - bppshift);
+
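+ /*
+ * VGA-style CRTC timings: the horizontal fields are programmed in
+ * character clocks (8 pixels) minus one, the vertical fields in
+ * scanlines minus one (minus two for the total).
+ */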
+ hdisplay = mode->hdisplay / 8 - 1;
+ hsyncstart = mode->hsync_start / 8 - 1;
+ hsyncend = mode->hsync_end / 8 - 1;
+ htotal = mode->htotal / 8 - 1;
+
+ /* Work around hardware quirk */
+ if ((htotal & 0x07) == 0x06 || (htotal & 0x07) == 0x04)
+ htotal++;
+
+ vdisplay = mode->vdisplay - 1;
+ vsyncstart = mode->vsync_start - 1;
+ vsyncend = mode->vsync_end - 1;
+ vtotal = mode->vtotal - 2;
+
+ WREG_GFX(0, 0);
+ WREG_GFX(1, 0);
+ WREG_GFX(2, 0);
+ WREG_GFX(3, 0);
+ WREG_GFX(4, 0);
+ WREG_GFX(5, 0x40);
+ WREG_GFX(6, 0x5);
+ WREG_GFX(7, 0xf);
+ WREG_GFX(8, 0xf);
+
+ WREG_CRT(0, htotal - 4);
+ WREG_CRT(1, hdisplay);
+ WREG_CRT(2, hdisplay);
+ WREG_CRT(3, (htotal & 0x1F) | 0x80);
+ WREG_CRT(4, hsyncstart);
+ WREG_CRT(5, ((htotal & 0x20) << 2) | (hsyncend & 0x1F));
+ WREG_CRT(6, vtotal & 0xFF);
+ WREG_CRT(7, ((vtotal & 0x100) >> 8) |
+ ((vdisplay & 0x100) >> 7) |
+ ((vsyncstart & 0x100) >> 6) |
+ ((vdisplay & 0x100) >> 5) |
+ ((vdisplay & 0x100) >> 4) | /* linecomp */
+ ((vtotal & 0x200) >> 4)|
+ ((vdisplay & 0x200) >> 3) |
+ ((vsyncstart & 0x200) >> 2));
+ WREG_CRT(9, ((vdisplay & 0x200) >> 4) |
+ ((vdisplay & 0x200) >> 3));
+ WREG_CRT(10, 0);
+ WREG_CRT(11, 0);
+ WREG_CRT(12, 0);
+ WREG_CRT(13, 0);
+ WREG_CRT(14, 0);
+ WREG_CRT(15, 0);
+ WREG_CRT(16, vsyncstart & 0xFF);
+ WREG_CRT(17, (vsyncend & 0x0F) | 0x20);
+ WREG_CRT(18, vdisplay & 0xFF);
+ WREG_CRT(19, pitch & 0xFF);
+ WREG_CRT(20, 0);
+ WREG_CRT(21, vdisplay & 0xFF);
+ WREG_CRT(22, (vtotal + 1) & 0xFF);
+ WREG_CRT(23, 0xc3);
+ WREG_CRT(24, vdisplay & 0xFF);
+
+ ext_vga[0] = 0;
+ ext_vga[5] = 0;
+
+ /* TODO interlace */
+
+ ext_vga[0] |= (pitch & 0x300) >> 4;
+ ext_vga[1] = (((htotal - 4) & 0x100) >> 8) |
+ ((hdisplay & 0x100) >> 7) |
+ ((hsyncstart & 0x100) >> 6) |
+ (htotal & 0x40);
+ ext_vga[2] = ((vtotal & 0xc00) >> 10) |
+ ((vdisplay & 0x400) >> 8) |
+ ((vdisplay & 0xc00) >> 7) |
+ ((vsyncstart & 0xc00) >> 5) |
+ ((vdisplay & 0x400) >> 3);
+ if (crtc->fb->bits_per_pixel == 24)
+ ext_vga[3] = (((1 << bppshift) * 3) - 1) | 0x80;
+ else
+ ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
+ ext_vga[4] = 0;
+ if (mdev->type == G200_WB)
+ ext_vga[1] |= 0x88;
+
+ ext_vga_index24 = 0x05;
+
+ /* Set pixel clocks */
+ misc = 0x2d;
+ WREG8(MGA_MISC_OUT, misc);
+
+ mga_crtc_set_plls(mdev, mode->clock);
+
+ for (i = 0; i < 6; i++) {
+ WREG_ECRT(i, ext_vga[i]);
+ }
+
+ if (mdev->type == G200_ER)
+ WREG_ECRT(24, ext_vga_index24);
+
+ if (mdev->type == G200_EV) {
+ WREG_ECRT(6, 0);
+ }
+
+ WREG_ECRT(0, ext_vga[0]);
+ /* Enable mga pixel clock */
+ misc = 0x2d;
+
+ WREG8(MGA_MISC_OUT, misc);
+
+ if (adjusted_mode)
+ memcpy(&mdev->mode, mode, sizeof(struct drm_display_mode));
+
+ mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+
+ /* reset tagfifo */
+ if (mdev->type == G200_ER) {
+ u32 mem_ctl = RREG32(MGAREG_MEMCTL);
+ u8 seq1;
+
+ /* screen off */
+ WREG8(MGAREG_SEQ_INDEX, 0x01);
+ seq1 = RREG8(MGAREG_SEQ_DATA) | 0x20;
+ WREG8(MGAREG_SEQ_DATA, seq1);
+
+ WREG32(MGAREG_MEMCTL, mem_ctl | 0x00200000);
+ udelay(1000);
+ WREG32(MGAREG_MEMCTL, mem_ctl & ~0x00200000);
+
+ WREG8(MGAREG_SEQ_DATA, seq1 & ~0x20);
+ }
+
+
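+ /*
+ * Derive the G200SE high-priority request level from the scanout
+ * bandwidth: mb is roughly Mbit/s (pixel clock in kHz times bits
+ * per pixel, divided by 1000).
+ */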
+ if (IS_G200_SE(mdev)) {
+ if (mdev->reg_1e24 >= 0x02) {
+ u8 hi_pri_lvl;
+ u32 bpp;
+ u32 mb;
+
+ if (crtc->fb->bits_per_pixel > 16)
+ bpp = 32;
+ else if (crtc->fb->bits_per_pixel > 8)
+ bpp = 16;
+ else
+ bpp = 8;
+
+ mb = (mode->clock * bpp) / 1000;
+ if (mb > 3100)
+ hi_pri_lvl = 0;
+ else if (mb > 2600)
+ hi_pri_lvl = 1;
+ else if (mb > 1900)
+ hi_pri_lvl = 2;
+ else if (mb > 1160)
+ hi_pri_lvl = 3;
+ else if (mb > 440)
+ hi_pri_lvl = 4;
+ else
+ hi_pri_lvl = 5;
+
+ WREG8(0x1fde, 0x06);
+ WREG8(0x1fdf, hi_pri_lvl);
+ } else {
+ if (mdev->reg_1e24 >= 0x01)
+ WREG8(0x1fdf, 0x03);
+ else
+ WREG8(0x1fdf, 0x04);
+ }
+ }
+ return 0;
+}
+
+#if 0 /* code from mjg to attempt D3 on crtc dpms off - revisit later */
+static int mga_suspend(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ int option;
+
+ if (mdev->suspended)
+ return 0;
+
+ WREG_SEQ(1, 0x20);
+ WREG_ECRT(1, 0x30);
+ /* Disable the pixel clock */
+ WREG_DAC(0x1a, 0x05);
+ /* Power down the DAC */
+ WREG_DAC(0x1e, 0x18);
+ /* Power down the pixel PLL */
+ WREG_DAC(0x1a, 0x0d);
+
+ /* Disable PLLs and clocks */
+ pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+ option &= ~(0x1F8024);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+ pci_set_power_state(pdev, PCI_D3hot);
+ pci_disable_device(pdev);
+
+ mdev->suspended = true;
+
+ return 0;
+}
+
+static int mga_resume(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+ int option;
+
+ if (!mdev->suspended)
+ return 0;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_device(pdev);
+
+ /* Disable sysclk */
+ pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
+ option &= ~(0x4);
+ pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+
+ mdev->suspended = false;
+
+ return 0;
+}
+
+#endif
+
+static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ u8 seq1 = 0, crtcext1 = 0;
+
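+ /*
+ * SEQ1 bit 5 blanks the screen; CRTCEXT1 bit 4 drops hsync and bit 5
+ * drops vsync, giving the usual DPMS standby/suspend/off combinations.
+ */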
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ seq1 = 0;
+ crtcext1 = 0;
+ mga_crtc_load_lut(crtc);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ seq1 = 0x20;
+ crtcext1 = 0x10;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ seq1 = 0x20;
+ crtcext1 = 0x20;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ seq1 = 0x20;
+ crtcext1 = 0x30;
+ break;
+ }
+
+#if 0
+ if (mode == DRM_MODE_DPMS_OFF) {
+ mga_suspend(crtc);
+ }
+#endif
+ WREG8(MGAREG_SEQ_INDEX, 0x01);
+ seq1 |= RREG8(MGAREG_SEQ_DATA) & ~0x20;
+ mga_wait_vsync(mdev);
+ mga_wait_busy(mdev);
+ WREG8(MGAREG_SEQ_DATA, seq1);
+ msleep(20);
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x01);
+ crtcext1 |= RREG8(MGAREG_CRTCEXT_DATA) & ~0x30;
+ WREG8(MGAREG_CRTCEXT_DATA, crtcext1);
+
+#if 0
+ if (mode == DRM_MODE_DPMS_ON && mdev->suspended == true) {
+ mga_resume(crtc);
+ drm_helper_resume_force_mode(dev);
+ }
+#endif
+}
+
+/*
+ * This is called before a mode is programmed. A typical use might be to
+ * enable DPMS during the programming to avoid seeing intermediate stages,
+ * but that's not relevant to us
+ */
+static void mga_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ u8 tmp;
+
+ /* mga_resume(crtc);*/
+
+ WREG8(MGAREG_CRTC_INDEX, 0x11);
+ tmp = RREG8(MGAREG_CRTC_DATA);
+ WREG_CRT(0x11, tmp | 0x80);
+
+ if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
+ WREG_SEQ(0, 1);
+ msleep(50);
+ WREG_SEQ(1, 0x20);
+ msleep(20);
+ } else {
+ WREG8(MGAREG_SEQ_INDEX, 0x1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+
+ /* start sync reset */
+ WREG_SEQ(0, 1);
+ WREG_SEQ(1, tmp | 0x20);
+ }
+
+ if (mdev->type == G200_WB)
+ mga_g200wb_prepare(crtc);
+
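+ /* CRTC register 0x11 bit 7 write-protects CR00-CR07; clear it so
+ * the mode set can program the timing registers */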
+ WREG_CRT(17, 0);
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void mga_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = dev->dev_private;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ u8 tmp;
+
+ if (mdev->type == G200_WB)
+ mga_g200wb_commit(crtc);
+
+ if (mdev->type == G200_SE_A || mdev->type == G200_SE_B) {
+ msleep(50);
+ WREG_SEQ(1, 0x0);
+ msleep(20);
+ WREG_SEQ(0, 0x3);
+ } else {
+ WREG8(MGAREG_SEQ_INDEX, 0x1);
+ tmp = RREG8(MGAREG_SEQ_DATA);
+
+ tmp &= ~0x20;
+ WREG_SEQ(0x1, tmp);
+ WREG_SEQ(0, 3);
+ }
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode, so we can't perform smooth fades on deeper modes,
+ * but we are required to provide the function
+ */
+static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t start, uint32_t size)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+ int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
+ int i;
+
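+ /* the core hands us 16-bit ramps, the palette DAC takes 8 bits
+ * per channel, so keep only the top byte */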
+ for (i = start; i < end; i++) {
+ mga_crtc->lut_r[i] = red[i] >> 8;
+ mga_crtc->lut_g[i] = green[i] >> 8;
+ mga_crtc->lut_b[i] = blue[i] >> 8;
+ }
+ mga_crtc_load_lut(crtc);
+}
+
+/* Simple cleanup function */
+static void mga_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(mga_crtc);
+}
+
+/* These provide the minimum set of functions required to handle a CRTC */
+static const struct drm_crtc_funcs mga_crtc_funcs = {
+ .gamma_set = mga_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = mga_crtc_destroy,
+};
+
+static const struct drm_crtc_helper_funcs mga_helper_funcs = {
+ .dpms = mga_crtc_dpms,
+ .mode_fixup = mga_crtc_mode_fixup,
+ .mode_set = mga_crtc_mode_set,
+ .mode_set_base = mga_crtc_mode_set_base,
+ .prepare = mga_crtc_prepare,
+ .commit = mga_crtc_commit,
+ .load_lut = mga_crtc_load_lut,
+};
+
+/* CRTC setup */
+static void mga_crtc_init(struct drm_device *dev)
+{
+ struct mga_device *mdev = dev->dev_private;
+ struct mga_crtc *mga_crtc;
+ int i;
+
+ mga_crtc = kzalloc(sizeof(struct mga_crtc) +
+ (MGAG200FB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+
+ if (mga_crtc == NULL)
+ return;
+
+ drm_crtc_init(dev, &mga_crtc->base, &mga_crtc_funcs);
+
+ drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
+ mdev->mode_info.crtc = mga_crtc;
+
+ for (i = 0; i < MGAG200_LUT_SIZE; i++) {
+ mga_crtc->lut_r[i] = i;
+ mga_crtc->lut_g[i] = i;
+ mga_crtc->lut_b[i] = i;
+ }
+
+ drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
+}
+
+/** Sets the color ramps on behalf of fbcon */
+void mga_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ u16 blue, int regno)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ mga_crtc->lut_r[regno] = red >> 8;
+ mga_crtc->lut_g[regno] = green >> 8;
+ mga_crtc->lut_b[regno] = blue >> 8;
+}
+
+/** Gets the color ramps on behalf of fbcon */
+void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, int regno)
+{
+ struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
+
+ *red = (u16)mga_crtc->lut_r[regno] << 8;
+ *green = (u16)mga_crtc->lut_g[regno] << 8;
+ *blue = (u16)mga_crtc->lut_b[regno] << 8;
+}
+
+/*
+ * The encoder comes after the CRTC in the output pipeline, but before
+ * the connector. It's responsible for ensuring that the digital
+ * stream is appropriately converted into the output format. Setup is
+ * trivial in this case - everything is handled in the CRTC code, so
+ * the encoder hooks below are empty stubs
+ */
+
+/*
+ * These functions are analogous to those in the CRTC code, but are intended
+ * to handle any encoder-specific limitations
+ */
+static bool mga_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mga_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void mga_encoder_dpms(struct drm_encoder *encoder, int state)
+{
+ return;
+}
+
+static void mga_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void mga_encoder_commit(struct drm_encoder *encoder)
+{
+}
+
+void mga_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mga_encoder);
+}
+
+static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
+ .dpms = mga_encoder_dpms,
+ .mode_fixup = mga_encoder_mode_fixup,
+ .mode_set = mga_encoder_mode_set,
+ .prepare = mga_encoder_prepare,
+ .commit = mga_encoder_commit,
+};
+
+static const struct drm_encoder_funcs mga_encoder_encoder_funcs = {
+ .destroy = mga_encoder_destroy,
+};
+
+static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ struct mga_encoder *mga_encoder;
+
+ mga_encoder = kzalloc(sizeof(struct mga_encoder), GFP_KERNEL);
+ if (!mga_encoder)
+ return NULL;
+
+ encoder = &mga_encoder->base;
+ encoder->possible_crtcs = 0x1;
+
+ drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
+ DRM_MODE_ENCODER_DAC);
+ drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
+
+ return encoder;
+}
+
+
+static int mga_vga_get_modes(struct drm_connector *connector)
+{
+ struct mga_connector *mga_connector = to_mga_connector(connector);
+ struct edid *edid;
+ int ret = 0;
+
+ edid = drm_get_edid(connector, &mga_connector->i2c->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+ return ret;
+}
+
+static int mga_vga_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* FIXME: Add bandwidth and g200se limitations */
+
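+ /* limits below likely reflect the CRTC timing register widths:
+ * 2048 (11 bits) active, 4096 (12 bits) totals/sync positions */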
+ if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
+ mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
+ mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
+ mode->crtc_vsync_end > 4096 || mode->crtc_vtotal > 4096) {
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
+struct drm_encoder *mga_connector_best_encoder(struct drm_connector
+ *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ /* pick the first bound encoder */
+ if (enc_id) {
+ obj =
+ drm_mode_object_find(connector->dev, enc_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ return NULL;
+}
+
+static enum drm_connector_status mga_vga_detect(struct drm_connector
+ *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void mga_connector_destroy(struct drm_connector *connector)
+{
+ struct mga_connector *mga_connector = to_mga_connector(connector);
+ mgag200_i2c_destroy(mga_connector->i2c);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
+ .get_modes = mga_vga_get_modes,
+ .mode_valid = mga_vga_mode_valid,
+ .best_encoder = mga_connector_best_encoder,
+};
+
+struct drm_connector_funcs mga_vga_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = mga_vga_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = mga_connector_destroy,
+};
+
+static struct drm_connector *mga_vga_init(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct mga_connector *mga_connector;
+
+ mga_connector = kzalloc(sizeof(struct mga_connector), GFP_KERNEL);
+ if (!mga_connector)
+ return NULL;
+
+ connector = &mga_connector->base;
+
+ drm_connector_init(dev, connector,
+ &mga_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+
+ drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
+
+ mga_connector->i2c = mgag200_i2c_create(dev);
+ if (!mga_connector->i2c)
+ DRM_ERROR("failed to add ddc bus\n");
+
+ return connector;
+}
+
+
+int mgag200_modeset_init(struct mga_device *mdev)
+{
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ mdev->mode_info.mode_config_initialized = true;
+
+ mdev->dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
+ mdev->dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
+
+ mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
+
+ mga_crtc_init(mdev->dev);
+
+ encoder = mga_encoder_init(mdev->dev);
+ if (!encoder) {
+ DRM_ERROR("mga_encoder_init failed\n");
+ return -ENOMEM;
+ }
+
+ connector = mga_vga_init(mdev->dev);
+ if (!connector) {
+ DRM_ERROR("mga_vga_init failed\n");
+ return -ENOMEM;
+ }
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ ret = mgag200_fbdev_init(mdev);
+ if (ret) {
+ DRM_ERROR("mga_fbdev_init failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void mgag200_modeset_fini(struct mga_device *mdev)
+{
+
+}
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
new file mode 100644
index 000000000000..fb24d8655feb
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -0,0 +1,661 @@
+/*
+ * MGA Millennium (MGA2064W) functions
+ * MGA Mystique (MGA1064SG) functions
+ *
+ * Copyright 1996 The XFree86 Project, Inc.
+ *
+ * Authors
+ * Dirk Hohndel
+ * hohndel@XFree86.Org
+ * David Dawes
+ * dawes@XFree86.Org
+ * Contributors:
+ * Guy DESBIEF, Aix-en-provence, France
+ * g.desbief@aix.pacwan.net
+ * MGA1064SG Mystique register file
+ */
+
+
+#ifndef _MGA_REG_H_
+#define _MGA_REG_H_
+
+#define MGAREG_DWGCTL 0x1c00
+#define MGAREG_MACCESS 0x1c04
+/* the following is a mystique only register */
+#define MGAREG_MCTLWTST 0x1c08
+#define MGAREG_ZORG 0x1c0c
+
+#define MGAREG_PAT0 0x1c10
+#define MGAREG_PAT1 0x1c14
+#define MGAREG_PLNWT 0x1c1c
+
+#define MGAREG_BCOL 0x1c20
+#define MGAREG_FCOL 0x1c24
+
+#define MGAREG_SRC0 0x1c30
+#define MGAREG_SRC1 0x1c34
+#define MGAREG_SRC2 0x1c38
+#define MGAREG_SRC3 0x1c3c
+
+#define MGAREG_XYSTRT 0x1c40
+#define MGAREG_XYEND 0x1c44
+
+#define MGAREG_SHIFT 0x1c50
+/* the following is a mystique only register */
+#define MGAREG_DMAPAD 0x1c54
+#define MGAREG_SGN 0x1c58
+#define MGAREG_LEN 0x1c5c
+
+#define MGAREG_AR0 0x1c60
+#define MGAREG_AR1 0x1c64
+#define MGAREG_AR2 0x1c68
+#define MGAREG_AR3 0x1c6c
+#define MGAREG_AR4 0x1c70
+#define MGAREG_AR5 0x1c74
+#define MGAREG_AR6 0x1c78
+
+#define MGAREG_CXBNDRY 0x1c80
+#define MGAREG_FXBNDRY 0x1c84
+#define MGAREG_YDSTLEN 0x1c88
+#define MGAREG_PITCH 0x1c8c
+
+#define MGAREG_YDST 0x1c90
+#define MGAREG_YDSTORG 0x1c94
+#define MGAREG_YTOP 0x1c98
+#define MGAREG_YBOT 0x1c9c
+
+#define MGAREG_CXLEFT 0x1ca0
+#define MGAREG_CXRIGHT 0x1ca4
+#define MGAREG_FXLEFT 0x1ca8
+#define MGAREG_FXRIGHT 0x1cac
+
+#define MGAREG_XDST 0x1cb0
+
+#define MGAREG_DR0 0x1cc0
+#define MGAREG_DR1 0x1cc4
+#define MGAREG_DR2 0x1cc8
+#define MGAREG_DR3 0x1ccc
+
+#define MGAREG_DR4 0x1cd0
+#define MGAREG_DR5 0x1cd4
+#define MGAREG_DR6 0x1cd8
+#define MGAREG_DR7 0x1cdc
+
+#define MGAREG_DR8 0x1ce0
+#define MGAREG_DR9 0x1ce4
+#define MGAREG_DR10 0x1ce8
+#define MGAREG_DR11 0x1cec
+
+#define MGAREG_DR12 0x1cf0
+#define MGAREG_DR13 0x1cf4
+#define MGAREG_DR14 0x1cf8
+#define MGAREG_DR15 0x1cfc
+
+#define MGAREG_SRCORG 0x2cb4
+#define MGAREG_DSTORG 0x2cb8
+
+/* add or OR this to one of the previous "power registers" to start
+   the drawing engine */
+
+#define MGAREG_EXEC 0x0100
+
+#define MGAREG_FIFOSTATUS 0x1e10
+#define MGAREG_Status 0x1e14
+#define MGAREG_CACHEFLUSH 0x1fff
+#define MGAREG_ICLEAR 0x1e18
+#define MGAREG_IEN 0x1e1c
+
+#define MGAREG_VCOUNT 0x1e20
+
+#define MGAREG_Reset 0x1e40
+
+#define MGAREG_OPMODE 0x1e54
+
+/* Warp Registers */
+#define MGAREG_WIADDR 0x1dc0
+#define MGAREG_WIADDR2 0x1dd8
+#define MGAREG_WGETMSB 0x1dc8
+#define MGAREG_WVRTXSZ 0x1dcc
+#define MGAREG_WACCEPTSEQ 0x1dd4
+#define MGAREG_WMISC 0x1e70
+
+#define MGAREG_MEMCTL 0x2e08
+
+/* OPMODE register additives */
+
+#define MGAOPM_DMA_GENERAL (0x00 << 2)
+#define MGAOPM_DMA_BLIT (0x01 << 2)
+#define MGAOPM_DMA_VECTOR (0x10 << 2)
+
+/* MACCESS register additives */
+#define MGAMAC_PW8 0x00
+#define MGAMAC_PW16 0x01
+#define MGAMAC_PW24 0x03 /* not a typo */
+#define MGAMAC_PW32 0x02 /* not a typo */
+#define MGAMAC_BYPASS332 0x10000000
+#define MGAMAC_NODITHER 0x40000000
+#define MGAMAC_DIT555 0x80000000
+
+/* DWGCTL register additives */
+
+/* Lines */
+
+#define MGADWG_LINE_OPEN 0x00
+#define MGADWG_AUTOLINE_OPEN 0x01
+#define MGADWG_LINE_CLOSE 0x02
+#define MGADWG_AUTOLINE_CLOSE 0x03
+
+/* Trapezoids */
+#define MGADWG_TRAP 0x04
+#define MGADWG_TEXTURE_TRAP 0x06
+
+/* BitBlts */
+
+#define MGADWG_BITBLT 0x08
+#define MGADWG_FBITBLT 0x0c
+#define MGADWG_ILOAD 0x09
+#define MGADWG_ILOAD_SCALE 0x0d
+#define MGADWG_ILOAD_FILTER 0x0f
+#define MGADWG_ILOAD_HIQH 0x07
+#define MGADWG_ILOAD_HIQHV 0x0e
+#define MGADWG_IDUMP 0x0a
+
+/* atype access to WRAM */
+
+#define MGADWG_RPL ( 0x00 << 4 )
+#define MGADWG_RSTR ( 0x01 << 4 )
+#define MGADWG_ZI ( 0x03 << 4 )
+#define MGADWG_BLK ( 0x04 << 4 )
+#define MGADWG_I ( 0x07 << 4 )
+
+/* specifies whether bit blits are linear or xy */
+#define MGADWG_LINEAR ( 0x01 << 7 )
+
+/* z drawing mode. use MGADWG_NOZCMP for always */
+
+#define MGADWG_NOZCMP ( 0x00 << 8 )
+#define MGADWG_ZE ( 0x02 << 8 )
+#define MGADWG_ZNE ( 0x03 << 8 )
+#define MGADWG_ZLT ( 0x04 << 8 )
+#define MGADWG_ZLTE ( 0x05 << 8 )
+#define MGADWG_GT ( 0x06 << 8 )
+#define MGADWG_GTE ( 0x07 << 8 )
+
+/* use this to force colour expansion circuitry to do its stuff */
+
+#define MGADWG_SOLID ( 0x01 << 11 )
+
+/* ar register at zero */
+
+#define MGADWG_ARZERO ( 0x01 << 12 )
+
+#define MGADWG_SGNZERO ( 0x01 << 13 )
+
+#define MGADWG_SHIFTZERO ( 0x01 << 14 )
+
+/* See table on 4-43 for bop ALU operations */
+
+/* See table on 4-44 for translucidity masks */
+
+#define MGADWG_BMONOLEF ( 0x00 << 25 )
+#define MGADWG_BMONOWF ( 0x04 << 25 )
+#define MGADWG_BPLAN ( 0x01 << 25 )
+
+/* note that if bfcol is specified and you're doing a bitblt, it causes
+ a fbitblt to be performed, so check that you obey the fbitblt rules */
+
+#define MGADWG_BFCOL ( 0x02 << 25 )
+#define MGADWG_BUYUV ( 0x0e << 25 )
+#define MGADWG_BU32BGR ( 0x03 << 25 )
+#define MGADWG_BU32RGB ( 0x07 << 25 )
+#define MGADWG_BU24BGR ( 0x0b << 25 )
+#define MGADWG_BU24RGB ( 0x0f << 25 )
+
+#define MGADWG_PATTERN ( 0x01 << 29 )
+#define MGADWG_TRANSC ( 0x01 << 30 )
+#define MGAREG_MISC_WRITE 0x3c2
+#define MGAREG_MISC_READ 0x3cc
+#define MGAREG_MEM_MISC_WRITE 0x1fc2
+#define MGAREG_MEM_MISC_READ 0x1fcc
+
+#define MGAREG_MISC_IOADSEL (0x1 << 0)
+#define MGAREG_MISC_RAMMAPEN (0x1 << 1)
+#define MGAREG_MISC_CLK_SEL_VGA25 (0x0 << 2)
+#define MGAREG_MISC_CLK_SEL_VGA28 (0x1 << 2)
+#define MGAREG_MISC_CLK_SEL_MGA_PIX (0x2 << 2)
+#define MGAREG_MISC_CLK_SEL_MGA_MSK (0x3 << 2)
+#define MGAREG_MISC_VIDEO_DIS (0x1 << 4)
+#define MGAREG_MISC_HIGH_PG_SEL (0x1 << 5)
+
+/* MMIO VGA registers */
+#define MGAREG_SEQ_INDEX 0x1fc4
+#define MGAREG_SEQ_DATA 0x1fc5
+#define MGAREG_CRTC_INDEX 0x1fd4
+#define MGAREG_CRTC_DATA 0x1fd5
+#define MGAREG_CRTCEXT_INDEX 0x1fde
+#define MGAREG_CRTCEXT_DATA 0x1fdf
+
+
+
+/* MGA bits for registers PCI_OPTION_REG */
+#define MGA1064_OPT_SYS_CLK_PCI ( 0x00 << 0 )
+#define MGA1064_OPT_SYS_CLK_PLL ( 0x01 << 0 )
+#define MGA1064_OPT_SYS_CLK_EXT ( 0x02 << 0 )
+#define MGA1064_OPT_SYS_CLK_MSK ( 0x03 << 0 )
+
+#define MGA1064_OPT_SYS_CLK_DIS ( 0x01 << 2 )
+#define MGA1064_OPT_G_CLK_DIV_1 ( 0x01 << 3 )
+#define MGA1064_OPT_M_CLK_DIV_1 ( 0x01 << 4 )
+
+#define MGA1064_OPT_SYS_PLL_PDN ( 0x01 << 5 )
+#define MGA1064_OPT_VGA_ION ( 0x01 << 8 )
+
+/* MGA registers in PCI config space */
+#define PCI_MGA_INDEX 0x44
+#define PCI_MGA_DATA 0x48
+#define PCI_MGA_OPTION 0x40
+#define PCI_MGA_OPTION2 0x50
+#define PCI_MGA_OPTION3 0x54
+
+#define RAMDAC_OFFSET 0x3c00
+
+/* TVP3026 direct registers */
+
+#define TVP3026_INDEX 0x00
+#define TVP3026_WADR_PAL 0x00
+#define TVP3026_COL_PAL 0x01
+#define TVP3026_PIX_RD_MSK 0x02
+#define TVP3026_RADR_PAL 0x03
+#define TVP3026_CUR_COL_ADDR 0x04
+#define TVP3026_CUR_COL_DATA 0x05
+#define TVP3026_DATA 0x0a
+#define TVP3026_CUR_RAM 0x0b
+#define TVP3026_CUR_XLOW 0x0c
+#define TVP3026_CUR_XHI 0x0d
+#define TVP3026_CUR_YLOW 0x0e
+#define TVP3026_CUR_YHI 0x0f
+
+/* TVP3026 indirect registers */
+
+#define TVP3026_SILICON_REV 0x01
+#define TVP3026_CURSOR_CTL 0x06
+#define TVP3026_LATCH_CTL 0x0f
+#define TVP3026_TRUE_COLOR_CTL 0x18
+#define TVP3026_MUX_CTL 0x19
+#define TVP3026_CLK_SEL 0x1a
+#define TVP3026_PAL_PAGE 0x1c
+#define TVP3026_GEN_CTL 0x1d
+#define TVP3026_MISC_CTL 0x1e
+#define TVP3026_GEN_IO_CTL 0x2a
+#define TVP3026_GEN_IO_DATA 0x2b
+#define TVP3026_PLL_ADDR 0x2c
+#define TVP3026_PIX_CLK_DATA 0x2d
+#define TVP3026_MEM_CLK_DATA 0x2e
+#define TVP3026_LOAD_CLK_DATA 0x2f
+#define TVP3026_KEY_RED_LOW 0x32
+#define TVP3026_KEY_RED_HI 0x33
+#define TVP3026_KEY_GREEN_LOW 0x34
+#define TVP3026_KEY_GREEN_HI 0x35
+#define TVP3026_KEY_BLUE_LOW 0x36
+#define TVP3026_KEY_BLUE_HI 0x37
+#define TVP3026_KEY_CTL 0x38
+#define TVP3026_MCLK_CTL 0x39
+#define TVP3026_SENSE_TEST 0x3a
+#define TVP3026_TEST_DATA 0x3b
+#define TVP3026_CRC_LSB 0x3c
+#define TVP3026_CRC_MSB 0x3d
+#define TVP3026_CRC_CTL 0x3e
+#define TVP3026_ID 0x3f
+#define TVP3026_RESET 0xff
+
+
+/* MGA1064 DAC Register file */
+/* MGA1064 direct registers */
+
+#define MGA1064_INDEX 0x00
+#define MGA1064_WADR_PAL 0x00
+#define MGA1064_SPAREREG 0x00
+#define MGA1064_COL_PAL 0x01
+#define MGA1064_PIX_RD_MSK 0x02
+#define MGA1064_RADR_PAL 0x03
+#define MGA1064_DATA 0x0a
+
+#define MGA1064_CUR_XLOW 0x0c
+#define MGA1064_CUR_XHI 0x0d
+#define MGA1064_CUR_YLOW 0x0e
+#define MGA1064_CUR_YHI 0x0f
+
+/* MGA1064 indirect registers */
+#define MGA1064_DVI_PIPE_CTL 0x03
+#define MGA1064_CURSOR_BASE_ADR_LOW 0x04
+#define MGA1064_CURSOR_BASE_ADR_HI 0x05
+#define MGA1064_CURSOR_CTL 0x06
+#define MGA1064_CURSOR_COL0_RED 0x08
+#define MGA1064_CURSOR_COL0_GREEN 0x09
+#define MGA1064_CURSOR_COL0_BLUE 0x0a
+
+#define MGA1064_CURSOR_COL1_RED 0x0c
+#define MGA1064_CURSOR_COL1_GREEN 0x0d
+#define MGA1064_CURSOR_COL1_BLUE 0x0e
+
+#define MGA1064_CURSOR_COL2_RED 0x010
+#define MGA1064_CURSOR_COL2_GREEN 0x011
+#define MGA1064_CURSOR_COL2_BLUE 0x012
+
+#define MGA1064_VREF_CTL 0x018
+
+#define MGA1064_MUL_CTL 0x19
+#define MGA1064_MUL_CTL_8bits 0x0
+#define MGA1064_MUL_CTL_15bits 0x01
+#define MGA1064_MUL_CTL_16bits 0x02
+#define MGA1064_MUL_CTL_24bits 0x03
+#define MGA1064_MUL_CTL_32bits 0x04
+#define MGA1064_MUL_CTL_2G8V16bits 0x05
+#define MGA1064_MUL_CTL_G16V16bits 0x06
+#define MGA1064_MUL_CTL_32_24bits 0x07
+
+#define MGA1064_PIX_CLK_CTL 0x1a
+#define MGA1064_PIX_CLK_CTL_CLK_DIS ( 0x01 << 2 )
+#define MGA1064_PIX_CLK_CTL_CLK_POW_DOWN ( 0x01 << 3 )
+#define MGA1064_PIX_CLK_CTL_SEL_PCI ( 0x00 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_PLL ( 0x01 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_EXT ( 0x02 << 0 )
+#define MGA1064_PIX_CLK_CTL_SEL_MSK ( 0x03 << 0 )
+
+#define MGA1064_GEN_CTL 0x1d
+#define MGA1064_GEN_CTL_SYNC_ON_GREEN_DIS (0x01 << 5)
+#define MGA1064_MISC_CTL 0x1e
+#define MGA1064_MISC_CTL_DAC_EN ( 0x01 << 0 )
+#define MGA1064_MISC_CTL_VGA ( 0x01 << 1 )
+#define MGA1064_MISC_CTL_DIS_CON ( 0x03 << 1 )
+#define MGA1064_MISC_CTL_MAFC ( 0x02 << 1 )
+#define MGA1064_MISC_CTL_VGA8 ( 0x01 << 3 )
+#define MGA1064_MISC_CTL_DAC_RAM_CS ( 0x01 << 4 )
+
+#define MGA1064_GEN_IO_CTL2 0x29
+#define MGA1064_GEN_IO_CTL 0x2a
+#define MGA1064_GEN_IO_DATA 0x2b
+#define MGA1064_SYS_PLL_M 0x2c
+#define MGA1064_SYS_PLL_N 0x2d
+#define MGA1064_SYS_PLL_P 0x2e
+#define MGA1064_SYS_PLL_STAT 0x2f
+
+#define MGA1064_REMHEADCTL 0x30
+#define MGA1064_REMHEADCTL_CLKDIS ( 0x01 << 0 )
+#define MGA1064_REMHEADCTL_CLKSL_OFF ( 0x00 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_PLL ( 0x01 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_PCI ( 0x02 << 1 )
+#define MGA1064_REMHEADCTL_CLKSL_MSK ( 0x03 << 1 )
+
+#define MGA1064_REMHEADCTL2 0x31
+
+#define MGA1064_ZOOM_CTL 0x38
+#define MGA1064_SENSE_TST 0x3a
+
+#define MGA1064_CRC_LSB 0x3c
+#define MGA1064_CRC_MSB 0x3d
+#define MGA1064_CRC_CTL 0x3e
+#define MGA1064_COL_KEY_MSK_LSB 0x40
+#define MGA1064_COL_KEY_MSK_MSB 0x41
+#define MGA1064_COL_KEY_LSB 0x42
+#define MGA1064_COL_KEY_MSB 0x43
+#define MGA1064_PIX_PLLA_M 0x44
+#define MGA1064_PIX_PLLA_N 0x45
+#define MGA1064_PIX_PLLA_P 0x46
+#define MGA1064_PIX_PLLB_M 0x48
+#define MGA1064_PIX_PLLB_N 0x49
+#define MGA1064_PIX_PLLB_P 0x4a
+#define MGA1064_PIX_PLLC_M 0x4c
+#define MGA1064_PIX_PLLC_N 0x4d
+#define MGA1064_PIX_PLLC_P 0x4e
+
+#define MGA1064_PIX_PLL_STAT 0x4f
+
+/*Added for G450 dual head*/
+
+#define MGA1064_VID_PLL_STAT 0x8c
+#define MGA1064_VID_PLL_P 0x8D
+#define MGA1064_VID_PLL_M 0x8E
+#define MGA1064_VID_PLL_N 0x8F
+
+/* Modified PLL for G200 Winbond (G200WB) */
+#define MGA1064_WB_PIX_PLLC_M 0xb7
+#define MGA1064_WB_PIX_PLLC_N 0xb6
+#define MGA1064_WB_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 Maxim (G200EV) */
+#define MGA1064_EV_PIX_PLLC_M 0xb6
+#define MGA1064_EV_PIX_PLLC_N 0xb7
+#define MGA1064_EV_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 EH */
+#define MGA1064_EH_PIX_PLLC_M 0xb6
+#define MGA1064_EH_PIX_PLLC_N 0xb7
+#define MGA1064_EH_PIX_PLLC_P 0xb8
+
+/* Modified PLL for G200 Maxim (G200ER) */
+#define MGA1064_ER_PIX_PLLC_M 0xb7
+#define MGA1064_ER_PIX_PLLC_N 0xb6
+#define MGA1064_ER_PIX_PLLC_P 0xb8
+
+#define MGA1064_DISP_CTL 0x8a
+#define MGA1064_DISP_CTL_DAC1OUTSEL_MASK 0x01
+#define MGA1064_DISP_CTL_DAC1OUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_DAC1OUTSEL_EN 0x01
+#define MGA1064_DISP_CTL_DAC2OUTSEL_MASK (0x03 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC1 (0x01 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_CRTC2 (0x02 << 2)
+#define MGA1064_DISP_CTL_DAC2OUTSEL_TVE (0x03 << 2)
+#define MGA1064_DISP_CTL_PANOUTSEL_MASK (0x03 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_DIS 0x00
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC1 (0x01 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2RGB (0x02 << 5)
+#define MGA1064_DISP_CTL_PANOUTSEL_CRTC2656 (0x03 << 5)
+
+#define MGA1064_SYNC_CTL 0x8b
+
+#define MGA1064_PWR_CTL 0xa0
+#define MGA1064_PWR_CTL_DAC2_EN (0x01 << 0)
+#define MGA1064_PWR_CTL_VID_PLL_EN (0x01 << 1)
+#define MGA1064_PWR_CTL_PANEL_EN (0x01 << 2)
+#define MGA1064_PWR_CTL_RFIFO_EN (0x01 << 3)
+#define MGA1064_PWR_CTL_CFIFO_EN (0x01 << 4)
+
+#define MGA1064_PAN_CTL 0xa2
+
+/* Using crtc2 */
+#define MGAREG2_C2CTL 0x10
+#define MGAREG2_C2HPARAM 0x14
+#define MGAREG2_C2HSYNC 0x18
+#define MGAREG2_C2VPARAM 0x1c
+#define MGAREG2_C2VSYNC 0x20
+#define MGAREG2_C2STARTADD0 0x28
+
+#define MGAREG2_C2OFFSET 0x40
+#define MGAREG2_C2DATACTL 0x4c
+
+#define MGAREG_C2CTL 0x3c10
+#define MGAREG_C2CTL_C2_EN 0x01
+
+#define MGAREG_C2_HIPRILVL_M (0x07 << 4)
+#define MGAREG_C2_MAXHIPRI_M (0x07 << 8)
+
+#define MGAREG_C2CTL_PIXCLKSEL_MASK (0x03 << 1)
+#define MGAREG_C2CTL_PIXCLKSELH_MASK (0x01 << 14)
+#define MGAREG_C2CTL_PIXCLKSEL_PCICLK 0x00
+#define MGAREG_C2CTL_PIXCLKSEL_VDOCLK (0x01 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_PIXELPLL (0x02 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_VIDEOPLL (0x03 << 1)
+#define MGAREG_C2CTL_PIXCLKSEL_VDCLK (0x01 << 14)
+
+#define MGAREG_C2CTL_PIXCLKSEL_CRISTAL ((0x01 << 1) | (0x01 << 14))
+#define MGAREG_C2CTL_PIXCLKSEL_SYSTEMPLL ((0x02 << 1) | (0x01 << 14))
+
+#define MGAREG_C2CTL_PIXCLKDIS_MASK (0x01 << 3)
+#define MGAREG_C2CTL_PIXCLKDIS_DISABLE (0x01 << 3)
+
+#define MGAREG_C2CTL_CRTCDACSEL_MASK (0x01 << 20)
+#define MGAREG_C2CTL_CRTCDACSEL_CRTC1 0x00
+#define MGAREG_C2CTL_CRTCDACSEL_CRTC2 (0x01 << 20)
+
+#define MGAREG_C2HPARAM 0x3c14
+#define MGAREG_C2HSYNC 0x3c18
+#define MGAREG_C2VPARAM 0x3c1c
+#define MGAREG_C2VSYNC 0x3c20
+#define MGAREG_C2STARTADD0 0x3c28
+
+#define MGAREG_C2OFFSET 0x3c40
+#define MGAREG_C2DATACTL 0x3c4c
+
+/* video register */
+
+#define MGAREG_BESA1C3ORG 0x3d60
+#define MGAREG_BESA1CORG 0x3d10
+#define MGAREG_BESA1ORG 0x3d00
+#define MGAREG_BESCTL 0x3d20
+#define MGAREG_BESGLOBCTL 0x3dc0
+#define MGAREG_BESHCOORD 0x3d28
+#define MGAREG_BESHISCAL 0x3d30
+#define MGAREG_BESHSRCEND 0x3d3c
+#define MGAREG_BESHSRCLST 0x3d50
+#define MGAREG_BESHSRCST 0x3d38
+#define MGAREG_BESLUMACTL 0x3d40
+#define MGAREG_BESPITCH 0x3d24
+#define MGAREG_BESV1SRCLST 0x3d54
+#define MGAREG_BESV1WGHT 0x3d48
+#define MGAREG_BESVCOORD 0x3d2c
+#define MGAREG_BESVISCAL 0x3d34
+
+/* texture engine registers */
+
+#define MGAREG_TMR0 0x2c00
+#define MGAREG_TMR1 0x2c04
+#define MGAREG_TMR2 0x2c08
+#define MGAREG_TMR3 0x2c0c
+#define MGAREG_TMR4 0x2c10
+#define MGAREG_TMR5 0x2c14
+#define MGAREG_TMR6 0x2c18
+#define MGAREG_TMR7 0x2c1c
+#define MGAREG_TMR8 0x2c20
+#define MGAREG_TEXORG 0x2c24
+#define MGAREG_TEXWIDTH 0x2c28
+#define MGAREG_TEXHEIGHT 0x2c2c
+#define MGAREG_TEXCTL 0x2c30
+# define MGA_TW4 (0x00000000)
+# define MGA_TW8 (0x00000001)
+# define MGA_TW15 (0x00000002)
+# define MGA_TW16 (0x00000003)
+# define MGA_TW12 (0x00000004)
+# define MGA_TW32 (0x00000006)
+# define MGA_TW8A (0x00000007)
+# define MGA_TW8AL (0x00000008)
+# define MGA_TW422 (0x0000000A)
+# define MGA_TW422UYVY (0x0000000B)
+# define MGA_PITCHLIN (0x00000100)
+# define MGA_NOPERSPECTIVE (0x00200000)
+# define MGA_TAKEY (0x02000000)
+# define MGA_TAMASK (0x04000000)
+# define MGA_CLAMPUV (0x18000000)
+# define MGA_TEXMODULATE (0x20000000)
+#define MGAREG_TEXCTL2 0x2c3c
+# define MGA_G400_TC2_MAGIC (0x00008000)
+# define MGA_TC2_DECALBLEND (0x00000001)
+# define MGA_TC2_IDECAL (0x00000002)
+# define MGA_TC2_DECALDIS (0x00000004)
+# define MGA_TC2_CKSTRANSDIS (0x00000010)
+# define MGA_TC2_BORDEREN (0x00000020)
+# define MGA_TC2_SPECEN (0x00000040)
+# define MGA_TC2_DUALTEX (0x00000080)
+# define MGA_TC2_TABLEFOG (0x00000100)
+# define MGA_TC2_BUMPMAP (0x00000200)
+# define MGA_TC2_SELECT_TMU1 (0x80000000)
+#define MGAREG_TEXTRANS 0x2c34
+#define MGAREG_TEXTRANSHIGH 0x2c38
+#define MGAREG_TEXFILTER 0x2c58
+# define MGA_MIN_NRST (0x00000000)
+# define MGA_MIN_BILIN (0x00000002)
+# define MGA_MIN_ANISO (0x0000000D)
+# define MGA_MAG_NRST (0x00000000)
+# define MGA_MAG_BILIN (0x00000020)
+# define MGA_FILTERALPHA (0x00100000)
+#define MGAREG_ALPHASTART 0x2c70
+#define MGAREG_ALPHAXINC 0x2c74
+#define MGAREG_ALPHAYINC 0x2c78
+#define MGAREG_ALPHACTRL 0x2c7c
+# define MGA_SRC_ZERO (0x00000000)
+# define MGA_SRC_ONE (0x00000001)
+# define MGA_SRC_DST_COLOR (0x00000002)
+# define MGA_SRC_ONE_MINUS_DST_COLOR (0x00000003)
+# define MGA_SRC_ALPHA (0x00000004)
+# define MGA_SRC_ONE_MINUS_SRC_ALPHA (0x00000005)
+# define MGA_SRC_DST_ALPHA (0x00000006)
+# define MGA_SRC_ONE_MINUS_DST_ALPHA (0x00000007)
+# define MGA_SRC_SRC_ALPHA_SATURATE (0x00000008)
+# define MGA_SRC_BLEND_MASK (0x0000000f)
+# define MGA_DST_ZERO (0x00000000)
+# define MGA_DST_ONE (0x00000010)
+# define MGA_DST_SRC_COLOR (0x00000020)
+# define MGA_DST_ONE_MINUS_SRC_COLOR (0x00000030)
+# define MGA_DST_SRC_ALPHA (0x00000040)
+# define MGA_DST_ONE_MINUS_SRC_ALPHA (0x00000050)
+# define MGA_DST_DST_ALPHA (0x00000060)
+# define MGA_DST_ONE_MINUS_DST_ALPHA (0x00000070)
+# define MGA_DST_BLEND_MASK (0x00000070)
+# define MGA_ALPHACHANNEL (0x00000100)
+# define MGA_VIDEOALPHA (0x00000200)
+# define MGA_DIFFUSEDALPHA (0x01000000)
+# define MGA_MODULATEDALPHA (0x02000000)
+#define MGAREG_TDUALSTAGE0 (0x2CF8)
+#define MGAREG_TDUALSTAGE1 (0x2CFC)
+# define MGA_TDS_COLOR_ARG2_DIFFUSE (0x00000000)
+# define MGA_TDS_COLOR_ARG2_SPECULAR (0x00000001)
+# define MGA_TDS_COLOR_ARG2_FCOL (0x00000002)
+# define MGA_TDS_COLOR_ARG2_PREVSTAGE (0x00000003)
+# define MGA_TDS_COLOR_ALPHA_DIFFUSE (0x00000000)
+# define MGA_TDS_COLOR_ALPHA_FCOL (0x00000004)
+# define MGA_TDS_COLOR_ALPHA_CURRTEX (0x00000008)
+# define MGA_TDS_COLOR_ALPHA_PREVTEX (0x0000000c)
+# define MGA_TDS_COLOR_ALPHA_PREVSTAGE (0x00000010)
+# define MGA_TDS_COLOR_ARG1_REPLICATEALPHA (0x00000020)
+# define MGA_TDS_COLOR_ARG1_INV (0x00000040)
+# define MGA_TDS_COLOR_ARG2_REPLICATEALPHA (0x00000080)
+# define MGA_TDS_COLOR_ARG2_INV (0x00000100)
+# define MGA_TDS_COLOR_ALPHA1INV (0x00000200)
+# define MGA_TDS_COLOR_ALPHA2INV (0x00000400)
+# define MGA_TDS_COLOR_ARG1MUL_ALPHA1 (0x00000800)
+# define MGA_TDS_COLOR_ARG2MUL_ALPHA2 (0x00001000)
+# define MGA_TDS_COLOR_ARG1ADD_MULOUT (0x00002000)
+# define MGA_TDS_COLOR_ARG2ADD_MULOUT (0x00004000)
+# define MGA_TDS_COLOR_MODBRIGHT_2X (0x00008000)
+# define MGA_TDS_COLOR_MODBRIGHT_4X (0x00010000)
+# define MGA_TDS_COLOR_ADD_SUB (0x00000000)
+# define MGA_TDS_COLOR_ADD_ADD (0x00020000)
+# define MGA_TDS_COLOR_ADD2X (0x00040000)
+# define MGA_TDS_COLOR_ADDBIAS (0x00080000)
+# define MGA_TDS_COLOR_BLEND (0x00100000)
+# define MGA_TDS_COLOR_SEL_ARG1 (0x00000000)
+# define MGA_TDS_COLOR_SEL_ARG2 (0x00200000)
+# define MGA_TDS_COLOR_SEL_ADD (0x00400000)
+# define MGA_TDS_COLOR_SEL_MUL (0x00600000)
+# define MGA_TDS_ALPHA_ARG1_INV (0x00800000)
+# define MGA_TDS_ALPHA_ARG2_DIFFUSE (0x00000000)
+# define MGA_TDS_ALPHA_ARG2_FCOL (0x01000000)
+# define MGA_TDS_ALPHA_ARG2_PREVTEX (0x02000000)
+# define MGA_TDS_ALPHA_ARG2_PREVSTAGE (0x03000000)
+# define MGA_TDS_ALPHA_ARG2_INV (0x04000000)
+# define MGA_TDS_ALPHA_ADD (0x08000000)
+# define MGA_TDS_ALPHA_ADDBIAS (0x10000000)
+# define MGA_TDS_ALPHA_ADD2X (0x20000000)
+# define MGA_TDS_ALPHA_SEL_ARG1 (0x00000000)
+# define MGA_TDS_ALPHA_SEL_ARG2 (0x40000000)
+# define MGA_TDS_ALPHA_SEL_ADD (0x80000000)
+# define MGA_TDS_ALPHA_SEL_MUL (0xc0000000)
+
+#define MGAREG_DWGSYNC 0x2c4c
+
+#define MGAREG_AGP_PLL 0x1e4c
+#define MGA_AGP2XPLL_ENABLE 0x1
+#define MGA_AGP2XPLL_DISABLE 0x0
+
+#endif
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
new file mode 100644
index 000000000000..12264793108f
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+#include "drmP.h"
+#include "mgag200_drv.h"
+#include <ttm/ttm_page_alloc.h>
+
+static inline struct mga_device *
+mgag200_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct mga_device, ttm.bdev);
+}
+
+static int
+mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
+{
+ return ttm_mem_global_init(ref->object);
+}
+
+static void
+mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
+{
+ ttm_mem_global_release(ref->object);
+}
+
+static int mgag200_ttm_global_init(struct mga_device *ast)
+{
+ struct drm_global_reference *global_ref;
+ int r;
+
+ global_ref = &ast->ttm.mem_global_ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
+ global_ref->size = sizeof(struct ttm_mem_global);
+ global_ref->init = &mgag200_ttm_mem_global_init;
+ global_ref->release = &mgag200_ttm_mem_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM memory accounting "
+ "subsystem.\n");
+ return r;
+ }
+
+ ast->ttm.bo_global_ref.mem_glob =
+ ast->ttm.mem_global_ref.object;
+ global_ref = &ast->ttm.bo_global_ref.ref;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
+ global_ref->size = sizeof(struct ttm_bo_global);
+ global_ref->init = &ttm_bo_global_init;
+ global_ref->release = &ttm_bo_global_release;
+ r = drm_global_item_ref(global_ref);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up TTM BO subsystem.\n");
+ drm_global_item_unref(&ast->ttm.mem_global_ref);
+ return r;
+ }
+ return 0;
+}
+
+void
+mgag200_ttm_global_release(struct mga_device *ast)
+{
+ if (ast->ttm.mem_global_ref.release == NULL)
+ return;
+
+ drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&ast->ttm.mem_global_ref);
+ ast->ttm.mem_global_ref.release = NULL;
+}
+
+
+static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct mgag200_bo *bo;
+
+ bo = container_of(tbo, struct mgag200_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &mgag200_bo_ttm_destroy)
+ return true;
+ return false;
+}
+
+static int
+mgag200_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void
+mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct mgag200_bo *mgabo = mgag200_bo(bo);
+
+ if (!mgag200_ttm_bo_is_mgag200_bo(bo))
+ return;
+
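+ /* on eviction, push the buffer back to system memory */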
+ mgag200_ttm_placement(mgabo, TTM_PL_FLAG_SYSTEM);
+ *pl = mgabo->placement;
+}
+
+static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+ return 0;
+}
+
+static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct mga_device *mdev = mgag200_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
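+ /* VRAM sits behind PCI BAR 0; report its bus address so TTM can map it */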
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(mdev->dev->pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ return 0;
+}
+
+static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int mgag200_bo_move(struct ttm_buffer_object *bo,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
+{
+ int r;
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ return r;
+}
+
+
+static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func mgag200_tt_backend_func = {
+ .destroy = &mgag200_ttm_backend_destroy,
+};
+
+
+struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (tt == NULL)
+ return NULL;
+ tt->func = &mgag200_tt_backend_func;
+ if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
+ kfree(tt);
+ return NULL;
+ }
+ return tt;
+}
+
+static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ return ttm_pool_populate(ttm);
+}
+
+static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
+}
+
+struct ttm_bo_driver mgag200_bo_driver = {
+ .ttm_tt_create = mgag200_ttm_tt_create,
+ .ttm_tt_populate = mgag200_ttm_tt_populate,
+ .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
+ .init_mem_type = mgag200_bo_init_mem_type,
+ .evict_flags = mgag200_bo_evict_flags,
+ .move = mgag200_bo_move,
+ .verify_access = mgag200_bo_verify_access,
+ .io_mem_reserve = &mgag200_ttm_io_mem_reserve,
+ .io_mem_free = &mgag200_ttm_io_mem_free,
+};
+
+int mgag200_mm_init(struct mga_device *mdev)
+{
+ int ret;
+ struct drm_device *dev = mdev->dev;
+ struct ttm_bo_device *bdev = &mdev->ttm.bdev;
+
+ ret = mgag200_ttm_global_init(mdev);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&mdev->ttm.bdev,
+ mdev->ttm.bo_global_ref.ref.object,
+ &mgag200_bo_driver, DRM_FILE_PAGE_OFFSET,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, mdev->mc.vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ return ret;
+ }
+
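+ /* mark the VRAM aperture write-combining for faster CPU access */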
+ mdev->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+
+ return 0;
+}
+
+void mgag200_mm_fini(struct mga_device *mdev)
+{
+ struct drm_device *dev = mdev->dev;
+ ttm_bo_device_release(&mdev->ttm.bdev);
+
+ mgag200_ttm_global_release(mdev);
+
+ if (mdev->fb_mtrr >= 0) {
+ drm_mtrr_del(mdev->fb_mtrr,
+ pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
+ mdev->fb_mtrr = -1;
+ }
+}
+
+void mgag200_ttm_placement(struct mgag200_bo *bo, int domain)
+{
+ u32 c = 0;
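+ /* build the TTM placement list from the requested domains, falling
+ * back to cached system memory if none were given */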
+ bo->placement.fpfn = 0;
+ bo->placement.lpfn = 0;
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+}
+
+int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+void mgag200_bo_unreserve(struct mgag200_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+int mgag200_bo_create(struct drm_device *dev, int size, int align,
+ uint32_t flags, struct mgag200_bo **pmgabo)
+{
+ struct mga_device *mdev = dev->dev_private;
+ struct mgag200_bo *mgabo;
+ size_t acc_size;
+ int ret;
+
+ mgabo = kzalloc(sizeof(struct mgag200_bo), GFP_KERNEL);
+ if (!mgabo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(dev, &mgabo->gem, size);
+ if (ret) {
+ kfree(mgabo);
+ return ret;
+ }
+
+ mgabo->gem.driver_private = NULL;
+ mgabo->bo.bdev = &mdev->ttm.bdev;
+
+ mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&mdev->ttm.bdev, size,
+ sizeof(struct mgag200_bo));
+
+ ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
+ ttm_bo_type_device, &mgabo->placement,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ mgag200_bo_ttm_destroy);
+ if (ret)
+ return ret;
+
+ *pmgabo = mgabo;
+ return 0;
+}
+
+static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
+{
+ int i, ret;
+
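+ /* already pinned: just bump the count and report the current offset */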
+ if (bo->pin_count) {
+ bo->pin_count++;
+ if (gpu_addr)
+ *gpu_addr = mgag200_bo_gpu_offset(bo);
+ return 0;
+ }
+
+ mgag200_ttm_placement(bo, pl_flag);
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ bo->pin_count = 1;
+ if (gpu_addr)
+ *gpu_addr = mgag200_bo_gpu_offset(bo);
+ return 0;
+}
+
+int mgag200_bo_unpin(struct mgag200_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int mgag200_bo_push_sysram(struct mgag200_bo *bo)
+{
+ int i, ret;
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual)
+ ttm_bo_kunmap(&bo->kmap);
+
+ mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+ for (i = 0; i < bo->placement.num_placement ; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false);
+ if (ret) {
+ DRM_ERROR("pushing to VRAM failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv;
+ struct mga_device *mdev;
+
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return drm_mmap(filp, vma);
+
+ file_priv = filp->private_data;
+ mdev = file_priv->minor->dev->dev_private;
+ return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index c2a8511e855a..298c09b75569 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -662,6 +662,12 @@ error:
return ret;
}
+static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
+ .set_gpu_state = nouveau_switcheroo_set_state,
+ .reprobe = nouveau_switcheroo_reprobe,
+ .can_switch = nouveau_switcheroo_can_switch,
+};
+
int
nouveau_card_init(struct drm_device *dev)
{
@@ -670,9 +676,7 @@ nouveau_card_init(struct drm_device *dev)
int ret, e = 0;
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
- vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
- nouveau_switcheroo_reprobe,
- nouveau_switcheroo_can_switch);
+ vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
/* Initialise internal driver API hooks */
ret = nouveau_init_engine_ptrs(dev);
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 9d83729956ff..1efb6eba3c25 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -70,8 +70,9 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
- radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
- radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o si_blit_shaders.o
+ evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
+ atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
+ si_blit_shaders.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index b92a694caa0d..e7b1ec5ae8c6 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1926,7 +1926,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
r600_hdmi_enable(encoder);
- r600_hdmi_setmode(encoder, adjusted_mode);
+ if (ASIC_IS_DCE4(rdev))
+ evergreen_hdmi_setmode(encoder, adjusted_mode);
+ else
+ r600_hdmi_setmode(encoder, adjusted_mode);
}
}
@@ -2081,6 +2084,7 @@ radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
+ struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
@@ -2089,8 +2093,16 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
(radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (dig)
+ if (dig) {
dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+ if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) {
+ if (rdev->family >= CHIP_R600)
+ dig->afmt = rdev->mode_info.afmt[dig->dig_encoder];
+ else
+ /* RS600/690/740 have only 1 afmt block */
+ dig->afmt = rdev->mode_info.afmt[0];
+ }
+ }
}
radeon_atom_output_lock(encoder, true);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index ecc29bc1cbe3..58991af90502 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3435,10 +3435,6 @@ int evergreen_init(struct radeon_device *rdev)
{
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -3550,7 +3546,6 @@ void evergreen_fini(struct radeon_device *rdev)
evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 222acd2d33df..30f04800b38b 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -637,7 +637,6 @@ int evergreen_blit_init(struct radeon_device *rdev)
if (rdev->r600_blit.shader_obj)
goto done;
- mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
if (rdev->family < CHIP_CAYMAN)
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 70089d32b80f..4e7dd2b4843d 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1057,7 +1057,7 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
uint32_t header, h_idx, reg, wait_reg_mem_info;
volatile uint32_t *ib;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
/* parse the WAIT_REG_MEM */
r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1215,7 +1215,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
if (!(evergreen_reg_safe_bm[i] & m))
return 0;
}
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
switch (reg) {
/* force following reg to 0 in an attempt to disable out buffer
* which will need us to better understand how it works to perform
@@ -1896,7 +1896,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u32 idx_value;
track = (struct evergreen_cs_track *)p->track;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
idx_value = radeon_get_ib_value(p, idx);
@@ -2610,8 +2610,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
- for (r = 0; r < p->ib->length_dw; r++) {
- printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
mdelay(1);
}
#endif
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
new file mode 100644
index 000000000000..a51f880985f8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Christian König.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ * Rafał Miłecki
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+#include "atom.h"
+
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
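+ /* the sink regenerates the audio clock from N and CTS:
+ * 128 * audio_rate = TMDS_clock * N / CTS */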
+ WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr.cts_32khz));
+ WREG32(HDMI_ACR_32_1 + offset, acr.n_32khz);
+
+ WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr.cts_44_1khz));
+ WREG32(HDMI_ACR_44_1 + offset, acr.n_44_1khz);
+
+ WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr.cts_48khz));
+ WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
+}
+
+/*
+ * compute the checksum for a given infoframe
+ */
+static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
+ uint8_t versionNumber,
+ uint8_t length,
+ uint8_t *frame)
+{
+ int i;
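+ /* the checksum byte is chosen so header plus payload bytes
+ * sum to zero modulo 256 */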
+ frame[0] = packetType + versionNumber + length;
+ for (i = 1; i <= length; i++)
+ frame[0] += frame[i];
+ frame[0] = 0x100 - frame[0];
+}
+
+/*
+ * build a HDMI Video Info Frame
+ */
+static void evergreen_hdmi_videoinfoframe(
+ struct drm_encoder *encoder,
+ uint8_t color_format,
+ int active_information_present,
+ uint8_t active_format_aspect_ratio,
+ uint8_t scan_information,
+ uint8_t colorimetry,
+ uint8_t ex_colorimetry,
+ uint8_t quantization,
+ int ITC,
+ uint8_t picture_aspect_ratio,
+ uint8_t video_format_identification,
+ uint8_t pixel_repetition,
+ uint8_t non_uniform_picture_scaling,
+ uint8_t bar_info_data_valid,
+ uint16_t top_bar,
+ uint16_t bottom_bar,
+ uint16_t left_bar,
+ uint16_t right_bar
+)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ uint8_t frame[14];
+
+ frame[0x0] = 0;
+ frame[0x1] =
+ (scan_information & 0x3) |
+ ((bar_info_data_valid & 0x3) << 2) |
+ ((active_information_present & 0x1) << 4) |
+ ((color_format & 0x3) << 5);
+ frame[0x2] =
+ (active_format_aspect_ratio & 0xF) |
+ ((picture_aspect_ratio & 0x3) << 4) |
+ ((colorimetry & 0x3) << 6);
+ frame[0x3] =
+ (non_uniform_picture_scaling & 0x3) |
+ ((quantization & 0x3) << 2) |
+ ((ex_colorimetry & 0x7) << 4) |
+ ((ITC & 0x1) << 7);
+ frame[0x4] = (video_format_identification & 0x7F);
+ frame[0x5] = (pixel_repetition & 0xF);
+ frame[0x6] = (top_bar & 0xFF);
+ frame[0x7] = (top_bar >> 8);
+ frame[0x8] = (bottom_bar & 0xFF);
+ frame[0x9] = (bottom_bar >> 8);
+ frame[0xA] = (left_bar & 0xFF);
+ frame[0xB] = (left_bar >> 8);
+ frame[0xC] = (right_bar & 0xFF);
+ frame[0xD] = (right_bar >> 8);
+
+ evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+ /* Our header values (type, version, length) should be alright; Intel
+ * uses the same ones. The checksum function also seems to be OK - it
+ * works fine for the audio infoframe. However, the calculated value is
+ * always lower by 2 than what fglrx produces, and TVs that strictly
+ * check the checksum then refuse to display anything. Hack it manually
+ * here to work around this issue. */
+ frame[0x0] += 2;
+
+ WREG32(AFMT_AVI_INFO0 + offset,
+ frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
+ WREG32(AFMT_AVI_INFO1 + offset,
+ frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
+ WREG32(AFMT_AVI_INFO2 + offset,
+ frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
+ WREG32(AFMT_AVI_INFO3 + offset,
+ frame[0xC] | (frame[0xD] << 8));
+}
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset;
+
+ if (ASIC_IS_DCE5(rdev))
+ return;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
+
+ r600_audio_set_clock(encoder, mode->clock);
+
+ WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+ HDMI_NULL_SEND); /* send null packets when required */
+
+ WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+ WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+ HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+
+ WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+ HDMI_ACR_SOURCE); /* select SW CTS value */
+
+ WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+ HDMI_NULL_SEND | /* send null packets when required */
+ HDMI_GC_SEND | /* send general control packets */
+ HDMI_GC_CONT); /* send general control packets every frame */
+
+ WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+
+ WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
+ AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
+
+ WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
+ HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+ WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
+
+ evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0);
+
+ evergreen_hdmi_update_ACR(encoder, mode->clock);
+
+ /* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
+ WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+}
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 9cd2657eb2ca..b01c2dd627b0 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1629,10 +1629,6 @@ int cayman_init(struct radeon_device *rdev)
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -1744,7 +1740,6 @@ void cayman_fini(struct radeon_device *rdev)
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ad6ceb731713..fb44e7e49083 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -139,9 +139,9 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
}
tmp |= tile_flags;
- p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
+ p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
} else
- p->ib->ptr[idx] = (value & 0xffc00000) | tmp;
+ p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
return 0;
}
@@ -156,7 +156,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
volatile uint32_t *ib;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
c = radeon_get_ib_value(p, idx++) & 0x1F;
if (c > 16) {
@@ -1275,7 +1275,7 @@ void r100_cs_dump_packet(struct radeon_cs_parser *p,
unsigned i;
unsigned idx;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx;
for (i = 0; i <= (pkt->count + 1); i++, idx++) {
DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@ -1354,7 +1354,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
uint32_t header, h_idx, reg;
volatile uint32_t *ib;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
/* parse the wait until */
r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@@ -1533,7 +1533,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
u32 tile_flags = 0;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
@@ -1889,7 +1889,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
volatile uint32_t *ib;
int r;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
switch (pkt->opcode) {
@@ -2008,6 +2008,8 @@ int r100_cs_parse(struct radeon_cs_parser *p)
int r;
track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (!track)
+ return -ENOMEM;
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
@@ -3684,7 +3686,7 @@ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
- struct radeon_ib *ib;
+ struct radeon_ib ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
@@ -3700,22 +3702,22 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
if (r) {
return r;
}
- ib->ptr[0] = PACKET0(scratch, 0);
- ib->ptr[1] = 0xDEADBEEF;
- ib->ptr[2] = PACKET2(0);
- ib->ptr[3] = PACKET2(0);
- ib->ptr[4] = PACKET2(0);
- ib->ptr[5] = PACKET2(0);
- ib->ptr[6] = PACKET2(0);
- ib->ptr[7] = PACKET2(0);
- ib->length_dw = 8;
- r = radeon_ib_schedule(rdev, ib);
+ ib.ptr[0] = PACKET0(scratch, 0);
+ ib.ptr[1] = 0xDEADBEEF;
+ ib.ptr[2] = PACKET2(0);
+ ib.ptr[3] = PACKET2(0);
+ ib.ptr[4] = PACKET2(0);
+ ib.ptr[5] = PACKET2(0);
+ ib.ptr[6] = PACKET2(0);
+ ib.ptr[7] = PACKET2(0);
+ ib.length_dw = 8;
+ r = radeon_ib_schedule(rdev, &ib);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
return r;
}
- r = radeon_fence_wait(ib->fence, false);
+ r = radeon_fence_wait(ib.fence, false);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index a59cc474d537..a26144d01207 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -154,7 +154,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
u32 tile_flags = 0;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
switch (reg) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 6419a5900e67..97722a33e513 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -604,7 +604,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
int r;
u32 idx_value;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
@@ -1146,7 +1146,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
unsigned idx;
int r;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
switch(pkt->opcode) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index d02f13fdaa66..ab5d6f2a06c2 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -713,6 +713,14 @@ void r600_hpd_init(struct radeon_device *rdev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS to avoid breaking the
+ * aux dp channel on imac; this helps (but does not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ */
+ continue;
+ }
if (ASIC_IS_DCE3(rdev)) {
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
if (ASIC_IS_DCE32(rdev))
@@ -2363,20 +2371,15 @@ int r600_copy_blit(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
+ struct radeon_sa_bo *vb = NULL;
int r;
- mutex_lock(&rdev->r600_blit.mutex);
- rdev->r600_blit.vb_ib = NULL;
- r = r600_blit_prepare_copy(rdev, num_gpu_pages);
+ r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb);
if (r) {
- if (rdev->r600_blit.vb_ib)
- radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
- mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages);
- r600_blit_done_copy(rdev, fence);
- mutex_unlock(&rdev->r600_blit.mutex);
+ r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
+ r600_blit_done_copy(rdev, fence, vb);
return 0;
}
@@ -2557,10 +2560,6 @@ int r600_init(struct radeon_device *rdev)
if (r600_debugfs_mc_info_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for mc !\n");
}
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -2658,7 +2657,6 @@ void r600_fini(struct radeon_device *rdev)
r600_vram_scratch_fini(rdev);
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
@@ -2687,7 +2685,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
- struct radeon_ib *ib;
+ struct radeon_ib ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
@@ -2705,18 +2703,18 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
return r;
}
- ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
- ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- ib->ptr[2] = 0xDEADBEEF;
- ib->length_dw = 3;
- r = radeon_ib_schedule(rdev, ib);
+ ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+ ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ ib.ptr[2] = 0xDEADBEEF;
+ ib.length_dw = 3;
+ r = radeon_ib_schedule(rdev, &ib);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
return r;
}
- r = radeon_fence_wait(ib->fence, false);
+ r = radeon_fence_wait(ib.fence, false);
if (r) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
return r;
@@ -2728,7 +2726,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
} else {
DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index b922a3cd90db..b5a602e71c12 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -30,6 +30,29 @@
#include "atom.h"
/*
+ * check if enc_priv stores radeon_encoder_atom_dig
+ */
+static bool radeon_dig_encoder(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ return true;
+ }
+ return false;
+}
+
+/*
* check if the chipset is supported
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
@@ -135,6 +158,8 @@ void r600_audio_update_hdmi(struct work_struct *work)
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (!radeon_dig_encoder(encoder))
+ continue;
if (changes || r600_hdmi_buffer_status_changed(encoder))
r600_hdmi_update_audio_settings(encoder);
}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index db38f587f27a..ef20822a1586 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -513,7 +513,6 @@ int r600_blit_init(struct radeon_device *rdev)
rdev->r600_blit.primitives.set_default_state = set_default_state;
rdev->r600_blit.ring_size_common = 40; /* shaders + def state */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
@@ -528,7 +527,6 @@ int r600_blit_init(struct radeon_device *rdev)
if (rdev->r600_blit.shader_obj)
goto done;
- mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;
if (rdev->family >= CHIP_RV770)
@@ -621,27 +619,6 @@ void r600_blit_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
-static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
-{
- int r;
- r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
- &rdev->r600_blit.vb_ib, size);
- if (r) {
- DRM_ERROR("failed to get IB for vertex buffer\n");
- return r;
- }
-
- rdev->r600_blit.vb_total = size;
- rdev->r600_blit.vb_used = 0;
- return 0;
-}
-
-static void r600_vb_ib_put(struct radeon_device *rdev)
-{
- radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
- radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
-}
-
static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
int *width, int *height, int max_dim)
{
@@ -688,7 +665,8 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
}
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+ struct radeon_sa_bo **vb)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
@@ -705,46 +683,54 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
}
/* 48 bytes for vertex per loop */
- r = r600_vb_ib_get(rdev, (num_loops*48)+256);
- if (r)
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
+ (num_loops*48)+256, 256, true);
+ if (r) {
return r;
+ }
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
ring_size += rdev->r600_blit.ring_size_common;
r = radeon_ring_lock(rdev, ring, ring_size);
- if (r)
+ if (r) {
+ radeon_sa_bo_free(rdev, vb, NULL);
return r;
+ }
rdev->r600_blit.primitives.set_default_state(rdev);
rdev->r600_blit.primitives.set_shaders(rdev);
return 0;
}
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
+ struct radeon_sa_bo *vb)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- if (rdev->r600_blit.vb_ib)
- r600_vb_ib_put(rdev);
-
- if (fence)
- r = radeon_fence_emit(rdev, fence);
+ r = radeon_fence_emit(rdev, fence);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return;
+ }
- radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_sa_bo_free(rdev, &vb, fence);
}
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages)
+ unsigned num_gpu_pages,
+ struct radeon_sa_bo *vb)
{
u64 vb_gpu_addr;
- u32 *vb;
+ u32 *vb_cpu_addr;
- DRM_DEBUG("emitting copy %16llx %16llx %d %d\n",
- src_gpu_addr, dst_gpu_addr,
- num_gpu_pages, rdev->r600_blit.vb_used);
- vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
+ DRM_DEBUG("emitting copy %16llx %16llx %d\n",
+ src_gpu_addr, dst_gpu_addr, num_gpu_pages);
+ vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
+ vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
while (num_gpu_pages) {
int w, h;
@@ -756,39 +742,34 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
- if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
- WARN_ON(1);
- }
-
- vb[0] = 0;
- vb[1] = 0;
- vb[2] = 0;
- vb[3] = 0;
+ vb_cpu_addr[0] = 0;
+ vb_cpu_addr[1] = 0;
+ vb_cpu_addr[2] = 0;
+ vb_cpu_addr[3] = 0;
- vb[4] = 0;
- vb[5] = i2f(h);
- vb[6] = 0;
- vb[7] = i2f(h);
+ vb_cpu_addr[4] = 0;
+ vb_cpu_addr[5] = i2f(h);
+ vb_cpu_addr[6] = 0;
+ vb_cpu_addr[7] = i2f(h);
- vb[8] = i2f(w);
- vb[9] = i2f(h);
- vb[10] = i2f(w);
- vb[11] = i2f(h);
+ vb_cpu_addr[8] = i2f(w);
+ vb_cpu_addr[9] = i2f(h);
+ vb_cpu_addr[10] = i2f(w);
+ vb_cpu_addr[11] = i2f(h);
rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
w, h, w, src_gpu_addr, size_in_bytes);
rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
w, h, dst_gpu_addr);
rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
- vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
rdev->r600_blit.primitives.draw_auto(rdev);
rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
size_in_bytes, dst_gpu_addr);
- vb += 12;
- rdev->r600_blit.vb_used += 4*12;
+ vb_cpu_addr += 12;
+ vb_gpu_addr += 4*12;
src_gpu_addr += size_in_bytes;
dst_gpu_addr += size_in_bytes;
num_gpu_pages -= pages_per_loop;
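A note on the "48 bytes for vertex per loop" sizing above: each rectangle is emitted as three corner vertices, (0, 0), (0, h) and (w, h), and each vertex occupies four i2f()-encoded dwords (destination position followed by matching source texture coordinates), i.e. 3 * 4 * 4 bytes = 48 bytes written through vb_cpu_addr per iteration. An illustrative layout under that reading of the dword stream; these structs do not exist in the driver:

/* Illustration only: how the 12 dwords written per loop can be viewed. */
struct blit_vertex {
        u32 x, y;       /* destination position, i2f()-encoded */
        u32 s, t;       /* source texture coordinate, i2f()-encoded */
};

struct blit_rect_vb {
        struct blit_vertex v[3];        /* (0,0), (0,h), (w,h) */
};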
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index b8e12af304a9..0133f5f09bd6 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -345,7 +345,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
u32 height, height_align, pitch, pitch_align, depth_align;
u64 base_offset, base_align;
struct array_mode_checker array_check;
- volatile u32 *ib = p->ib->ptr;
+ volatile u32 *ib = p->ib.ptr;
unsigned array_mode;
u32 format;
@@ -471,7 +471,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
u64 base_offset, base_align;
struct array_mode_checker array_check;
int array_mode;
- volatile u32 *ib = p->ib->ptr;
+ volatile u32 *ib = p->ib.ptr;
if (track->db_bo == NULL) {
@@ -961,7 +961,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
uint32_t header, h_idx, reg, wait_reg_mem_info;
volatile uint32_t *ib;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
/* parse the WAIT_REG_MEM */
r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
@@ -1110,7 +1110,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
m = 1 << ((reg >> 2) & 31);
if (!(r600_reg_safe_bm[i] & m))
return 0;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
switch (reg) {
/* force following reg to 0 in an attempt to disable out buffer
* which will need us to better understand how it works to perform
@@ -1714,7 +1714,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u32 idx_value;
track = (struct r600_cs_track *)p->track;
- ib = p->ib->ptr;
+ ib = p->ib.ptr;
idx = pkt->idx + 1;
idx_value = radeon_get_ib_value(p, idx);
@@ -2249,8 +2249,8 @@ int r600_cs_parse(struct radeon_cs_parser *p)
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
- for (r = 0; r < p->ib->length_dw; r++) {
- printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
+ for (r = 0; r < p->ib.length_dw; r++) {
+ printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
mdelay(1);
}
#endif
@@ -2298,7 +2298,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
{
struct radeon_cs_parser parser;
struct radeon_cs_chunk *ib_chunk;
- struct radeon_ib fake_ib;
struct r600_cs_track *track;
int r;
@@ -2314,9 +2313,8 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
parser.dev = &dev->pdev->dev;
parser.rdev = NULL;
parser.family = family;
- parser.ib = &fake_ib;
parser.track = track;
- fake_ib.ptr = ib;
+ parser.ib.ptr = ib;
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
@@ -2333,8 +2331,8 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
* input memory (cached) and write to the IB (which can be
* uncached). */
ib_chunk = &parser.chunks[parser.chunk_ib_idx];
- parser.ib->length_dw = ib_chunk->length_dw;
- *l = parser.ib->length_dw;
+ parser.ib.length_dw = ib_chunk->length_dw;
+ *l = parser.ib.length_dw;
r = r600_cs_parse(&parser);
if (r) {
DRM_ERROR("Invalid command stream !\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index c6de0022c070..31d19509e1d5 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -53,19 +53,7 @@ enum r600_hdmi_iec_status_bits {
AUDIO_STATUS_LEVEL = 0x80
};
-struct {
- uint32_t Clock;
-
- int N_32kHz;
- int CTS_32kHz;
-
- int N_44_1kHz;
- int CTS_44_1kHz;
-
- int N_48kHz;
- int CTS_48kHz;
-
-} r600_hdmi_ACR[] = {
+struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
/* 32kHz 44.1kHz 48kHz */
/* Clock N CTS N CTS N CTS */
{ 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
@@ -84,7 +72,7 @@ struct {
/*
* calculate CTS value if it's not found in the table
*/
-static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
+static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
{
if (*CTS == 0)
*CTS = clock * N / (128 * freq) * 1000;
@@ -92,6 +80,24 @@ static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
N, *CTS, freq);
}
+struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
+{
+ struct radeon_hdmi_acr res;
+ u8 i;
+
+ for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
+ r600_hdmi_predefined_acr[i].clock != 0; i++)
+ ;
+ res = r600_hdmi_predefined_acr[i];
+
+ /* In case some CTS values are missing */
+ r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
+ r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
+ r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
+
+ return res;
+}
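Background for these N/CTS pairs: the HDMI audio clock regeneration relation is 128 * fs = f_TMDS * N / CTS, i.e. CTS = f_TMDS * N / (128 * fs). A worked example as a stand-alone C program, for illustration only (numbers chosen for the example, not read from the table above):

/* ACR worked example: 74.25 MHz pixel clock, N = 6144 for 48 kHz audio.
 * Prints "CTS = 74250".  Illustrates the formula only; not driver code. */
#include <stdio.h>

int main(void)
{
        unsigned long long tmds = 74250000ULL; /* TMDS/pixel clock in Hz */
        unsigned long long n = 6144;           /* N for 48 kHz audio */
        unsigned long long fs = 48000;         /* audio sample rate in Hz */

        printf("CTS = %llu\n", tmds * n / (128 * fs));
        return 0;
}

Note that r600_hdmi_calc_cts() above works on a clock given in kHz and multiplies by 1000 after an integer division, so it is only exact when the true CTS happens to be a multiple of 1000; that is presumably why the common pixel clocks keep predefined entries in r600_hdmi_predefined_acr.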
+
/*
* update the N and CTS parameters for a given pixel clock rate
*/
@@ -99,30 +105,19 @@ static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
- int CTS;
- int N;
- int i;
+ struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz));
+ WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz);
- for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++);
-
- CTS = r600_hdmi_ACR[i].CTS_32kHz;
- N = r600_hdmi_ACR[i].N_32kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 32000);
- WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(CTS));
- WREG32(HDMI0_ACR_32_1 + offset, N);
-
- CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
- N = r600_hdmi_ACR[i].N_44_1kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
- WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(CTS));
- WREG32(HDMI0_ACR_44_1 + offset, N);
-
- CTS = r600_hdmi_ACR[i].CTS_48kHz;
- N = r600_hdmi_ACR[i].N_48kHz;
- r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
- WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(CTS));
- WREG32(HDMI0_ACR_48_1 + offset, N);
+ WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz));
+ WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz);
+
+ WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
+ WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
}
/*
@@ -166,7 +161,9 @@ static void r600_hdmi_videoinfoframe(
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
uint8_t frame[14];
@@ -232,7 +229,9 @@ static void r600_hdmi_audioinfoframe(
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
uint8_t frame[11];
@@ -259,11 +258,13 @@ static void r600_hdmi_audioinfoframe(
/*
* test if audio buffer is filled enough to start playing
*/
-static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
+static bool r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
return (RREG32(HDMI0_STATUS + offset) & 0x10) != 0;
}
@@ -274,14 +275,15 @@ static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
int status, result;
- if (!radeon_encoder->hdmi_enabled)
+ if (!dig->afmt || !dig->afmt->enabled)
return 0;
status = r600_hdmi_is_audio_buffer_filled(encoder);
- result = radeon_encoder->hdmi_buffer_status != status;
- radeon_encoder->hdmi_buffer_status = status;
+ result = dig->afmt->last_buffer_filled_status != status;
+ dig->afmt->last_buffer_filled_status = status;
return result;
}
@@ -289,26 +291,23 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
/*
* write the audio workaround status to the hardware
*/
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- uint32_t offset = radeon_encoder->hdmi_offset;
-
- if (!radeon_encoder->hdmi_enabled)
- return;
-
- if (!radeon_encoder->hdmi_audio_workaround ||
- r600_hdmi_is_audio_buffer_filled(encoder)) {
-
- /* disable audio workaround */
- WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset, 0x0001, ~0x1001);
-
- } else {
- /* enable audio workaround */
- WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset, 0x1001, ~0x1001);
- }
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+ bool hdmi_audio_workaround = false; /* FIXME */
+ u32 value;
+
+ if (!hdmi_audio_workaround ||
+ r600_hdmi_is_audio_buffer_filled(encoder))
+ value = 0; /* disable workaround */
+ else
+ value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
+ WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ value, ~HDMI0_AUDIO_TEST_EN);
}
@@ -319,29 +318,68 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset;
if (ASIC_IS_DCE5(rdev))
return;
- if (!to_radeon_encoder(encoder)->hdmi_enabled)
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (!dig->afmt->enabled)
return;
+ offset = dig->afmt->offset;
r600_audio_set_clock(encoder, mode->clock);
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND); /* send null packets when required */
+
WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
- WREG32(HDMI0_GC + offset, 0x0);
- WREG32(HDMI0_ACR_PACKET_CONTROL + offset, 0x1000);
- r600_hdmi_update_ACR(encoder, mode->clock);
+ if (ASIC_IS_DCE32(rdev)) {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ } else {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
+ HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
+ }
+
+ WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+ HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+ HDMI0_ACR_SOURCE); /* select SW CTS value */
+
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND | /* send null packets when required */
+ HDMI0_GC_SEND | /* send general control packets */
+ HDMI0_GC_CONT); /* send general control packets every frame */
- WREG32(HDMI0_INFOFRAME_CONTROL0 + offset, 0x13);
+ /* TODO: HDMI0_AUDIO_INFO_UPDATE */
+ WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
- WREG32(HDMI0_INFOFRAME_CONTROL1 + offset, 0x202);
+ WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
+ HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
+
+ WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ r600_hdmi_update_ACR(encoder, mode->clock);
+
+ /* it's unknown what these bits do exactly, but they're indeed quite useful for debugging */
WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
@@ -349,9 +387,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
r600_hdmi_audio_workaround(encoder);
-
- /* audio packets per line, does anyone know how to calc this ? */
- WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset, 0x00040000, ~0x001F0000);
}
/*
@@ -361,7 +396,9 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset;
int channels = r600_audio_channels(rdev);
int rate = r600_audio_rate(rdev);
@@ -371,8 +408,9 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
uint32_t iec;
- if (!to_radeon_encoder(encoder)->hdmi_enabled)
+ if (!dig->afmt || !dig->afmt->enabled)
return;
+ offset = dig->afmt->offset;
DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
@@ -390,86 +428,55 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
if (status_bits & AUDIO_STATUS_EMPHASIS)
iec |= 1 << 3;
- iec |= category_code << 8;
+ iec |= HDMI0_60958_CS_CATEGORY_CODE(category_code);
switch (rate) {
- case 32000: iec |= 0x3 << 24; break;
- case 44100: iec |= 0x0 << 24; break;
- case 88200: iec |= 0x8 << 24; break;
- case 176400: iec |= 0xc << 24; break;
- case 48000: iec |= 0x2 << 24; break;
- case 96000: iec |= 0xa << 24; break;
- case 192000: iec |= 0xe << 24; break;
+ case 32000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
+ break;
+ case 44100:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
+ break;
+ case 48000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
+ break;
+ case 88200:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
+ break;
+ case 96000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
+ break;
+ case 176400:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
+ break;
+ case 192000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
+ break;
}
WREG32(HDMI0_60958_0 + offset, iec);
iec = 0;
switch (bps) {
- case 16: iec |= 0x2; break;
- case 20: iec |= 0x3; break;
- case 24: iec |= 0xb; break;
+ case 16:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
+ break;
+ case 20:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
+ break;
+ case 24:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
+ break;
}
if (status_bits & AUDIO_STATUS_V)
iec |= 0x5 << 16;
-
WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
- /* 0x021 or 0x031 sets the audio frame length */
- WREG32(HDMI0_VBI_PACKET_CONTROL + offset, 0x31);
- r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
+ r600_hdmi_audioinfoframe(encoder, channels - 1, 0, 0, 0, 0, 0, 0, 0);
r600_hdmi_audio_workaround(encoder);
}
-static void r600_hdmi_assign_block(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
- u16 eg_offsets[] = {
- EVERGREEN_CRTC0_REGISTER_OFFSET,
- EVERGREEN_CRTC1_REGISTER_OFFSET,
- EVERGREEN_CRTC2_REGISTER_OFFSET,
- EVERGREEN_CRTC3_REGISTER_OFFSET,
- EVERGREEN_CRTC4_REGISTER_OFFSET,
- EVERGREEN_CRTC5_REGISTER_OFFSET,
- };
-
- if (!dig) {
- dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
- return;
- }
-
- if (ASIC_IS_DCE5(rdev)) {
- /* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- if (dig->dig_encoder >= ARRAY_SIZE(eg_offsets)) {
- dev_err(rdev->dev, "Enabling HDMI on unknown dig\n");
- return;
- }
- radeon_encoder->hdmi_offset = eg_offsets[dig->dig_encoder];
- /* Temp hack for Evergreen until we split r600_hdmi.c
- * Evergreen first block is 0x7030 instead of 0x7400.
- */
- radeon_encoder->hdmi_offset -= 0x3d0;
- } else if (ASIC_IS_DCE3(rdev)) {
- radeon_encoder->hdmi_offset = dig->dig_encoder ?
- DCE3_HDMI_OFFSET1 : DCE3_HDMI_OFFSET0;
- } else if (rdev->family >= CHIP_R600) {
- /* 2 routable blocks, but using dig_encoder should be fine */
- radeon_encoder->hdmi_offset = dig->dig_encoder ?
- DCE2_HDMI_OFFSET1 : DCE2_HDMI_OFFSET0;
- } else if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 ||
- rdev->family == CHIP_RS740) {
- /* Only 1 routable block */
- radeon_encoder->hdmi_offset = DCE2_HDMI_OFFSET0;
- }
- radeon_encoder->hdmi_enabled = true;
-}
-
/*
* enable the HDMI engine
*/
@@ -478,55 +485,57 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
+ u32 hdmi;
if (ASIC_IS_DCE5(rdev))
return;
- if (!radeon_encoder->hdmi_enabled) {
- r600_hdmi_assign_block(encoder);
- if (!radeon_encoder->hdmi_enabled) {
- dev_warn(rdev->dev, "Could not find HDMI block for "
- "0x%x encoder\n", radeon_encoder->encoder_id);
- return;
- }
- }
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
- offset = radeon_encoder->hdmi_offset;
- if (ASIC_IS_DCE5(rdev)) {
- /* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- WREG32_P(0x74fc + radeon_encoder->hdmi_offset, 0x1, ~0x1);
- } else if (ASIC_IS_DCE32(rdev)) {
- WREG32_P(AFMT_AUDIO_PACKET_CONTROL + radeon_encoder->hdmi_offset, 0x1, ~0x1);
- } else if (ASIC_IS_DCE3(rdev)) {
- /* TODO */
- } else if (rdev->family >= CHIP_R600) {
+ /* Older chipsets require setting HDMI and routing manually */
+ if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
~AVIVO_TMDSA_CNTL_HDMI_EN);
- WREG32(HDMI0_CONTROL + offset, 0x101);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
~AVIVO_LVTMA_CNTL_HDMI_EN);
- WREG32(HDMI0_CONTROL + offset, 0x105);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
break;
default:
- dev_err(rdev->dev, "Unknown HDMI output type\n");
+ dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
+ radeon_encoder->encoder_id);
break;
}
+ WREG32(HDMI0_CONTROL + offset, hdmi);
}
if (rdev->irq.installed) {
/* if irq is available use it */
- rdev->irq.afmt[offset == 0 ? 0 : 1] = true;
+ rdev->irq.afmt[dig->afmt->id] = true;
radeon_irq_set(rdev);
}
+ dig->afmt->enabled = true;
+
DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+ offset, radeon_encoder->encoder_id);
}
/*
@@ -537,49 +546,51 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
if (ASIC_IS_DCE5(rdev))
return;
- offset = radeon_encoder->hdmi_offset;
- if (!radeon_encoder->hdmi_enabled) {
- dev_err(rdev->dev, "Disabling not enabled HDMI\n");
+ /* Called for ATOM_ENCODER_MODE_HDMI only */
+ if (!dig || !dig->afmt) {
+ WARN_ON(1);
return;
}
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- offset, radeon_encoder->encoder_id);
+ offset, radeon_encoder->encoder_id);
/* disable irq */
- rdev->irq.afmt[offset == 0 ? 0 : 1] = false;
+ rdev->irq.afmt[dig->afmt->id] = false;
radeon_irq_set(rdev);
-
- if (ASIC_IS_DCE5(rdev)) {
- /* TODO */
- } else if (ASIC_IS_DCE4(rdev)) {
- WREG32_P(0x74fc + radeon_encoder->hdmi_offset, 0, ~0x1);
- } else if (ASIC_IS_DCE32(rdev)) {
- WREG32_P(AFMT_AUDIO_PACKET_CONTROL + radeon_encoder->hdmi_offset, 0, ~0x1);
- } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ /* Older chipsets not handled by AtomBIOS */
+ if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0,
~AVIVO_TMDSA_CNTL_HDMI_EN);
- WREG32(HDMI0_CONTROL + offset, 0);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(AVIVO_LVTMA_CNTL, 0,
~AVIVO_LVTMA_CNTL_HDMI_EN);
- WREG32(HDMI0_CONTROL + offset, 0);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
break;
default:
- dev_err(rdev->dev, "Unknown HDMI output type\n");
+ dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
+ radeon_encoder->encoder_id);
break;
}
+ WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
}
- radeon_encoder->hdmi_enabled = false;
- radeon_encoder->hdmi_offset = 0;
+ dig->afmt->enabled = false;
}
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index c44304ad8bda..2b960cb5c18a 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -156,4 +156,10 @@
#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
#define R600_AUDIO_STATUS_BITS 0x73d8
+#define DCE2_HDMI_OFFSET0 (0x7400 - 0x7400)
+#define DCE2_HDMI_OFFSET1 (0x7700 - 0x7400)
+/* DCE3.2 second instance starts at 0x7800 */
+#define DCE3_HDMI_OFFSET0 (0x7400 - 0x7400)
+#define DCE3_HDMI_OFFSET1 (0x7800 - 0x7400)
+
#endif
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index a9652be93b66..15bd3b216243 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1057,12 +1057,6 @@
# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
-#define DCE2_HDMI_OFFSET0 (0x7400 - 0x7400)
-#define DCE2_HDMI_OFFSET1 (0x7700 - 0x7400)
-/* DCE3.2 second instance starts at 0x7800 */
-#define DCE3_HDMI_OFFSET0 (0x7400 - 0x7400)
-#define DCE3_HDMI_OFFSET1 (0x7800 - 0x7400)
-
/*
* PM4
*/
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 82ffa6a05cc6..9783178a8ff0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -100,28 +100,32 @@ extern int radeon_lockup_timeout;
* Copy from radeon_drv.h so we don't have to include both and have conflicting
* symbols;
*/
-#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
-#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
+#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
-#define RADEON_IB_POOL_SIZE 16
-#define RADEON_DEBUGFS_MAX_COMPONENTS 32
-#define RADEONFB_CONN_LIMIT 4
-#define RADEON_BIOS_NUM_SCRATCH 8
+#define RADEON_IB_POOL_SIZE 16
+#define RADEON_DEBUGFS_MAX_COMPONENTS 32
+#define RADEONFB_CONN_LIMIT 4
+#define RADEON_BIOS_NUM_SCRATCH 8
/* max number of rings */
-#define RADEON_NUM_RINGS 3
+#define RADEON_NUM_RINGS 3
+
+/* fence seq is set to this number when signaled */
+#define RADEON_FENCE_SIGNALED_SEQ 0LL
+#define RADEON_FENCE_NOTEMITED_SEQ (~0LL)
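The fence sequence becomes a 64-bit counter with the two sentinel values above, while the value the driver reads back through the fence location (volatile uint32_t *cpu_addr below) stays 32 bits wide. A minimal sketch of how a 32-bit readback can be folded into the 64-bit space, assuming the sequence never advances by more than 2^31 between two reads; this is a hypothetical helper for illustration, not the driver's implementation:

static uint64_t fence_extend_seq(uint64_t last_seq, uint32_t hw_seq)
{
        /* splice the 32-bit hardware value into the last known 64-bit seq */
        uint64_t seq = (last_seq & 0xffffffff00000000ULL) | hw_seq;

        if (seq < last_seq)
                seq += 0x100000000ULL;  /* low 32 bits wrapped around */
        return seq;
}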
/* internal ring indices */
/* r1xx+ has gfx CP ring */
-#define RADEON_RING_TYPE_GFX_INDEX 0
+#define RADEON_RING_TYPE_GFX_INDEX 0
/* cayman has 2 compute CP rings */
-#define CAYMAN_RING_TYPE_CP1_INDEX 1
-#define CAYMAN_RING_TYPE_CP2_INDEX 2
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
/* hardcode those limit for now */
-#define RADEON_VA_RESERVED_SIZE (8 << 20)
-#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+#define RADEON_VA_RESERVED_SIZE (8 << 20)
+#define RADEON_IB_VM_MAX_SIZE (64 << 10)
/*
* Errata workarounds.
@@ -254,26 +258,20 @@ struct radeon_fence_driver {
uint32_t scratch_reg;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
- atomic_t seq;
- uint32_t last_seq;
+ /* seq is protected by ring emission lock */
+ uint64_t seq;
+ atomic64_t last_seq;
unsigned long last_activity;
- wait_queue_head_t queue;
- struct list_head emitted;
- struct list_head signaled;
bool initialized;
};
struct radeon_fence {
struct radeon_device *rdev;
struct kref kref;
- struct list_head list;
/* protected by radeon_fence.lock */
- uint32_t seq;
- bool emitted;
- bool signaled;
+ uint64_t seq;
/* RB, DMA, etc. */
- int ring;
- struct radeon_semaphore *semaphore;
+ unsigned ring;
};
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
@@ -284,11 +282,14 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_any(struct radeon_device *rdev,
+ struct radeon_fence **fences,
+ bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
-int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
/*
* Tiling registers
@@ -381,8 +382,11 @@ struct radeon_bo_list {
* alignment).
*/
struct radeon_sa_manager {
+ spinlock_t lock;
struct radeon_bo *bo;
- struct list_head sa_bo;
+ struct list_head *hole;
+ struct list_head flist[RADEON_NUM_RINGS];
+ struct list_head olist;
unsigned size;
uint64_t gpu_addr;
void *cpu_ptr;
@@ -393,10 +397,12 @@ struct radeon_sa_bo;
/* sub-allocation buffer */
struct radeon_sa_bo {
- struct list_head list;
+ struct list_head olist;
+ struct list_head flist;
struct radeon_sa_manager *manager;
- unsigned offset;
- unsigned size;
+ unsigned soffset;
+ unsigned eoffset;
+ struct radeon_fence *fence;
};
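With the reworked sub-allocator, a radeon_sa_bo is just the offset range [soffset, eoffset) inside the manager's single backing BO plus the fence after which that range may be recycled; olist appears to keep allocations in offset order while the per-ring flist holds frees still guarded by a fence. A few illustrative accessors under that assumption (not necessarily the driver's real helpers):

/* Illustration only: derive size and addresses from the offsets above,
 * assuming soffset/eoffset are byte offsets into the manager's BO. */
static inline unsigned sa_bo_size(struct radeon_sa_bo *sa_bo)
{
        return sa_bo->eoffset - sa_bo->soffset;
}

static inline uint64_t sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
{
        return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
{
        return (char *)sa_bo->manager->cpu_ptr + sa_bo->soffset;
}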
/*
@@ -427,34 +433,13 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
/*
* Semaphores.
*/
-struct radeon_ring;
-
-#define RADEON_SEMAPHORE_BO_SIZE 256
-
-struct radeon_semaphore_driver {
- rwlock_t lock;
- struct list_head bo;
-};
-
-struct radeon_semaphore_bo;
-
/* everything here is constant */
struct radeon_semaphore {
- struct list_head list;
+ struct radeon_sa_bo *sa_bo;
+ signed waiters;
uint64_t gpu_addr;
- uint32_t *cpu_ptr;
- struct radeon_semaphore_bo *bo;
};
-struct radeon_semaphore_bo {
- struct list_head list;
- struct radeon_ib *ib;
- struct list_head free;
- struct radeon_semaphore semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
- unsigned nused;
-};
-
-void radeon_semaphore_driver_fini(struct radeon_device *rdev);
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore);
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
@@ -466,7 +451,8 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
bool sync_to[RADEON_NUM_RINGS],
int dst_ring);
void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore);
+ struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence);
/*
* GART structures, functions & helpers
@@ -637,26 +623,14 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
*/
struct radeon_ib {
- struct radeon_sa_bo sa_bo;
- unsigned idx;
- uint32_t length_dw;
- uint64_t gpu_addr;
- uint32_t *ptr;
- struct radeon_fence *fence;
- unsigned vm_id;
- bool is_const_ib;
-};
-
-/*
- * locking -
- * mutex protects scheduled_ibs, ready, alloc_bm
- */
-struct radeon_ib_pool {
- struct radeon_mutex mutex;
- struct radeon_sa_manager sa_manager;
- struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
- bool ready;
- unsigned head_id;
+ struct radeon_sa_bo *sa_bo;
+ uint32_t length_dw;
+ uint64_t gpu_addr;
+ uint32_t *ptr;
+ struct radeon_fence *fence;
+ unsigned vm_id;
+ bool is_const_ib;
+ struct radeon_semaphore *semaphore;
};
struct radeon_ring {
@@ -676,7 +650,6 @@ struct radeon_ring {
uint64_t gpu_addr;
uint32_t align_mask;
uint32_t ptr_mask;
- struct mutex mutex;
bool ready;
u32 ptr_reg_shift;
u32 ptr_reg_mask;
@@ -693,7 +666,7 @@ struct radeon_vm {
unsigned last_pfn;
u64 pt_gpu_addr;
u64 *pt;
- struct radeon_sa_bo sa_bo;
+ struct radeon_sa_bo *sa_bo;
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
@@ -770,7 +743,6 @@ struct r600_blit_cp_primitives {
};
struct r600_blit {
- struct mutex mutex;
struct radeon_bo *shader_obj;
struct r600_blit_cp_primitives primitives;
int max_dim;
@@ -780,8 +752,6 @@ struct r600_blit {
u32 vs_offset, ps_offset;
u32 state_offset;
u32 state_len;
- u32 vb_used, vb_total;
- struct radeon_ib *vb_ib;
};
void r600_blit_suspend(struct radeon_device *rdev);
@@ -799,9 +769,8 @@ struct si_rlc {
};
int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib **ib, unsigned size);
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
+ struct radeon_ib *ib, unsigned size);
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
@@ -815,6 +784,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsign
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_undo(struct radeon_ring *ring);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
@@ -868,8 +838,8 @@ struct radeon_cs_parser {
int chunk_relocs_idx;
int chunk_flags_idx;
int chunk_const_ib_idx;
- struct radeon_ib *ib;
- struct radeon_ib *const_ib;
+ struct radeon_ib ib;
+ struct radeon_ib const_ib;
void *track;
unsigned family;
int parser_error;
@@ -1531,11 +1501,12 @@ struct radeon_device {
struct radeon_mode_info mode_info;
struct radeon_scratch scratch;
struct radeon_mman mman;
- rwlock_t fence_lock;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
- struct radeon_semaphore_driver semaphore_drv;
+ wait_queue_head_t fence_queue;
+ struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
- struct radeon_ib_pool ib_pool;
+ bool ib_pool_ready;
+ struct radeon_sa_manager ring_tmp_bo;
struct radeon_irq irq;
struct radeon_asic *asic;
struct radeon_gem gem;
@@ -1856,10 +1827,32 @@ int r600_fmt_get_nblocksy(u32 format, u32 h);
/*
* r600 functions used by radeon_encoder.c
*/
+struct radeon_hdmi_acr {
+ u32 clock;
+
+ int n_32khz;
+ int cts_32khz;
+
+ int n_44_1khz;
+ int cts_44_1khz;
+
+ int n_48khz;
+ int cts_48khz;
+
+};
+
+extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
+
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+/*
+ * evergreen functions used by radeon_encoder.c
+ */
+
+extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+
extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 78309318bd3f..ef9ccb4def4d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -364,15 +364,17 @@ int r600_audio_rate(struct radeon_device *rdev);
uint8_t r600_audio_status_bits(struct radeon_device *rdev);
uint8_t r600_audio_category_code(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
-void r600_hdmi_init(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+ struct radeon_sa_bo **vb);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
+ struct radeon_sa_bo *vb);
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages);
+ unsigned num_gpu_pages,
+ struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
/*
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index c66beb1662b5..c7d64a739033 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -122,15 +122,15 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
int i, r;
for (i = 0; i < p->nrelocs; i++) {
+ struct radeon_fence *fence;
+
if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
continue;
- if (!(p->relocs[i].flags & RADEON_RELOC_DONT_SYNC)) {
- struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
- if (fence->ring != p->ring && !radeon_fence_signaled(fence)) {
- sync_to_ring[fence->ring] = true;
- need_sync = true;
- }
+ fence = p->relocs[i].robj->tbo.sync_obj;
+ if (fence->ring != p->ring && !radeon_fence_signaled(fence)) {
+ sync_to_ring[fence->ring] = true;
+ need_sync = true;
}
}
@@ -138,12 +138,12 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
return 0;
}
- r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
+ r = radeon_semaphore_create(p->rdev, &p->ib.semaphore);
if (r) {
return r;
}
- return radeon_semaphore_sync_rings(p->rdev, p->ib->fence->semaphore,
+ return radeon_semaphore_sync_rings(p->rdev, p->ib.semaphore,
sync_to_ring, p->ring);
}
@@ -161,8 +161,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
/* get chunks */
INIT_LIST_HEAD(&p->validated);
p->idx = 0;
- p->ib = NULL;
- p->const_ib = NULL;
+ p->ib.sa_bo = NULL;
+ p->ib.semaphore = NULL;
+ p->const_ib.sa_bo = NULL;
+ p->const_ib.semaphore = NULL;
p->chunk_ib_idx = -1;
p->chunk_relocs_idx = -1;
p->chunk_flags_idx = -1;
@@ -301,10 +303,9 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
-
- if (!error && parser->ib)
+ if (!error)
ttm_eu_fence_buffer_objects(&parser->validated,
- parser->ib->fence);
+ parser->ib.fence);
else
ttm_eu_backoff_reservation(&parser->validated);
@@ -327,9 +328,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
kfree(parser->chunks);
kfree(parser->chunks_array);
radeon_ib_free(parser->rdev, &parser->ib);
- if (parser->const_ib) {
- radeon_ib_free(parser->rdev, &parser->const_ib);
- }
+ radeon_ib_free(parser->rdev, &parser->const_ib);
}
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
@@ -355,7 +354,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
DRM_ERROR("Failed to get ib !\n");
return r;
}
- parser->ib->length_dw = ib_chunk->length_dw;
+ parser->ib.length_dw = ib_chunk->length_dw;
r = radeon_cs_parse(rdev, parser->ring, parser);
if (r || parser->parser_error) {
DRM_ERROR("Invalid command stream !\n");
@@ -370,8 +369,8 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
if (r) {
DRM_ERROR("Failed to synchronize rings !\n");
}
- parser->ib->vm_id = 0;
- r = radeon_ib_schedule(rdev, parser->ib);
+ parser->ib.vm_id = 0;
+ r = radeon_ib_schedule(rdev, &parser->ib);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
}
@@ -422,14 +421,14 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
DRM_ERROR("Failed to get const ib !\n");
return r;
}
- parser->const_ib->is_const_ib = true;
- parser->const_ib->length_dw = ib_chunk->length_dw;
+ parser->const_ib.is_const_ib = true;
+ parser->const_ib.length_dw = ib_chunk->length_dw;
/* Copy the packet into the IB */
- if (DRM_COPY_FROM_USER(parser->const_ib->ptr, ib_chunk->user_ptr,
+ if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
ib_chunk->length_dw * 4)) {
return -EFAULT;
}
- r = radeon_ring_ib_parse(rdev, parser->ring, parser->const_ib);
+ r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
if (r) {
return r;
}
@@ -446,13 +445,13 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
DRM_ERROR("Failed to get ib !\n");
return r;
}
- parser->ib->length_dw = ib_chunk->length_dw;
+ parser->ib.length_dw = ib_chunk->length_dw;
/* Copy the packet into the IB */
- if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
+ if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
ib_chunk->length_dw * 4)) {
return -EFAULT;
}
- r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
+ r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
if (r) {
return r;
}
@@ -473,29 +472,29 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
- parser->const_ib->vm_id = vm->id;
+ parser->const_ib.vm_id = vm->id;
/* ib pool is bound at 0 in the virtual address space so gpu_addr is the
* offset inside the pool bo
*/
- parser->const_ib->gpu_addr = parser->const_ib->sa_bo.offset;
- r = radeon_ib_schedule(rdev, parser->const_ib);
+ parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
+ r = radeon_ib_schedule(rdev, &parser->const_ib);
if (r)
goto out;
}
- parser->ib->vm_id = vm->id;
+ parser->ib.vm_id = vm->id;
/* ib pool is bound at 0 in the virtual address space so gpu_addr is the
* offset inside the pool bo
*/
- parser->ib->gpu_addr = parser->ib->sa_bo.offset;
- parser->ib->is_const_ib = false;
- r = radeon_ib_schedule(rdev, parser->ib);
+ parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
+ parser->ib.is_const_ib = false;
+ r = radeon_ib_schedule(rdev, &parser->ib);
out:
if (!r) {
if (vm->fence) {
radeon_fence_unref(&vm->fence);
}
- vm->fence = radeon_fence_ref(parser->ib->fence);
+ vm->fence = radeon_fence_ref(parser->ib.fence);
}
mutex_unlock(&fpriv->vm.mutex);
return r;
@@ -573,7 +572,7 @@ int radeon_cs_finish_pages(struct radeon_cs_parser *p)
size = PAGE_SIZE;
}
- if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+ if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
ibc->user_ptr + (i * PAGE_SIZE),
size))
return -EFAULT;
@@ -590,7 +589,7 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true;
for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
- if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
+ if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
ibc->user_ptr + (i * PAGE_SIZE),
PAGE_SIZE)) {
p->parser_error = -EFAULT;
@@ -606,7 +605,7 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
if (copy1)
- ibc->kpage[new_page] = p->ib->ptr + (pg_idx * (PAGE_SIZE / 4));
+ ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
ibc->user_ptr + (pg_idx * PAGE_SIZE),
@@ -617,7 +616,7 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
/* copy to IB for non single case */
if (!copy1)
- memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
+ memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
ibc->last_copied_page = pg_idx;
ibc->kpage_idx[new_page] = pg_idx;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ff28210dedec..57637017eed6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -225,9 +225,9 @@ int radeon_wb_init(struct radeon_device *rdev)
/* disable event_write fences */
rdev->wb.use_event = false;
/* disabled via module param */
- if (radeon_no_wb == 1)
+ if (radeon_no_wb == 1) {
rdev->wb.enabled = false;
- else {
+ } else {
if (rdev->flags & RADEON_IS_AGP) {
/* often unreliable on AGP */
rdev->wb.enabled = false;
@@ -237,8 +237,9 @@ int radeon_wb_init(struct radeon_device *rdev)
} else {
rdev->wb.enabled = true;
/* event_write fences are only available on r600+ */
- if (rdev->family >= CHIP_R600)
+ if (rdev->family >= CHIP_R600) {
rdev->wb.use_event = true;
+ }
}
}
/* always use writeback/events on NI, APUs */
@@ -696,6 +697,11 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
+static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
+ .set_gpu_state = radeon_switcheroo_set_state,
+ .reprobe = NULL,
+ .can_switch = radeon_switcheroo_can_switch,
+};
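
The hunk above replaces three positional callback arguments with a single const ops table, the usual way to keep such a registration interface extensible. A sketch of the pattern in plain C; the struct and function names below are invented for illustration and are not the vga_switcheroo API:

/* Callback-table pattern in isolation: one const struct of function
 * pointers replaces a list of positional callback arguments, so new
 * hooks can be added later without touching every call site. */
struct client_ops {
    void (*set_gpu_state)(void *dev, int state);
    void (*reprobe)(void *dev);      /* optional, may stay NULL */
    int  (*can_switch)(void *dev);
};

static void demo_set_gpu_state(void *dev, int state) { (void)dev; (void)state; }
static int  demo_can_switch(void *dev) { (void)dev; return 1; }

static const struct client_ops demo_ops = {
    .set_gpu_state = demo_set_gpu_state,
    .reprobe       = NULL,
    .can_switch    = demo_can_switch,
};
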
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
@@ -723,21 +729,18 @@ int radeon_device_init(struct radeon_device *rdev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
radeon_mutex_init(&rdev->cs_mutex);
- radeon_mutex_init(&rdev->ib_pool.mutex);
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- mutex_init(&rdev->ring[i].mutex);
+ mutex_init(&rdev->ring_lock);
mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->vram_mutex);
- rwlock_init(&rdev->fence_lock);
- rwlock_init(&rdev->semaphore_drv.lock);
- INIT_LIST_HEAD(&rdev->gem.objects);
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
- INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
+ r = radeon_gem_init(rdev);
+ if (r)
+ return r;
/* initialize vm here */
rdev->vm_manager.use_bitmap = 1;
rdev->vm_manager.max_pfn = 1 << 20;
@@ -813,10 +816,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- vga_switcheroo_register_client(rdev->pdev,
- radeon_switcheroo_set_state,
- NULL,
- radeon_switcheroo_can_switch);
+ vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
r = radeon_init(rdev);
if (r)
@@ -913,9 +913,12 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
}
/* evict vram memory */
radeon_bo_evict_vram(rdev);
+
+ mutex_lock(&rdev->ring_lock);
/* wait for gpu to finish processing current batch */
for (i = 0; i < RADEON_NUM_RINGS; i++)
- radeon_fence_wait_empty(rdev, i);
+ radeon_fence_wait_empty_locked(rdev, i);
+ mutex_unlock(&rdev->ring_lock);
radeon_save_bios_scratch_regs(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0a1d4bd65edc..da3fe8a68f8d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -573,24 +573,6 @@ static const char *encoder_names[37] = {
"INTERNAL_VCE"
};
-static const char *connector_names[15] = {
- "Unknown",
- "VGA",
- "DVI-I",
- "DVI-D",
- "DVI-A",
- "Composite",
- "S-video",
- "LVDS",
- "Component",
- "DIN",
- "DisplayPort",
- "HDMI-A",
- "HDMI-B",
- "TV",
- "eDP",
-};
-
static const char *hpd_names[6] = {
"HPD1",
"HPD2",
@@ -613,7 +595,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
DRM_INFO("Connector %d:\n", i);
- DRM_INFO(" %s\n", connector_names[connector->connector_type]);
+ DRM_INFO(" %s\n", drm_get_connector_name(connector));
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
if (radeon_connector->ddc_bus) {
@@ -1243,6 +1225,93 @@ void radeon_update_display_priority(struct radeon_device *rdev)
}
+/*
+ * Allocate hdmi structs and determine register offsets
+ */
+static void radeon_afmt_init(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
+ rdev->mode_info.afmt[i] = NULL;
+
+ if (ASIC_IS_DCE6(rdev)) {
+ /* todo */
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* DCE4/5 has 6 audio blocks tied to DIG encoders */
+ /* DCE4.1 has 2 audio blocks tied to DIG encoders */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ if (!ASIC_IS_DCE41(rdev)) {
+ rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[2]) {
+ rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+ rdev->mode_info.afmt[2]->id = 2;
+ }
+ rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[3]) {
+ rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+ rdev->mode_info.afmt[3]->id = 3;
+ }
+ rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[4]) {
+ rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+ rdev->mode_info.afmt[4]->id = 4;
+ }
+ rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[5]) {
+ rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+ rdev->mode_info.afmt[5]->id = 5;
+ }
+ }
+ } else if (ASIC_IS_DCE3(rdev)) {
+ /* DCE3.x has 2 audio blocks tied to DIG encoders */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ } else if (ASIC_IS_DCE2(rdev)) {
+ /* DCE2 has at least 1 routable audio block */
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ /* r6xx has 2 routable audio blocks */
+ if (rdev->family >= CHIP_R600) {
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ }
+ }
+}
+
+static void radeon_afmt_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
+ kfree(rdev->mode_info.afmt[i]);
+ rdev->mode_info.afmt[i] = NULL;
+ }
+}
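
The DCE4/5 branch above repeats the same kzalloc/offset/id assignment once per block; the same setup could be expressed as a loop over an offset table. A hedged standalone sketch, with calloc in place of kzalloc and placeholder offsets where the driver uses the EVERGREEN_CRTCn_REGISTER_OFFSET constants:

#include <stdlib.h>

struct afmt_block { int offset; int id; };

/* Placeholder values; the driver uses EVERGREEN_CRTC0..5_REGISTER_OFFSET. */
static const int dce4_offsets[6] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5 };

/* Allocate one afmt struct per audio block and record its register offset.
 * nblocks would be 2 on DCE4.1 and 6 on other DCE4/5 parts. */
static void afmt_init_dce4(struct afmt_block *blocks[6], int nblocks)
{
    for (int i = 0; i < nblocks; i++) {
        blocks[i] = calloc(1, sizeof(*blocks[i]));
        if (blocks[i]) {
            blocks[i]->offset = dce4_offsets[i];
            blocks[i]->id = i;
        }
    }
}
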
+
int radeon_modeset_init(struct radeon_device *rdev)
{
int i;
@@ -1303,6 +1372,9 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* initialize hpd */
radeon_hpd_init(rdev);
+ /* setup afmt */
+ radeon_afmt_init(rdev);
+
/* Initialize power management */
radeon_pm_init(rdev);
@@ -1319,6 +1391,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
radeon_pm_fini(rdev);
if (rdev->mode_info.mode_config_initialized) {
+ radeon_afmt_fini(rdev);
drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 5bb78bf547ea..11f5f402d22c 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -63,75 +63,82 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
- unsigned long irq_flags;
-
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (fence->emitted) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ /* we are protected by the ring emission mutex */
+ if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
return 0;
}
- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
+ fence->seq = ++rdev->fence_drv[fence->ring].seq;
radeon_fence_ring_emit(rdev, fence->ring, fence);
trace_radeon_fence_emit(rdev->ddev, fence->seq);
- fence->emitted = true;
- /* are we the first fence on a previusly idle ring? */
- if (list_empty(&rdev->fence_drv[fence->ring].emitted)) {
- rdev->fence_drv[fence->ring].last_activity = jiffies;
- }
- list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
-static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
+void radeon_fence_process(struct radeon_device *rdev, int ring)
{
- struct radeon_fence *fence;
- struct list_head *i, *n;
- uint32_t seq;
+ uint64_t seq, last_seq;
+ unsigned count_loop = 0;
bool wake = false;
- seq = radeon_fence_read(rdev, ring);
- if (seq == rdev->fence_drv[ring].last_seq)
- return false;
-
- rdev->fence_drv[ring].last_seq = seq;
- rdev->fence_drv[ring].last_activity = jiffies;
+ /* Note there is a scenario here for an infinite loop but it's
+ * very unlikely to happen. For it to happen, the current polling
+ * process needs to be interrupted by another process, and that
+ * other process needs to update last_seq between the atomic read
+ * and the xchg of the current process.
+ *
+ * Moreover, for this to become an infinite loop, new fences must
+ * be signaled continuously, i.e. radeon_fence_read needs to return
+ * a different value each time for both the currently polling
+ * process and the other process that does the xchg between the
+ * current process's atomic read and xchg. And the value the other
+ * process sets as last seq must be higher than the seq value we
+ * just read, which means the current process must be interrupted
+ * after radeon_fence_read and before the atomic xchg.
+ *
+ * To be even safer we count the number of times we loop and bail
+ * out after 10 loops, accepting the fact that we might have
+ * temporarily set last_seq to an older value than the true last
+ * seq signaled by the hardware.
+ */
+ last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+ do {
+ seq = radeon_fence_read(rdev, ring);
+ seq |= last_seq & 0xffffffff00000000LL;
+ if (seq < last_seq) {
+ seq += 0x100000000LL;
+ }
- n = NULL;
- list_for_each(i, &rdev->fence_drv[ring].emitted) {
- fence = list_entry(i, struct radeon_fence, list);
- if (fence->seq == seq) {
- n = i;
+ if (seq == last_seq) {
break;
}
- }
- /* all fence previous to this one are considered as signaled */
- if (n) {
- i = n;
- do {
- n = i->prev;
- list_move_tail(i, &rdev->fence_drv[ring].signaled);
- fence = list_entry(i, struct radeon_fence, list);
- fence->signaled = true;
- i = n;
- } while (i != &rdev->fence_drv[ring].emitted);
+ /* If we looped over, we don't want to return without
+ * checking whether a fence is signaled, as it means the
+ * seq we just read is different from the previous one.
+ */
wake = true;
+ last_seq = seq;
+ if ((count_loop++) > 10) {
+ /* We looped over too many times; leave, accepting
+ * the fact that we might have set an older fence
+ * seq than the current real last seq as signaled
+ * by the hw.
+ */
+ break;
+ }
+ } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
+
+ if (wake) {
+ rdev->fence_drv[ring].last_activity = jiffies;
+ wake_up_all(&rdev->fence_queue);
}
- return wake;
}
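
The do/while above splices the 32-bit value read from the hardware onto the upper half of the last known 64-bit sequence, and bumps the upper half when the low part wrapped around. A self-contained sketch of just that extension step:

#include <assert.h>
#include <stdint.h>

/* Extend a 32-bit hardware fence counter to 64 bits using the last
 * known 64-bit value: keep the old upper half and detect wrap-around. */
static uint64_t extend_seq(uint64_t last_seq, uint32_t hw_seq)
{
    uint64_t seq = hw_seq | (last_seq & 0xffffffff00000000ULL);

    if (seq < last_seq)
        seq += 0x100000000ULL;
    return seq;
}

int main(void)
{
    assert(extend_seq(0x00000000fffffff0ULL, 0x00000005) ==
           0x0000000100000005ULL);                 /* wrap detected */
    assert(extend_seq(0x0000000100000005ULL, 0x00000007) ==
           0x0000000100000007ULL);                 /* no wrap */
    return 0;
}
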
static void radeon_fence_destroy(struct kref *kref)
{
- unsigned long irq_flags;
- struct radeon_fence *fence;
+ struct radeon_fence *fence;
fence = container_of(kref, struct radeon_fence, kref);
- write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
- list_del(&fence->list);
- fence->emitted = false;
- write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
- if (fence->semaphore)
- radeon_semaphore_free(fence->rdev, fence->semaphore);
+ fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
kfree(fence);
}
@@ -145,83 +152,84 @@ int radeon_fence_create(struct radeon_device *rdev,
}
kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
- (*fence)->emitted = false;
- (*fence)->signaled = false;
- (*fence)->seq = 0;
+ (*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
(*fence)->ring = ring;
- (*fence)->semaphore = NULL;
- INIT_LIST_HEAD(&(*fence)->list);
return 0;
}
-bool radeon_fence_signaled(struct radeon_fence *fence)
+static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
+ u64 seq, unsigned ring)
{
- unsigned long irq_flags;
- bool signaled = false;
-
- if (!fence)
+ if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+ return true;
+ }
+ /* poll new last sequence at least once */
+ radeon_fence_process(rdev, ring);
+ if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
return true;
+ }
+ return false;
+}
- write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
- signaled = fence->signaled;
- /* if we are shuting down report all fence as signaled */
- if (fence->rdev->shutdown) {
- signaled = true;
+bool radeon_fence_signaled(struct radeon_fence *fence)
+{
+ if (!fence) {
+ return true;
}
- if (!fence->emitted) {
+ if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
WARN(1, "Querying an unemitted fence : %p !\n", fence);
- signaled = true;
+ return true;
}
- if (!signaled) {
- radeon_fence_poll_locked(fence->rdev, fence->ring);
- signaled = fence->signaled;
+ if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
+ return true;
+ }
+ if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return true;
}
- write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
- return signaled;
+ return false;
}
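
After this rework a fence's state lives entirely in its sequence number: a not-yet-emitted fence carries one sentinel, a fence already known to be signaled carries another, and anything else is compared against the ring's last signaled sequence. A sketch of that three-way test, with made-up sentinel values standing in for the RADEON_FENCE_* constants defined in radeon.h:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical sentinels; the driver defines its own in radeon.h. */
#define FENCE_NOTEMITTED_SEQ  UINT64_MAX
#define FENCE_SIGNALED_SEQ    (UINT64_MAX - 1)

static bool fence_signaled(uint64_t fence_seq, uint64_t last_signaled)
{
    if (fence_seq == FENCE_NOTEMITTED_SEQ)
        return true;                 /* querying an unemitted fence */
    if (fence_seq == FENCE_SIGNALED_SEQ)
        return true;                 /* already known to be signaled */
    return last_signaled >= fence_seq;
}
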
-int radeon_fence_wait(struct radeon_fence *fence, bool intr)
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
+ unsigned ring, bool intr, bool lock_ring)
{
- struct radeon_device *rdev;
- unsigned long irq_flags, timeout;
- u32 seq;
- int i, r;
+ unsigned long timeout, last_activity;
+ uint64_t seq;
+ unsigned i;
bool signaled;
+ int r;
- if (fence == NULL) {
- WARN(1, "Querying an invalid fence : %p !\n", fence);
- return -EINVAL;
- }
+ while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+ if (!rdev->ring[ring].ready) {
+ return -EBUSY;
+ }
- rdev = fence->rdev;
- signaled = radeon_fence_signaled(fence);
- while (!signaled) {
- read_lock_irqsave(&rdev->fence_lock, irq_flags);
timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
- if (time_after(rdev->fence_drv[fence->ring].last_activity, timeout)) {
+ if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
/* the normal case, timeout is somewhere before last_activity */
- timeout = rdev->fence_drv[fence->ring].last_activity - timeout;
+ timeout = rdev->fence_drv[ring].last_activity - timeout;
} else {
/* either jiffies wrapped around, or no fence was signaled in the last 500ms
- * anyway we will just wait for the minimum amount and then check for a lockup */
+ * anyway we will just wait for the minimum amount and then check for a lockup
+ */
timeout = 1;
}
- /* save current sequence value used to check for GPU lockups */
- seq = rdev->fence_drv[fence->ring].last_seq;
- read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+ /* Save current last activity value, used to check for GPU lockups */
+ last_activity = rdev->fence_drv[ring].last_activity;
trace_radeon_fence_wait_begin(rdev->ddev, seq);
- radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+ radeon_irq_kms_sw_irq_get(rdev, ring);
if (intr) {
- r = wait_event_interruptible_timeout(
- rdev->fence_drv[fence->ring].queue,
- (signaled = radeon_fence_signaled(fence)), timeout);
- } else {
- r = wait_event_timeout(
- rdev->fence_drv[fence->ring].queue,
- (signaled = radeon_fence_signaled(fence)), timeout);
+ r = wait_event_interruptible_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+ timeout);
+ } else {
+ r = wait_event_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
+ timeout);
}
- radeon_irq_kms_sw_irq_put(rdev, fence->ring);
+ radeon_irq_kms_sw_irq_put(rdev, ring);
if (unlikely(r < 0)) {
return r;
}
@@ -234,81 +242,246 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
continue;
}
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
/* check if sequence value has changed since last_activity */
- if (seq != rdev->fence_drv[fence->ring].last_seq) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
continue;
}
- /* change sequence value on all rings, so nobody else things there is a lockup */
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- rdev->fence_drv[i].last_seq -= 0x10000;
-
- rdev->fence_drv[fence->ring].last_activity = jiffies;
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ if (lock_ring) {
+ mutex_lock(&rdev->ring_lock);
+ }
- if (radeon_ring_is_lockup(rdev, fence->ring, &rdev->ring[fence->ring])) {
+ /* test if somebody else has already decided that this is a lockup */
+ if (last_activity != rdev->fence_drv[ring].last_activity) {
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ continue;
+ }
+ if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
/* good news we believe it's a lockup */
- printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
- fence->seq, seq);
+ dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
+ target_seq, seq);
+
+ /* change last activity so nobody else thinks there is a lockup */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ rdev->fence_drv[i].last_activity = jiffies;
+ }
/* mark the ring as not ready any more */
- rdev->ring[fence->ring].ready = false;
+ rdev->ring[ring].ready = false;
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
return -EDEADLK;
}
+
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
}
}
return 0;
}
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
+int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
- unsigned long irq_flags;
- struct radeon_fence *fence;
int r;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (!rdev->ring[ring].ready) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return -EBUSY;
+ if (fence == NULL) {
+ WARN(1, "Querying an invalid fence : %p !\n", fence);
+ return -EINVAL;
}
- if (list_empty(&rdev->fence_drv[ring].emitted)) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return -ENOENT;
+
+ r = radeon_fence_wait_seq(fence->rdev, fence->seq,
+ fence->ring, intr, true);
+ if (r) {
+ return r;
}
- fence = list_entry(rdev->fence_drv[ring].emitted.next,
- struct radeon_fence, list);
- radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- r = radeon_fence_wait(fence, false);
- radeon_fence_unref(&fence);
- return r;
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return 0;
}
-int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
+bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
- unsigned long irq_flags;
- struct radeon_fence *fence;
+ unsigned i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
+ u64 *target_seq, bool intr)
+{
+ unsigned long timeout, last_activity, tmp;
+ unsigned i, ring = RADEON_NUM_RINGS;
+ bool signaled;
int r;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (!rdev->ring[ring].ready) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return -EBUSY;
+ for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i]) {
+ continue;
+ }
+
+ /* use the most recent one as indicator */
+ if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
+ last_activity = rdev->fence_drv[i].last_activity;
+ }
+
+ /* For lockup detection just pick the lowest ring we are
+ * actively waiting for
+ */
+ if (i < ring) {
+ ring = i;
+ }
}
- if (list_empty(&rdev->fence_drv[ring].emitted)) {
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+
+ /* nothing to wait for ? */
+ if (ring == RADEON_NUM_RINGS) {
return 0;
}
- fence = list_entry(rdev->fence_drv[ring].emitted.prev,
- struct radeon_fence, list);
- radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- r = radeon_fence_wait(fence, false);
- radeon_fence_unref(&fence);
- return r;
+
+ while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+ timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+ if (time_after(last_activity, timeout)) {
+ /* the normal case, timeout is somewhere before last_activity */
+ timeout = last_activity - timeout;
+ } else {
+ /* either jiffies wrapped around, or no fence was signaled in the last 500ms
+ * anyway we will just wait for the minimum amount and then check for a lockup
+ */
+ timeout = 1;
+ }
+
+ trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (target_seq[i]) {
+ radeon_irq_kms_sw_irq_get(rdev, i);
+ }
+ }
+ if (intr) {
+ r = wait_event_interruptible_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+ timeout);
+ } else {
+ r = wait_event_timeout(rdev->fence_queue,
+ (signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
+ timeout);
+ }
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (target_seq[i]) {
+ radeon_irq_kms_sw_irq_put(rdev, i);
+ }
+ }
+ if (unlikely(r < 0)) {
+ return r;
+ }
+ trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
+
+ if (unlikely(!signaled)) {
+ /* we were interrupted for some reason and fence
+ * isn't signaled yet, resume waiting */
+ if (r) {
+ continue;
+ }
+
+ mutex_lock(&rdev->ring_lock);
+ for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
+ tmp = rdev->fence_drv[i].last_activity;
+ }
+ }
+ /* test if somebody else has already decided that this is a lockup */
+ if (last_activity != tmp) {
+ last_activity = tmp;
+ mutex_unlock(&rdev->ring_lock);
+ continue;
+ }
+
+ if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+ /* good news we believe it's a lockup */
+ dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
+ target_seq[ring]);
+
+ /* change last activity so nobody else thinks there is a lockup */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ rdev->fence_drv[i].last_activity = jiffies;
+ }
+
+ /* mark the ring as not ready any more */
+ rdev->ring[ring].ready = false;
+ mutex_unlock(&rdev->ring_lock);
+ return -EDEADLK;
+ }
+ mutex_unlock(&rdev->ring_lock);
+ }
+ }
+ return 0;
+}
+
+int radeon_fence_wait_any(struct radeon_device *rdev,
+ struct radeon_fence **fences,
+ bool intr)
+{
+ uint64_t seq[RADEON_NUM_RINGS];
+ unsigned i;
+ int r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ seq[i] = 0;
+
+ if (!fences[i]) {
+ continue;
+ }
+
+ if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
+ /* something was already signaled */
+ return 0;
+ }
+
+ if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+ seq[i] = fences[i]->seq;
+ }
+ }
+
+ r = radeon_fence_wait_any_seq(rdev, seq, intr);
+ if (r) {
+ return r;
+ }
+ return 0;
+}
+
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
+{
+ uint64_t seq;
+
+ /* We are not protected by the ring lock when reading the current seq,
+ * but that's ok: the worst case is that we return too early when we
+ * could have waited.
+ */
+ seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+ if (seq >= rdev->fence_drv[ring].seq) {
+ /* nothing to wait for, last_seq is
+ already the last emitted fence */
+ return -ENOENT;
+ }
+ return radeon_fence_wait_seq(rdev, seq, ring, false, false);
+}
+
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+{
+ /* We are not protected by the ring lock when reading the current seq,
+ * but that's ok: wait empty is called from places where no more
+ * activity can be scheduled, so there won't be concurrent access
+ * to the seq value.
+ */
+ return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq,
+ ring, false, false);
}
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
@@ -327,49 +500,27 @@ void radeon_fence_unref(struct radeon_fence **fence)
}
}
-void radeon_fence_process(struct radeon_device *rdev, int ring)
-{
- unsigned long irq_flags;
- bool wake;
-
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
- wake = radeon_fence_poll_locked(rdev, ring);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- if (wake) {
- wake_up_all(&rdev->fence_drv[ring].queue);
- }
-}
-
-int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
- unsigned long irq_flags;
- int not_processed = 0;
-
- read_lock_irqsave(&rdev->fence_lock, irq_flags);
- if (!rdev->fence_drv[ring].initialized) {
- read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return 0;
+ uint64_t emitted;
+
+ /* We are not protected by ring lock when reading the last sequence
+ * but it's ok to report slightly wrong fence count here.
+ */
+ radeon_fence_process(rdev, ring);
+ emitted = rdev->fence_drv[ring].seq - atomic64_read(&rdev->fence_drv[ring].last_seq);
+ /* to avoid 32-bit wrap around */
+ if (emitted > 0x10000000) {
+ emitted = 0x10000000;
}
-
- if (!list_empty(&rdev->fence_drv[ring].emitted)) {
- struct list_head *ptr;
- list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
- /* count up to 3, that's enought info */
- if (++not_processed >= 3)
- break;
- }
- }
- read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
- return not_processed;
+ return (unsigned)emitted;
}
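
The count is simply the gap between the last emitted and last signaled 64-bit sequences; the clamp keeps a momentarily inconsistent pair of unlocked reads from overflowing the unsigned return value. The same computation in isolation:

#include <assert.h>
#include <stdint.h>

/* Emitted-but-not-yet-signaled fences, clamped to a sane upper bound. */
static unsigned count_emitted(uint64_t emitted_seq, uint64_t signaled_seq)
{
    uint64_t n = emitted_seq - signaled_seq;

    return (n > 0x10000000) ? 0x10000000u : (unsigned)n;
}

int main(void)
{
    assert(count_emitted(105, 100) == 5);
    assert(count_emitted(100, 105) == 0x10000000u);  /* racy read, clamped */
    return 0;
}
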
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
- unsigned long irq_flags;
uint64_t index;
int r;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
if (rdev->wb.use_event) {
rdev->fence_drv[ring].scratch_reg = 0;
@@ -378,7 +529,6 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
if (r) {
dev_err(rdev->dev, "fence failed to get scratch register\n");
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return r;
}
index = RADEON_WB_SCRATCH_OFFSET +
@@ -387,11 +537,10 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
}
rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
+ radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring);
rdev->fence_drv[ring].initialized = true;
- DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
+ dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
@@ -400,23 +549,20 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
rdev->fence_drv[ring].scratch_reg = -1;
rdev->fence_drv[ring].cpu_addr = NULL;
rdev->fence_drv[ring].gpu_addr = 0;
- atomic_set(&rdev->fence_drv[ring].seq, 0);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
- INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
- init_waitqueue_head(&rdev->fence_drv[ring].queue);
+ rdev->fence_drv[ring].seq = 0;
+ atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
+ rdev->fence_drv[ring].last_activity = jiffies;
rdev->fence_drv[ring].initialized = false;
}
int radeon_fence_driver_init(struct radeon_device *rdev)
{
- unsigned long irq_flags;
int ring;
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ init_waitqueue_head(&rdev->fence_queue);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
radeon_fence_driver_init_ring(rdev, ring);
}
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
if (radeon_debugfs_fence_init(rdev)) {
dev_err(rdev->dev, "fence debugfs file creation failed\n");
}
@@ -425,19 +571,18 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
- unsigned long irq_flags;
int ring;
+ mutex_lock(&rdev->ring_lock);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
- radeon_fence_wait_empty(rdev, ring);
- wake_up_all(&rdev->fence_drv[ring].queue);
- write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ radeon_fence_wait_empty_locked(rdev, ring);
+ wake_up_all(&rdev->fence_queue);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
- write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
rdev->fence_drv[ring].initialized = false;
}
+ mutex_unlock(&rdev->ring_lock);
}
@@ -450,7 +595,6 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_fence *fence;
int i;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -458,14 +602,10 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
continue;
seq_printf(m, "--- ring %d ---\n", i);
- seq_printf(m, "Last signaled fence 0x%08X\n",
- radeon_fence_read(rdev, i));
- if (!list_empty(&rdev->fence_drv[i].emitted)) {
- fence = list_entry(rdev->fence_drv[i].emitted.prev,
- struct radeon_fence, list);
- seq_printf(m, "Last emitted fence %p with 0x%08X\n",
- fence, fence->seq);
- }
+ seq_printf(m, "Last signaled fence 0x%016llx\n",
+ (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
+ seq_printf(m, "Last emitted 0x%016llx\n",
+ rdev->fence_drv[i].seq);
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index c58a036233fb..8e9ef3403acd 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -326,7 +326,7 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
list_del_init(&vm->list);
vm->id = -1;
- radeon_sa_bo_free(rdev, &vm->sa_bo);
+ radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
vm->pt = NULL;
list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -395,7 +395,7 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
- RADEON_GPU_PAGE_SIZE);
+ RADEON_GPU_PAGE_SIZE, false);
if (r) {
if (list_empty(&rdev->vm_manager.lru_vm)) {
return r;
@@ -404,10 +404,8 @@ retry:
radeon_vm_unbind(rdev, vm_evict);
goto retry;
}
- vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
- vm->pt += (vm->sa_bo.offset >> 3);
- vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
- vm->pt_gpu_addr += vm->sa_bo.offset;
+ vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
+ vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
retry_id:
@@ -428,14 +426,14 @@ retry_id:
/* do hw bind */
r = rdev->vm_manager.funcs->bind(rdev, vm, id);
if (r) {
- radeon_sa_bo_free(rdev, &vm->sa_bo);
+ radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
return r;
}
rdev->vm_manager.use_bitmap |= 1 << id;
vm->id = id;
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
- return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
- &rdev->ib_pool.sa_manager.bo->tbo.mem);
+ return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
+ &rdev->ring_tmp_bo.bo->tbo.mem);
}
/* object have to be reserved */
@@ -633,7 +631,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
/* map the ib pool buffer at 0 in virtual address space, set
* read only
*/
- r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
+ r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
return r;
}
@@ -650,12 +648,12 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_mutex_unlock(&rdev->cs_mutex);
/* remove all bo */
- r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
- bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
+ bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
list_del_init(&bo_va->bo_list);
list_del_init(&bo_va->vm_list);
- radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
kfree(bo_va);
}
if (!list_empty(&vm->va)) {
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 499a5fed8b26..5b10ffd7bb2f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -220,12 +220,20 @@ enum radeon_dvo_chip {
struct radeon_fbdev;
+struct radeon_afmt {
+ bool enabled;
+ int offset;
+ bool last_buffer_filled_status;
+ int id;
+};
+
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
struct radeon_crtc *crtcs[6];
+ struct radeon_afmt *afmt[6];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
@@ -364,6 +372,7 @@ struct radeon_encoder_atom_dig {
int dpms_mode;
uint8_t backlight_level;
int panel_mode;
+ struct radeon_afmt *afmt;
};
struct radeon_encoder_atom_dac {
@@ -385,10 +394,6 @@ struct radeon_encoder {
struct drm_display_mode native_mode;
void *enc_priv;
int audio_polling_active;
- bool hdmi_enabled;
- int hdmi_offset;
- int hdmi_audio_workaround;
- int hdmi_buffer_status;
bool is_ext_encoder;
u16 caps;
};
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index f9104be88d7c..befec7d12132 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -146,6 +146,17 @@ extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
/*
* sub allocation
*/
+
+static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
+{
+ return sa_bo->manager->gpu_addr + sa_bo->soffset;
+}
+
+static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
+{
+ return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+}
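
Both helpers resolve a sub-allocation to an absolute address the same way: the manager's base (GPU or CPU side) plus the block's start offset. A tiny standalone restatement with hypothetical names:

#include <stdint.h>

struct sa_manager { uint64_t gpu_base; uint8_t *cpu_base; };
struct sa_block   { struct sa_manager *manager; unsigned soffset; };

static uint64_t sa_block_gpu_addr(const struct sa_block *b)
{
    return b->manager->gpu_base + b->soffset;   /* base + start offset */
}

static void *sa_block_cpu_addr(const struct sa_block *b)
{
    return b->manager->cpu_base + b->soffset;   /* same offset, CPU mapping */
}
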
+
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 domain);
@@ -157,9 +168,15 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo *sa_bo,
- unsigned size, unsigned align);
+ struct radeon_sa_bo **sa_bo,
+ unsigned size, unsigned align, bool block);
extern void radeon_sa_bo_free(struct radeon_device *rdev,
- struct radeon_sa_bo *sa_bo);
+ struct radeon_sa_bo **sa_bo,
+ struct radeon_fence *fence);
+#if defined(CONFIG_DEBUG_FS)
+extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+ struct seq_file *m);
+#endif
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index caa55d68f319..08825548ee69 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -252,10 +252,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (rdev->ring[i].ring_obj)
- mutex_lock(&rdev->ring[i].mutex);
- }
+ mutex_lock(&rdev->ring_lock);
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
@@ -273,13 +270,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
} else {
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
if (ring->ready) {
- struct radeon_fence *fence;
- radeon_ring_alloc(rdev, ring, 64);
- radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
- radeon_fence_emit(rdev, fence);
- radeon_ring_commit(rdev, ring);
- radeon_fence_wait(fence, false);
- radeon_fence_unref(&fence);
+ radeon_fence_wait_empty_locked(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
}
radeon_unmap_vram_bos(rdev);
@@ -311,10 +302,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (rdev->ring[i].ring_obj)
- mutex_unlock(&rdev->ring[i].mutex);
- }
+ mutex_unlock(&rdev->ring_lock);
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ddev->struct_mutex);
}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 2eb4c6ed198a..a5dee76f4ebb 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -24,6 +24,7 @@
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
+ * Christian König
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -33,8 +34,10 @@
#include "radeon.h"
#include "atom.h"
-int radeon_debugfs_ib_init(struct radeon_device *rdev);
-int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+/*
+ * IB.
+ */
+int radeon_debugfs_sa_init(struct radeon_device *rdev);
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
@@ -61,123 +64,37 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
return idx_value;
}
-void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
-{
-#if DRM_DEBUG_CODE
- if (ring->count_dw <= 0) {
- DRM_ERROR("radeon: writting more dword to ring than expected !\n");
- }
-#endif
- ring->ring[ring->wptr++] = v;
- ring->wptr &= ring->ptr_mask;
- ring->count_dw--;
- ring->ring_free_dw--;
-}
-
-/*
- * IB.
- */
-bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- bool done = false;
-
- /* only free ib which have been emited */
- if (ib->fence && ib->fence->emitted) {
- if (radeon_fence_signaled(ib->fence)) {
- radeon_fence_unref(&ib->fence);
- radeon_sa_bo_free(rdev, &ib->sa_bo);
- done = true;
- }
- }
- return done;
-}
-
int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib **ib, unsigned size)
+ struct radeon_ib *ib, unsigned size)
{
- struct radeon_fence *fence;
- unsigned cretry = 0;
- int r = 0, i, idx;
-
- *ib = NULL;
- /* align size on 256 bytes */
- size = ALIGN(size, 256);
+ int r;
- r = radeon_fence_create(rdev, &fence, ring);
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
if (r) {
- dev_err(rdev->dev, "failed to create fence for new IB\n");
+ dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r;
}
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- idx = rdev->ib_pool.head_id;
-retry:
- if (cretry > 5) {
- dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_fence_unref(&fence);
- return -ENOMEM;
- }
- cretry++;
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
- if (rdev->ib_pool.ibs[idx].fence == NULL) {
- r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
- &rdev->ib_pool.ibs[idx].sa_bo,
- size, 256);
- if (!r) {
- *ib = &rdev->ib_pool.ibs[idx];
- (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
- (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
- (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
- (*ib)->gpu_addr += (*ib)->sa_bo.offset;
- (*ib)->fence = fence;
- (*ib)->vm_id = 0;
- (*ib)->is_const_ib = false;
- /* ib are most likely to be allocated in a ring fashion
- * thus rdev->ib_pool.head_id should be the id of the
- * oldest ib
- */
- rdev->ib_pool.head_id = (1 + idx);
- rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- return 0;
- }
- }
- idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
- }
- /* this should be rare event, ie all ib scheduled none signaled yet.
- */
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
- r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
- if (!r) {
- goto retry;
- }
- /* an error happened */
- break;
- }
- idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ r = radeon_fence_create(rdev, &ib->fence, ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
+ return r;
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_fence_unref(&fence);
- return r;
+
+ ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+ ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+ ib->vm_id = 0;
+ ib->is_const_ib = false;
+ ib->semaphore = NULL;
+
+ return 0;
}
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ib *tmp = *ib;
-
- *ib = NULL;
- if (tmp == NULL) {
- return;
- }
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (tmp->fence && !tmp->fence->emitted) {
- radeon_sa_bo_free(rdev, &tmp->sa_bo);
- radeon_fence_unref(&tmp->fence);
- }
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
+ radeon_semaphore_free(rdev, ib->semaphore, ib->fence);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+ radeon_fence_unref(&ib->fence);
}
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
@@ -187,14 +104,14 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
- DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
+ dev_err(rdev->dev, "couldn't schedule ib\n");
return -EINVAL;
}
/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, ring, 64);
if (r) {
- DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
+ dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
@@ -205,66 +122,40 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
int radeon_ib_pool_init(struct radeon_device *rdev)
{
- struct radeon_sa_manager tmp;
- int i, r;
+ int r;
- r = radeon_sa_bo_manager_init(rdev, &tmp,
+ if (rdev->ib_pool_ready) {
+ return 0;
+ }
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
RADEON_GEM_DOMAIN_GTT);
if (r) {
return r;
}
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.ready) {
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
- radeon_sa_bo_manager_fini(rdev, &tmp);
- return 0;
+ rdev->ib_pool_ready = true;
+ if (radeon_debugfs_sa_init(rdev)) {
+ dev_err(rdev->dev, "failed to register debugfs file for SA\n");
}
-
- rdev->ib_pool.sa_manager = tmp;
- INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- rdev->ib_pool.ibs[i].fence = NULL;
- rdev->ib_pool.ibs[i].idx = i;
- rdev->ib_pool.ibs[i].length_dw = 0;
- INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
- }
- rdev->ib_pool.head_id = 0;
- rdev->ib_pool.ready = true;
- DRM_INFO("radeon: ib pool ready.\n");
-
- if (radeon_debugfs_ib_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for IB !\n");
- }
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
return 0;
}
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
- unsigned i;
-
- radeon_mutex_lock(&rdev->ib_pool.mutex);
- if (rdev->ib_pool.ready) {
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
- radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
- }
- radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
- rdev->ib_pool.ready = false;
+ if (rdev->ib_pool_ready) {
+ radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+ rdev->ib_pool_ready = false;
}
- radeon_mutex_unlock(&rdev->ib_pool.mutex);
}
int radeon_ib_pool_start(struct radeon_device *rdev)
{
- return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
+ return radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
}
int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
- return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
+ return radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
}
int radeon_ib_ring_tests(struct radeon_device *rdev)
@@ -300,6 +191,21 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
/*
* Ring.
*/
+int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+#if DRM_DEBUG_CODE
+ if (ring->count_dw <= 0) {
+ DRM_ERROR("radeon: writting more dword to ring than expected !\n");
+ }
+#endif
+ ring->ring[ring->wptr++] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+ ring->ring_free_dw--;
+}
+
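
radeon_ring_write relies on ptr_mask to wrap the write pointer, which assumes the ring length in dwords is a power of two (ptr_mask == ring_size_in_dwords - 1). A short check of that wrap behaviour:

#include <assert.h>

/* Advance a ring write pointer with a power-of-two mask. */
static unsigned advance(unsigned wptr, unsigned ptr_mask)
{
    return (wptr + 1) & ptr_mask;
}

int main(void)
{
    unsigned mask = 1024 - 1;          /* 1024-dword ring */
    assert(advance(1023, mask) == 0);  /* wraps back to the start */
    assert(advance(0, mask) == 1);
    return 0;
}
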
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
/* r1xx-r5xx only has CP ring */
@@ -346,9 +252,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
if (ndw < ring->ring_free_dw) {
break;
}
- mutex_unlock(&ring->mutex);
- r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
- mutex_lock(&ring->mutex);
+ r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring));
if (r)
return r;
}
@@ -361,10 +265,10 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
{
int r;
- mutex_lock(&ring->mutex);
+ mutex_lock(&rdev->ring_lock);
r = radeon_ring_alloc(rdev, ring, ndw);
if (r) {
- mutex_unlock(&ring->mutex);
+ mutex_unlock(&rdev->ring_lock);
return r;
}
return 0;
@@ -389,20 +293,24 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_commit(rdev, ring);
- mutex_unlock(&ring->mutex);
+ mutex_unlock(&rdev->ring_lock);
}
-void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
+void radeon_ring_undo(struct radeon_ring *ring)
{
ring->wptr = ring->wptr_old;
- mutex_unlock(&ring->mutex);
+}
+
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ radeon_ring_undo(ring);
+ mutex_unlock(&rdev->ring_lock);
}
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
- mutex_lock(&ring->mutex);
radeon_ring_free_size(rdev, ring);
if (ring->rptr == ring->wptr) {
r = radeon_ring_alloc(rdev, ring, 1);
@@ -411,7 +319,6 @@ void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *
radeon_ring_commit(rdev, ring);
}
}
- mutex_unlock(&ring->mutex);
}
void radeon_ring_lockup_update(struct radeon_ring *ring)
@@ -520,11 +427,12 @@ void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
int r;
struct radeon_bo *ring_obj;
- mutex_lock(&ring->mutex);
+ mutex_lock(&rdev->ring_lock);
ring_obj = ring->ring_obj;
+ ring->ready = false;
ring->ring = NULL;
ring->ring_obj = NULL;
- mutex_unlock(&ring->mutex);
+ mutex_unlock(&rdev->ring_lock);
if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
@@ -577,29 +485,22 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};
-static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
- unsigned i;
- if (ib == NULL) {
- return 0;
- }
- seq_printf(m, "IB %04u\n", ib->idx);
- seq_printf(m, "IB fence %p\n", ib->fence);
- seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
- for (i = 0; i < ib->length_dw; i++) {
- seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
- }
+ radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
+
return 0;
+
}
-static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
-static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
-static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
+static struct drm_info_list radeon_debugfs_sa_list[] = {
+ {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+};
+
#endif
int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
@@ -622,21 +523,10 @@ int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *rin
return 0;
}
-int radeon_debugfs_ib_init(struct radeon_device *rdev)
+int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- unsigned i;
-
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
- radeon_debugfs_ib_idx[i] = i;
- radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
- radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
- radeon_debugfs_ib_list[i].driver_features = 0;
- radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
- }
- return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
- RADEON_IB_POOL_SIZE);
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
return 0;
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 8fbfe69b7bcb..c3ac7f4c7b70 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -27,20 +27,42 @@
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
+/* Algorithm:
+ *
+ * We store the last allocated bo in "hole", and we always try to allocate
+ * after the last allocated bo. The principle is that in a linear GPU ring
+ * progression, what comes after the last bo is the oldest bo we allocated
+ * and thus the first one that should no longer be in use by the GPU.
+ *
+ * If that's not the case, we skip over the bo after the last one to the
+ * closest done bo, if such a one exists. If none exists and we are not
+ * asked to block, we report failure to allocate.
+ *
+ * If we are asked to block, we wait on the oldest fence of each ring,
+ * and return as soon as any of those fences completes.
+ */
#include "drmP.h"
#include "drm.h"
#include "radeon.h"
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
+
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 domain)
{
- int r;
+ int i, r;
+ spin_lock_init(&sa_manager->lock);
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
- INIT_LIST_HEAD(&sa_manager->sa_bo);
+ sa_manager->hole = &sa_manager->olist;
+ INIT_LIST_HEAD(&sa_manager->olist);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ INIT_LIST_HEAD(&sa_manager->flist[i]);
+ }
r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
@@ -57,11 +79,15 @@ void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
{
struct radeon_sa_bo *sa_bo, *tmp;
- if (!list_empty(&sa_manager->sa_bo)) {
- dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+ if (!list_empty(&sa_manager->olist)) {
+ sa_manager->hole = &sa_manager->olist;
+ radeon_sa_bo_try_free(sa_manager);
+ if (!list_empty(&sa_manager->olist)) {
+ dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+ }
}
- list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
- list_del_init(&sa_bo->list);
+ list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
+ radeon_sa_bo_remove_locked(sa_bo);
}
radeon_bo_unref(&sa_manager->bo);
sa_manager->size = 0;
@@ -113,77 +139,248 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
return r;
}
-/*
- * Principe is simple, we keep a list of sub allocation in offset
- * order (first entry has offset == 0, last entry has the highest
- * offset).
- *
- * When allocating new object we first check if there is room at
- * the end total_size - (last_object_offset + last_object_size) >=
- * alloc_size. If so we allocate new object there.
- *
- * When there is not enough room at the end, we start waiting for
- * each sub object until we reach object_offset+object_size >=
- * alloc_size, this object then become the sub object we return.
- *
- * Alignment can't be bigger than page size
- */
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
+{
+ struct radeon_sa_manager *sa_manager = sa_bo->manager;
+ if (sa_manager->hole == &sa_bo->olist) {
+ sa_manager->hole = sa_bo->olist.prev;
+ }
+ list_del_init(&sa_bo->olist);
+ list_del_init(&sa_bo->flist);
+ radeon_fence_unref(&sa_bo->fence);
+ kfree(sa_bo);
+}
+
+static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
+{
+ struct radeon_sa_bo *sa_bo, *tmp;
+
+ if (sa_manager->hole->next == &sa_manager->olist)
+ return;
+
+ sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
+ list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
+ if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
+ return;
+ }
+ radeon_sa_bo_remove_locked(sa_bo);
+ }
+}
+
+static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole != &sa_manager->olist) {
+ return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
+ }
+ return 0;
+}
+
+static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole->next != &sa_manager->olist) {
+ return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
+ }
+ return sa_manager->size;
+}
+
+static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
+ struct radeon_sa_bo *sa_bo,
+ unsigned size, unsigned align)
+{
+ unsigned soffset, eoffset, wasted;
+
+ soffset = radeon_sa_bo_hole_soffset(sa_manager);
+ eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+ wasted = (align - (soffset % align)) % align;
+
+ if ((eoffset - soffset) >= (size + wasted)) {
+ soffset += wasted;
+
+ sa_bo->manager = sa_manager;
+ sa_bo->soffset = soffset;
+ sa_bo->eoffset = soffset + size;
+ list_add(&sa_bo->olist, sa_manager->hole);
+ INIT_LIST_HEAD(&sa_bo->flist);
+ sa_manager->hole = &sa_bo->olist;
+ return true;
+ }
+ return false;
+}
+
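
radeon_sa_bo_try_alloc rounds the hole start up to the requested alignment and only accepts the hole if the aligned block still fits before the hole end. The same test in isolation, with a couple of worked cases:

#include <assert.h>

/* Fit test used by the sub-allocator: account for the bytes wasted by
 * rounding the hole start up to 'align', then check the block fits. */
static int fits(unsigned soffset, unsigned eoffset,
                unsigned size, unsigned align)
{
    unsigned wasted = (align - (soffset % align)) % align;

    return (eoffset - soffset) >= (size + wasted);
}

int main(void)
{
    assert(fits(100, 356, 256, 256) == 0);  /* 156 bytes lost to alignment */
    assert(fits(0, 512, 256, 256) == 1);    /* already aligned, fits */
    return 0;
}
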
+static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
+ struct radeon_fence **fences,
+ unsigned *tries)
+{
+ struct radeon_sa_bo *best_bo = NULL;
+ unsigned i, soffset, best, tmp;
+
+ /* if hole points to the end of the buffer */
+ if (sa_manager->hole->next == &sa_manager->olist) {
+ /* try again with its beginning */
+ sa_manager->hole = &sa_manager->olist;
+ return true;
+ }
+
+ soffset = radeon_sa_bo_hole_soffset(sa_manager);
+ /* to handle wrap around we add sa_manager->size */
+ best = sa_manager->size * 2;
+ /* go over all fence lists and try to find the sa_bo closest
+ * to the current last one
+ */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_sa_bo *sa_bo;
+
+ if (list_empty(&sa_manager->flist[i])) {
+ continue;
+ }
+
+ sa_bo = list_first_entry(&sa_manager->flist[i],
+ struct radeon_sa_bo, flist);
+
+ if (!radeon_fence_signaled(sa_bo->fence)) {
+ fences[i] = sa_bo->fence;
+ continue;
+ }
+
+ /* limit the number of tries each ring gets */
+ if (tries[i] > 2) {
+ continue;
+ }
+
+ tmp = sa_bo->soffset;
+ if (tmp < soffset) {
+ /* wrap around, pretend it's after */
+ tmp += sa_manager->size;
+ }
+ tmp -= soffset;
+ if (tmp < best) {
+ /* this sa bo is the closest one */
+ best = tmp;
+ best_bo = sa_bo;
+ }
+ }
+
+ if (best_bo) {
+ ++tries[best_bo->fence->ring];
+ sa_manager->hole = best_bo->olist.prev;
+
+ /* we knew that this one is signaled,
+ so it's safe to remove it */
+ radeon_sa_bo_remove_locked(best_bo);
+ return true;
+ }
+ return false;
+}
+
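
radeon_sa_bo_next_hole measures how far each per-ring candidate sits after the current hole, treating numerically smaller offsets as wrapped around, and then frees the closest signaled one. The distance computation on its own:

#include <assert.h>

/* Circular distance from the hole start to a candidate block start:
 * candidates before the hole count as if they came after a wrap. */
static unsigned ring_distance(unsigned hole, unsigned candidate, unsigned size)
{
    return (candidate >= hole) ? candidate - hole
                               : candidate + size - hole;
}

int main(void)
{
    assert(ring_distance(100, 300, 1024) == 200);
    assert(ring_distance(300, 100, 1024) == 824);   /* wrapped around */
    return 0;
}
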
int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo *sa_bo,
- unsigned size, unsigned align)
+ struct radeon_sa_bo **sa_bo,
+ unsigned size, unsigned align, bool block)
{
- struct radeon_sa_bo *tmp;
- struct list_head *head;
- unsigned offset = 0, wasted = 0;
+ struct radeon_fence *fences[RADEON_NUM_RINGS];
+ unsigned tries[RADEON_NUM_RINGS];
+ int i, r = -ENOMEM;
BUG_ON(align > RADEON_GPU_PAGE_SIZE);
BUG_ON(size > sa_manager->size);
- /* no one ? */
- head = sa_manager->sa_bo.prev;
- if (list_empty(&sa_manager->sa_bo)) {
- goto out;
+ *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
+ if ((*sa_bo) == NULL) {
+ return -ENOMEM;
}
+ (*sa_bo)->manager = sa_manager;
+ (*sa_bo)->fence = NULL;
+ INIT_LIST_HEAD(&(*sa_bo)->olist);
+ INIT_LIST_HEAD(&(*sa_bo)->flist);
- /* look for a hole big enough */
- offset = 0;
- list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
- /* room before this object ? */
- if (offset < tmp->offset && (tmp->offset - offset) >= size) {
- head = tmp->list.prev;
- goto out;
+ spin_lock(&sa_manager->lock);
+ do {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ fences[i] = NULL;
+ tries[i] = 0;
}
- offset = tmp->offset + tmp->size;
- wasted = offset % align;
- if (wasted) {
- wasted = align - wasted;
+
+ do {
+ radeon_sa_bo_try_free(sa_manager);
+
+ if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
+ size, align)) {
+ spin_unlock(&sa_manager->lock);
+ return 0;
+ }
+
+ /* see if we can skip over some allocations */
+ } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+
+ if (block) {
+ spin_unlock(&sa_manager->lock);
+ r = radeon_fence_wait_any(rdev, fences, false);
+ spin_lock(&sa_manager->lock);
+ if (r) {
+ /* if we have nothing to wait for we
+ are practically out of memory */
+ if (r == -ENOENT) {
+ r = -ENOMEM;
+ }
+ goto out_err;
+ }
}
- offset += wasted;
- }
- /* room at the end ? */
- head = sa_manager->sa_bo.prev;
- tmp = list_entry(head, struct radeon_sa_bo, list);
- offset = tmp->offset + tmp->size;
- wasted = offset % align;
- if (wasted) {
- wasted = align - wasted;
- }
- offset += wasted;
- if ((sa_manager->size - offset) < size) {
- /* failed to find somethings big enough */
- return -ENOMEM;
+ } while (block);
+
+out_err:
+ spin_unlock(&sa_manager->lock);
+ kfree(*sa_bo);
+ *sa_bo = NULL;
+ return r;
+}
+
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
+ struct radeon_fence *fence)
+{
+ struct radeon_sa_manager *sa_manager;
+
+ if (sa_bo == NULL || *sa_bo == NULL) {
+ return;
}
-out:
- sa_bo->manager = sa_manager;
- sa_bo->offset = offset;
- sa_bo->size = size;
- list_add(&sa_bo->list, head);
- return 0;
+ sa_manager = (*sa_bo)->manager;
+ spin_lock(&sa_manager->lock);
+ if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+ (*sa_bo)->fence = radeon_fence_ref(fence);
+ list_add_tail(&(*sa_bo)->flist,
+ &sa_manager->flist[fence->ring]);
+ } else {
+ radeon_sa_bo_remove_locked(*sa_bo);
+ }
+ spin_unlock(&sa_manager->lock);
+ *sa_bo = NULL;
}
-void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
+#if defined(CONFIG_DEBUG_FS)
+void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
+ struct seq_file *m)
{
- list_del_init(&sa_bo->list);
+ struct radeon_sa_bo *i;
+
+ spin_lock(&sa_manager->lock);
+ list_for_each_entry(i, &sa_manager->olist, olist) {
+ if (&i->olist == sa_manager->hole) {
+ seq_printf(m, ">");
+ } else {
+ seq_printf(m, " ");
+ }
+ seq_printf(m, "[0x%08x 0x%08x] size %8d",
+ i->soffset, i->eoffset, i->eoffset - i->soffset);
+ if (i->fence) {
+ seq_printf(m, " protected by 0x%016llx on ring %d",
+ i->fence->seq, i->fence->ring);
+ }
+ seq_printf(m, "\n");
+ }
+ spin_unlock(&sa_manager->lock);
}
+#endif
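
The manager above treats its backing buffer as a ring: the "hole" walks forward, and radeon_sa_bo_next_hole() jumps it to the oldest signaled allocation, adding sa_manager->size when comparing offsets so wrap-around sorts correctly. From a caller's point of view the reworked interface is used roughly as follows (a sketch, not part of the patch; the 256-byte size and 16-byte alignment are illustrative, and rdev and fence are assumed to come from the caller):

/* Sketch: grab space from a radeon_sa_manager, let the GPU reference it,
 * then release it tied to the fence that protects the GPU work. */
static int example_suballoc(struct radeon_device *rdev,
                            struct radeon_fence *fence)
{
        struct radeon_sa_bo *sa_bo;
        uint64_t gpu_addr;
        void *cpu_ptr;
        int r;

        /* block = true: wait on the per-ring fences if the manager is full */
        r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &sa_bo, 256, 16, true);
        if (r)
                return r;

        gpu_addr = radeon_sa_bo_gpu_addr(sa_bo);
        cpu_ptr = radeon_sa_bo_cpu_addr(sa_bo);

        /* ... fill cpu_ptr and emit GPU commands referencing gpu_addr ... */

        /* passing a fence defers reuse of the hole until the fence signals */
        radeon_sa_bo_free(rdev, &sa_bo, fence);
        return 0;
}
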
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 930a08af900f..e2ace5dce117 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -31,121 +31,40 @@
#include "drm.h"
#include "radeon.h"
-static int radeon_semaphore_add_bo(struct radeon_device *rdev)
-{
- struct radeon_semaphore_bo *bo;
- unsigned long irq_flags;
- uint64_t gpu_addr;
- uint32_t *cpu_ptr;
- int r, i;
-
-
- bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
- if (bo == NULL) {
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&bo->free);
- INIT_LIST_HEAD(&bo->list);
- bo->nused = 0;
-
- r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
- if (r) {
- dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
- kfree(bo);
- return r;
- }
- gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
- gpu_addr += bo->ib->sa_bo.offset;
- cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
- cpu_ptr += (bo->ib->sa_bo.offset >> 2);
- for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
- bo->semaphores[i].gpu_addr = gpu_addr;
- bo->semaphores[i].cpu_ptr = cpu_ptr;
- bo->semaphores[i].bo = bo;
- list_add_tail(&bo->semaphores[i].list, &bo->free);
- gpu_addr += 8;
- cpu_ptr += 2;
- }
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
- return 0;
-}
-
-static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
- struct radeon_semaphore_bo *bo)
-{
- radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
- radeon_fence_unref(&bo->ib->fence);
- list_del(&bo->list);
- kfree(bo);
-}
-
-void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
-{
- struct radeon_semaphore_bo *bo, *n;
-
- if (list_empty(&rdev->semaphore_drv.bo)) {
- return;
- }
- /* only shrink if first bo has free semaphore */
- bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
- if (list_empty(&bo->free)) {
- return;
- }
- list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
- if (bo->nused)
- continue;
- radeon_semaphore_del_bo_locked(rdev, bo);
- }
-}
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
- struct radeon_semaphore_bo *bo;
- unsigned long irq_flags;
- bool do_retry = true;
int r;
-retry:
- *semaphore = NULL;
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
- if (list_empty(&bo->free))
- continue;
- *semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
- (*semaphore)->cpu_ptr[0] = 0;
- (*semaphore)->cpu_ptr[1] = 0;
- list_del(&(*semaphore)->list);
- bo->nused++;
- break;
- }
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
-
+ *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
- if (do_retry) {
- do_retry = false;
- r = radeon_semaphore_add_bo(rdev);
- if (r)
- return r;
- goto retry;
- }
return -ENOMEM;
}
-
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
+ &(*semaphore)->sa_bo, 8, 8, true);
+ if (r) {
+ kfree(*semaphore);
+ *semaphore = NULL;
+ return r;
+ }
+ (*semaphore)->waiters = 0;
+ (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
+ *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
return 0;
}
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
+ --semaphore->waiters;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
}
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
+ ++semaphore->waiters;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
}
@@ -154,13 +73,17 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
bool sync_to[RADEON_NUM_RINGS],
int dst_ring)
{
- int i, r;
+ int i = 0, r;
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- unsigned num_ops = i == dst_ring ? RADEON_NUM_RINGS : 1;
+ mutex_lock(&rdev->ring_lock);
+ r = radeon_ring_alloc(rdev, &rdev->ring[dst_ring], RADEON_NUM_RINGS * 8);
+ if (r) {
+ goto error;
+ }
- /* don't lock unused rings */
- if (!sync_to[i] && i != dst_ring)
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ /* no need to sync to our own or unused rings */
+ if (!sync_to[i] || i == dst_ring)
continue;
/* prevent GPU deadlocks */
@@ -170,65 +93,45 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
goto error;
}
- r = radeon_ring_lock(rdev, &rdev->ring[i], num_ops * 8);
- if (r)
+ r = radeon_ring_alloc(rdev, &rdev->ring[i], 8);
+ if (r) {
goto error;
- }
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- /* no need to sync to our own or unused rings */
- if (!sync_to[i] || i == dst_ring)
- continue;
+ }
radeon_semaphore_emit_signal(rdev, i, semaphore);
radeon_semaphore_emit_wait(rdev, dst_ring, semaphore);
- }
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- /* don't unlock unused rings */
- if (!sync_to[i] && i != dst_ring)
- continue;
-
- radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
+ radeon_ring_commit(rdev, &rdev->ring[i]);
}
+ radeon_ring_commit(rdev, &rdev->ring[dst_ring]);
+ mutex_unlock(&rdev->ring_lock);
+
return 0;
error:
/* unlock all locks taken so far */
for (--i; i >= 0; --i) {
if (sync_to[i] || i == dst_ring) {
- radeon_ring_unlock_undo(rdev, &rdev->ring[i]);
+ radeon_ring_undo(&rdev->ring[i]);
}
}
+ radeon_ring_undo(&rdev->ring[dst_ring]);
+ mutex_unlock(&rdev->ring_lock);
return r;
}
void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore)
+ struct radeon_semaphore *semaphore,
+ struct radeon_fence *fence)
{
- unsigned long irq_flags;
-
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- semaphore->bo->nused--;
- list_add_tail(&semaphore->list, &semaphore->bo->free);
- radeon_semaphore_shrink_locked(rdev);
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
-}
-
-void radeon_semaphore_driver_fini(struct radeon_device *rdev)
-{
- struct radeon_semaphore_bo *bo, *n;
- unsigned long irq_flags;
-
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- /* we force to free everything */
- list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
- if (!list_empty(&bo->free)) {
- dev_err(rdev->dev, "still in use semaphore\n");
- }
- radeon_semaphore_del_bo_locked(rdev, bo);
+ if (semaphore == NULL) {
+ return;
+ }
+ if (semaphore->waiters > 0) {
+ dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
+ " hardware lockup imminent!\n", semaphore);
}
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+ radeon_sa_bo_free(rdev, &semaphore->sa_bo, fence);
+ kfree(semaphore);
}
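
With the per-BO semaphore pool gone, each semaphore is a small sub-allocation with its own lifetime. The sequence a caller follows now looks like this (a sketch only; src_ring, dst_ring and fence are assumed inputs, and error paths are trimmed):

/* Sketch: order dst_ring after src_ring with a throw-away semaphore. */
static int example_sync(struct radeon_device *rdev, int src_ring,
                        int dst_ring, struct radeon_fence *fence)
{
        struct radeon_semaphore *sem = NULL;
        bool sync_to[RADEON_NUM_RINGS] = { };
        int r;

        r = radeon_semaphore_create(rdev, &sem);
        if (r)
                return r;

        sync_to[src_ring] = true;
        r = radeon_semaphore_sync_rings(rdev, sem, sync_to, dst_ring);
        if (r) {
                radeon_semaphore_free(rdev, sem, NULL);
                return r;
        }

        /* ... submit the work on dst_ring that produces "fence" ... */

        /* the fence keeps the semaphore's sa_bo alive until the GPU is done */
        radeon_semaphore_free(rdev, sem, fence);
        return 0;
}

This mirrors the radeon_move_blit() changes further down, where the semaphore is freed with the copy fence on success and with NULL on the error paths.
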
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index dc5dcf483aa3..b05738762790 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -317,7 +317,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
out_cleanup:
if (semaphore)
- radeon_semaphore_free(rdev, semaphore);
+ radeon_semaphore_free(rdev, semaphore, NULL);
if (fence1)
radeon_fence_unref(&fence1);
@@ -437,7 +437,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
out_cleanup:
if (semaphore)
- radeon_semaphore_free(rdev, semaphore);
+ radeon_semaphore_free(rdev, semaphore, NULL);
if (fenceA)
radeon_fence_unref(&fenceA);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5e3d54ded1b3..0f6aee8aa152 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -223,6 +223,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
uint64_t old_start, new_start;
struct radeon_fence *fence, *old_fence;
+ struct radeon_semaphore *sem = NULL;
int r;
rdev = radeon_get_rdev(bo->bdev);
@@ -272,15 +273,16 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
bool sync_to_ring[RADEON_NUM_RINGS] = { };
sync_to_ring[old_fence->ring] = true;
- r = radeon_semaphore_create(rdev, &fence->semaphore);
+ r = radeon_semaphore_create(rdev, &sem);
if (r) {
radeon_fence_unref(&fence);
return r;
}
- r = radeon_semaphore_sync_rings(rdev, fence->semaphore,
+ r = radeon_semaphore_sync_rings(rdev, sem,
sync_to_ring, fence->ring);
if (r) {
+ radeon_semaphore_free(rdev, sem, NULL);
radeon_fence_unref(&fence);
return r;
}
@@ -292,6 +294,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
evict, no_wait_reserve, no_wait_gpu, new_mem);
+ radeon_semaphore_free(rdev, sem, fence);
radeon_fence_unref(&fence);
return r;
}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index a8b001641e4b..c2f473bc13b8 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1175,10 +1175,6 @@ int rv770_init(struct radeon_device *rdev)
{
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -1278,7 +1274,6 @@ void rv770_fini(struct radeon_device *rdev)
rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 779f0b604fad..63ae41dac5ca 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2265,6 +2265,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
SOFT_RESET_GDS |
SOFT_RESET_PA |
SOFT_RESET_SC |
+ SOFT_RESET_BCI |
SOFT_RESET_SPI |
SOFT_RESET_SX |
SOFT_RESET_TC |
@@ -3986,10 +3987,6 @@ int si_init(struct radeon_device *rdev)
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- /* This don't do much */
- r = radeon_gem_init(rdev);
- if (r)
- return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -4109,7 +4106,6 @@ void si_fini(struct radeon_device *rdev)
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index cb1ee4e0050a..6eb507a5d130 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -735,7 +735,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
return -EINVAL;
}
drm_core_ioremap(dev->agp_buffer_map, dev);
- if (!dev->agp_buffer_map) {
+ if (!dev->agp_buffer_map->handle) {
DRM_ERROR("failed to ioremap DMA buffer region!\n");
savage_do_cleanup_bci(dev);
return -ENOMEM;
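
The savage change is a plain NULL-check fix: drm_core_ioremap() does not return the mapping, it stores it in map->handle, so testing the map pointer itself can never catch an ioremap failure. In general form the pattern is (a sketch, not taken from the patch):

        drm_core_ioremap(map, dev);     /* fills map->handle on success */
        if (!map->handle) {
                DRM_ERROR("ioremap failed\n");
                return -ENOMEM;
        }
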
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 9d830286f883..38f9534ac513 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -34,11 +34,10 @@ struct vga_switcheroo_client {
struct pci_dev *pdev;
struct fb_info *fb_info;
int pwr_state;
- void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
- void (*reprobe)(struct pci_dev *pdev);
- bool (*can_switch)(struct pci_dev *pdev);
+ const struct vga_switcheroo_client_ops *ops;
int id;
bool active;
+ struct list_head list;
};
static DEFINE_MUTEX(vgasr_mutex);
@@ -53,16 +52,23 @@ struct vgasr_priv {
struct dentry *switch_file;
int registered_clients;
- struct vga_switcheroo_client clients[VGA_SWITCHEROO_MAX_CLIENTS];
+ struct list_head clients;
struct vga_switcheroo_handler *handler;
};
+#define ID_BIT_AUDIO 0x100
+#define client_is_audio(c) ((c)->id & ID_BIT_AUDIO)
+#define client_is_vga(c) ((c)->id == -1 || !client_is_audio(c))
+#define client_id(c) ((c)->id & ~ID_BIT_AUDIO)
+
static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
/* only one switcheroo per system */
-static struct vgasr_priv vgasr_priv;
+static struct vgasr_priv vgasr_priv = {
+ .clients = LIST_HEAD_INIT(vgasr_priv.clients),
+};
int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
{
@@ -88,72 +94,119 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
static void vga_switcheroo_enable(void)
{
- int i;
int ret;
+ struct vga_switcheroo_client *client;
+
/* call the handler to init */
vgasr_priv.handler->init();
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- ret = vgasr_priv.handler->get_client_id(vgasr_priv.clients[i].pdev);
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (client->id != -1)
+ continue;
+ ret = vgasr_priv.handler->get_client_id(client->pdev);
if (ret < 0)
return;
- vgasr_priv.clients[i].id = ret;
+ client->id = ret;
}
vga_switcheroo_debugfs_init(&vgasr_priv);
vgasr_priv.active = true;
}
-int vga_switcheroo_register_client(struct pci_dev *pdev,
- void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
- void (*reprobe)(struct pci_dev *pdev),
- bool (*can_switch)(struct pci_dev *pdev))
+static int register_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops,
+ int id, bool active)
{
- int index;
+ struct vga_switcheroo_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->pwr_state = VGA_SWITCHEROO_ON;
+ client->pdev = pdev;
+ client->ops = ops;
+ client->id = id;
+ client->active = active;
mutex_lock(&vgasr_mutex);
- /* don't do IGD vs DIS here */
- if (vgasr_priv.registered_clients & 1)
- index = 1;
- else
- index = 0;
-
- vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
- vgasr_priv.clients[index].pdev = pdev;
- vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
- vgasr_priv.clients[index].reprobe = reprobe;
- vgasr_priv.clients[index].can_switch = can_switch;
- vgasr_priv.clients[index].id = -1;
- if (pdev == vga_default_device())
- vgasr_priv.clients[index].active = true;
-
- vgasr_priv.registered_clients |= (1 << index);
+ list_add_tail(&client->list, &vgasr_priv.clients);
+ if (client_is_vga(client))
+ vgasr_priv.registered_clients++;
/* if we get two clients + handler */
- if (vgasr_priv.registered_clients == 0x3 && vgasr_priv.handler) {
+ if (!vgasr_priv.active &&
+ vgasr_priv.registered_clients == 2 && vgasr_priv.handler) {
printk(KERN_INFO "vga_switcheroo: enabled\n");
vga_switcheroo_enable();
}
mutex_unlock(&vgasr_mutex);
return 0;
}
+
+int vga_switcheroo_register_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops)
+{
+ return register_client(pdev, ops, -1,
+ pdev == vga_default_device());
+}
EXPORT_SYMBOL(vga_switcheroo_register_client);
+int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops,
+ int id, bool active)
+{
+ return register_client(pdev, ops, id | ID_BIT_AUDIO, active);
+}
+EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
+
+static struct vga_switcheroo_client *
+find_client_from_pci(struct list_head *head, struct pci_dev *pdev)
+{
+ struct vga_switcheroo_client *client;
+ list_for_each_entry(client, head, list)
+ if (client->pdev == pdev)
+ return client;
+ return NULL;
+}
+
+static struct vga_switcheroo_client *
+find_client_from_id(struct list_head *head, int client_id)
+{
+ struct vga_switcheroo_client *client;
+ list_for_each_entry(client, head, list)
+ if (client->id == client_id)
+ return client;
+ return NULL;
+}
+
+static struct vga_switcheroo_client *
+find_active_client(struct list_head *head)
+{
+ struct vga_switcheroo_client *client;
+ list_for_each_entry(client, head, list)
+ if (client->active && client_is_vga(client))
+ return client;
+ return NULL;
+}
+
void vga_switcheroo_unregister_client(struct pci_dev *pdev)
{
- int i;
+ struct vga_switcheroo_client *client;
mutex_lock(&vgasr_mutex);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].pdev == pdev) {
- vgasr_priv.registered_clients &= ~(1 << i);
- break;
- }
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (client) {
+ if (client_is_vga(client))
+ vgasr_priv.registered_clients--;
+ list_del(&client->list);
+ kfree(client);
+ }
+ if (vgasr_priv.active && vgasr_priv.registered_clients < 2) {
+ printk(KERN_INFO "vga_switcheroo: disabled\n");
+ vga_switcheroo_debugfs_fini(&vgasr_priv);
+ vgasr_priv.active = false;
}
-
- printk(KERN_INFO "vga_switcheroo: disabled\n");
- vga_switcheroo_debugfs_fini(&vgasr_priv);
- vgasr_priv.active = false;
mutex_unlock(&vgasr_mutex);
}
EXPORT_SYMBOL(vga_switcheroo_unregister_client);
@@ -161,29 +214,29 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_client);
void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
struct fb_info *info)
{
- int i;
+ struct vga_switcheroo_client *client;
mutex_lock(&vgasr_mutex);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].pdev == pdev) {
- vgasr_priv.clients[i].fb_info = info;
- break;
- }
- }
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (client)
+ client->fb_info = info;
mutex_unlock(&vgasr_mutex);
}
EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
static int vga_switcheroo_show(struct seq_file *m, void *v)
{
- int i;
+ struct vga_switcheroo_client *client;
+ int i = 0;
mutex_lock(&vgasr_mutex);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- seq_printf(m, "%d:%s:%c:%s:%s\n", i,
- vgasr_priv.clients[i].id == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
- vgasr_priv.clients[i].active ? '+' : ' ',
- vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
- pci_name(vgasr_priv.clients[i].pdev));
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ seq_printf(m, "%d:%s%s:%c:%s:%s\n", i,
+ client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
+ client_is_vga(client) ? "" : "-Audio",
+ client->active ? '+' : ' ',
+ client->pwr_state ? "Pwr" : "Off",
+ pci_name(client->pdev));
+ i++;
}
mutex_unlock(&vgasr_mutex);
return 0;
@@ -199,7 +252,7 @@ static int vga_switchon(struct vga_switcheroo_client *client)
if (vgasr_priv.handler->power_state)
vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
/* call the driver callback to turn on device */
- client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
+ client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
client->pwr_state = VGA_SWITCHEROO_ON;
return 0;
}
@@ -207,25 +260,30 @@ static int vga_switchon(struct vga_switcheroo_client *client)
static int vga_switchoff(struct vga_switcheroo_client *client)
{
/* call the driver callback to turn off device */
- client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
+ client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
if (vgasr_priv.handler->power_state)
vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
client->pwr_state = VGA_SWITCHEROO_OFF;
return 0;
}
+static void set_audio_state(int id, int state)
+{
+ struct vga_switcheroo_client *client;
+
+ client = find_client_from_id(&vgasr_priv.clients, id | ID_BIT_AUDIO);
+ if (client && client->pwr_state != state) {
+ client->ops->set_gpu_state(client->pdev, state);
+ client->pwr_state = state;
+ }
+}
+
/* stage one happens before delay */
static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
{
- int i;
- struct vga_switcheroo_client *active = NULL;
+ struct vga_switcheroo_client *active;
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active == true) {
- active = &vgasr_priv.clients[i];
- break;
- }
- }
+ active = find_active_client(&vgasr_priv.clients);
if (!active)
return 0;
@@ -233,6 +291,7 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
vga_switchon(new_client);
vga_set_default_device(new_client->pdev);
+ set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
return 0;
}
@@ -241,15 +300,9 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
{
int ret;
- int i;
- struct vga_switcheroo_client *active = NULL;
+ struct vga_switcheroo_client *active;
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active == true) {
- active = &vgasr_priv.clients[i];
- break;
- }
- }
+ active = find_active_client(&vgasr_priv.clients);
if (!active)
return 0;
@@ -265,8 +318,10 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
if (ret)
return ret;
- if (new_client->reprobe)
- new_client->reprobe(new_client->pdev);
+ if (new_client->ops->reprobe)
+ new_client->ops->reprobe(new_client->pdev);
+
+ set_audio_state(active->id, VGA_SWITCHEROO_OFF);
if (active->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(active);
@@ -275,13 +330,26 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
return 0;
}
+static bool check_can_switch(void)
+{
+ struct vga_switcheroo_client *client;
+
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (!client->ops->can_switch(client->pdev)) {
+ printk(KERN_ERR "vga_switcheroo: client %x refused switch\n", client->id);
+ return false;
+ }
+ }
+ return true;
+}
+
static ssize_t
vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char usercmd[64];
const char *pdev_name;
- int i, ret;
+ int ret;
bool delay = false, can_switch;
bool just_mux = false;
int client_id = -1;
@@ -302,21 +370,21 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
/* pwr off the device not in use */
if (strncmp(usercmd, "OFF", 3) == 0) {
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active)
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (client->active)
continue;
- if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_ON)
- vga_switchoff(&vgasr_priv.clients[i]);
+ if (client->pwr_state == VGA_SWITCHEROO_ON)
+ vga_switchoff(client);
}
goto out;
}
/* pwr on the device not in use */
if (strncmp(usercmd, "ON", 2) == 0) {
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].active)
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (client->active)
continue;
- if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_OFF)
- vga_switchon(&vgasr_priv.clients[i]);
+ if (client->pwr_state == VGA_SWITCHEROO_OFF)
+ vga_switchon(client);
}
goto out;
}
@@ -349,13 +417,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
if (client_id == -1)
goto out;
-
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].id == client_id) {
- client = &vgasr_priv.clients[i];
- break;
- }
- }
+ client = find_client_from_id(&vgasr_priv.clients, client_id);
+ if (!client)
+ goto out;
vgasr_priv.delayed_switch_active = false;
@@ -364,23 +428,16 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
goto out;
}
- if (client->active == true)
+ if (client->active)
goto out;
/* okay we want a switch - test if devices are willing to switch */
- can_switch = true;
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
- if (can_switch == false) {
- printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
- break;
- }
- }
+ can_switch = check_can_switch();
if (can_switch == false && delay == false)
goto out;
- if (can_switch == true) {
+ if (can_switch) {
pdev_name = pci_name(client->pdev);
ret = vga_switchto_stage1(client);
if (ret)
@@ -452,10 +509,8 @@ fail:
int vga_switcheroo_process_delayed_switch(void)
{
- struct vga_switcheroo_client *client = NULL;
+ struct vga_switcheroo_client *client;
const char *pdev_name;
- bool can_switch = true;
- int i;
int ret;
int err = -EINVAL;
@@ -465,17 +520,9 @@ int vga_switcheroo_process_delayed_switch(void)
printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
- for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
- if (vgasr_priv.clients[i].id == vgasr_priv.delayed_client_id)
- client = &vgasr_priv.clients[i];
- can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
- if (can_switch == false) {
- printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
- break;
- }
- }
-
- if (can_switch == false || client == NULL)
+ client = find_client_from_id(&vgasr_priv.clients,
+ vgasr_priv.delayed_client_id);
+ if (!client || !check_can_switch())
goto err;
pdev_name = pci_name(client->pdev);
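
Together with the vga_switcheroo.h change at the end of this patch, drivers now hand vga_switcheroo a single const ops table instead of three separate function pointers. A registration sketch (callback names are illustrative; real drivers wire these into their own power-management paths):

/* Sketch: the three callbacks are now grouped in vga_switcheroo_client_ops. */
static void example_set_gpu_state(struct pci_dev *pdev,
                                  enum vga_switcheroo_state state)
{
        /* power the GPU up or down */
}

static void example_reprobe(struct pci_dev *pdev)
{
        /* re-detect outputs after the mux switched to this GPU */
}

static bool example_can_switch(struct pci_dev *pdev)
{
        /* return false while the device is busy */
        return true;
}

static const struct vga_switcheroo_client_ops example_ops = {
        .set_gpu_state  = example_set_gpu_state,
        .reprobe        = example_reprobe,
        .can_switch     = example_can_switch,
};

/* in the driver's probe path */
static int example_register(struct pci_dev *pdev)
{
        return vga_switcheroo_register_client(pdev, &example_ops);
}

An HDMI audio function on the same card would call vga_switcheroo_register_audio_client() with its own ops table and the parent GPU's client id; the ID_BIT_AUDIO flag added above keeps the two apart internally.
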
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 64ff02d5b730..e51035a3757f 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -730,6 +730,8 @@ struct drm_prime_handle {
#define DRM_IOCTL_MODE_GETPLANE DRM_IOWR(0xB6, struct drm_mode_get_plane)
#define DRM_IOCTL_MODE_SETPLANE DRM_IOWR(0xB7, struct drm_mode_set_plane)
#define DRM_IOCTL_MODE_ADDFB2 DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
+#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
+#define DRM_IOCTL_MODE_OBJ_SETPROPERTY DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
/**
* Device specific ioctls should only be in their respective headers
@@ -775,6 +777,10 @@ struct drm_event_vblank {
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+#define DRM_CAP_PRIME 0x5
+
+#define DRM_PRIME_CAP_IMPORT 0x1
+#define DRM_PRIME_CAP_EXPORT 0x2
/* typedef area */
#ifndef __KERNEL__
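
Userspace can probe the new capability before attempting buffer sharing, for example (a sketch; assumes the exported DRM headers are on the include path and fd is an open DRM device node):

/* Sketch: query DRM_CAP_PRIME and inspect the import/export bits. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int query_prime_caps(int fd, uint64_t *caps)
{
        struct drm_get_cap cap = { .capability = DRM_CAP_PRIME };

        if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap))
                return -1;
        *caps = cap.value;  /* DRM_PRIME_CAP_IMPORT / DRM_PRIME_CAP_EXPORT */
        return 0;
}
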
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 10a5f371e658..efd124903761 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1309,8 +1309,8 @@ extern int drm_release(struct inode *inode, struct file *filp);
/* Mapping support (drm_vm.h) */
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
-extern void drm_vm_open_locked(struct vm_area_struct *vma);
-extern void drm_vm_close_locked(struct vm_area_struct *vma);
+extern void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
+extern void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
/* Memory management support (drm_memory.h) */
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index f35e7edd7de2..d59bb7d93657 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -36,6 +36,7 @@
struct drm_device;
struct drm_mode_set;
struct drm_framebuffer;
+struct drm_object_properties;
#define DRM_MODE_OBJECT_CRTC 0xcccccccc
@@ -50,6 +51,14 @@ struct drm_framebuffer;
struct drm_mode_object {
uint32_t id;
uint32_t type;
+ struct drm_object_properties *properties;
+};
+
+#define DRM_OBJECT_MAX_PROPERTY 16
+struct drm_object_properties {
+ int count;
+ uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
+ uint64_t values[DRM_OBJECT_MAX_PROPERTY];
};
/*
@@ -297,7 +306,8 @@ struct drm_plane;
* @mode_fixup: fixup proposed mode
* @mode_set: set the desired mode on the CRTC
* @gamma_set: specify color ramp for CRTC
- * @destroy: deinit and free object.
+ * @destroy: deinit and free object
+ * @set_property: called when a property is changed
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
@@ -341,6 +351,9 @@ struct drm_crtc_funcs {
int (*page_flip)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
+
+ int (*set_property)(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val);
};
/**
@@ -360,6 +373,7 @@ struct drm_crtc_funcs {
* @framedur_ns: precise line timing
* @pixeldur_ns: precise pixel timing
* @helper_private: mid-layer private data
+ * @properties: property tracking for this CRTC
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
@@ -395,6 +409,8 @@ struct drm_crtc {
/* if you are using the helper */
void *helper_private;
+
+ struct drm_object_properties properties;
};
@@ -451,7 +467,6 @@ struct drm_encoder_funcs {
};
#define DRM_CONNECTOR_MAX_UMODES 16
-#define DRM_CONNECTOR_MAX_PROPERTY 16
#define DRM_CONNECTOR_LEN 32
#define DRM_CONNECTOR_MAX_ENCODER 3
@@ -520,8 +535,7 @@ enum drm_connector_force {
* @funcs: connector control functions
* @user_modes: user added mode list
* @edid_blob_ptr: DRM property containing EDID if present
- * @property_ids: property tracking for this connector
- * @property_values: value pointers or data for properties
+ * @properties: property tracking for this connector
* @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
* @dpms: current dpms state
* @helper_private: mid-layer private data
@@ -565,8 +579,7 @@ struct drm_connector {
struct list_head user_modes;
struct drm_property_blob *edid_blob_ptr;
- u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
- uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
+ struct drm_object_properties properties;
uint8_t polled; /* DRM_CONNECTOR_POLL_* */
@@ -898,6 +911,12 @@ extern int drm_connector_property_set_value(struct drm_connector *connector,
extern int drm_connector_property_get_value(struct drm_connector *connector,
struct drm_property *property,
uint64_t *value);
+extern int drm_object_property_set_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t val);
+extern int drm_object_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t *value);
extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
extern void drm_framebuffer_set_object(struct drm_device *dev,
unsigned long handle);
@@ -910,8 +929,11 @@ extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
extern bool drm_crtc_in_use(struct drm_crtc *crtc);
-extern int drm_connector_attach_property(struct drm_connector *connector,
- struct drm_property *property, uint64_t init_val);
+extern void drm_connector_attach_property(struct drm_connector *connector,
+ struct drm_property *property, uint64_t init_val);
+extern void drm_object_attach_property(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t init_val);
extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values);
extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
@@ -1024,6 +1046,10 @@ extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp);
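
Since properties now hang off drm_mode_object rather than being connector-only arrays, a driver can expose CRTC properties through the generic helper (a sketch with an invented property name; the attach call matches the prototype declared above):

/* Sketch: create an enum property and attach it to a CRTC. */
static const struct drm_prop_enum_list example_enum_list[] = {
        { 0, "off" },
        { 1, "on" },
};

static void example_attach_crtc_property(struct drm_device *dev,
                                         struct drm_crtc *crtc)
{
        struct drm_property *prop;

        prop = drm_property_create_enum(dev, 0, "example-mode",
                                        example_enum_list,
                                        ARRAY_SIZE(example_enum_list));
        if (prop)
                drm_object_attach_property(&crtc->base, prop, 0);
}

The drm_crtc_funcs.set_property hook added above is what the core calls when userspace changes the value.
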
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 4a0aae38e160..326f2be0d497 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -254,6 +254,21 @@ struct drm_mode_connector_set_property {
__u32 connector_id;
};
+struct drm_mode_obj_get_properties {
+ __u64 props_ptr;
+ __u64 prop_values_ptr;
+ __u32 count_props;
+ __u32 obj_id;
+ __u32 obj_type;
+};
+
+struct drm_mode_obj_set_property {
+ __u64 value;
+ __u32 prop_id;
+ __u32 obj_id;
+ __u32 obj_type;
+};
+
struct drm_mode_get_blob {
__u32 blob_id;
__u32 length;
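
The new ioctls let userspace get and set properties on any mode object by (id, type) pair. Setting one looks roughly like this (a sketch; assumes the exported DRM headers are on the include path and that fd, the ids and the value come from the caller):

/* Sketch: set a property on an arbitrary mode object via the new ioctl. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int set_object_property(int fd, uint32_t obj_id, uint32_t obj_type,
                               uint32_t prop_id, uint64_t value)
{
        struct drm_mode_obj_set_property arg = {
                .value = value,
                .prop_id = prop_id,
                .obj_id = obj_id,
                .obj_type = obj_type,   /* a DRM_MODE_OBJECT_* value */
        };

        return ioctl(fd, DRM_IOCTL_MODE_OBJ_SETPROPERTY, &arg);
}

Reading properties back goes through drm_mode_obj_get_properties, which follows the usual DRM two-pass pattern: call once with count_props = 0 to learn the count, then allocate the two arrays and call again.
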
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 7c491b4bcf65..58056865b8e9 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -926,7 +926,6 @@ struct drm_radeon_cs_chunk {
};
/* drm_radeon_cs_reloc.flags */
-#define RADEON_RELOC_DONT_SYNC 0x01
struct drm_radeon_cs_reloc {
uint32_t handle;
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 4b9a7f596f92..b455c7c212eb 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -28,13 +28,19 @@ struct vga_switcheroo_handler {
int (*get_client_id)(struct pci_dev *pdev);
};
+struct vga_switcheroo_client_ops {
+ void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
+ void (*reprobe)(struct pci_dev *dev);
+ bool (*can_switch)(struct pci_dev *dev);
+};
#if defined(CONFIG_VGA_SWITCHEROO)
void vga_switcheroo_unregister_client(struct pci_dev *dev);
int vga_switcheroo_register_client(struct pci_dev *dev,
- void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
- void (*reprobe)(struct pci_dev *dev),
- bool (*can_switch)(struct pci_dev *dev));
+ const struct vga_switcheroo_client_ops *ops);
+int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops,
+ int id, bool active);
void vga_switcheroo_client_fb_set(struct pci_dev *dev,
struct fb_info *info);
@@ -48,11 +54,12 @@ int vga_switcheroo_process_delayed_switch(void);
static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
static inline int vga_switcheroo_register_client(struct pci_dev *dev,
- void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
- void (*reprobe)(struct pci_dev *dev),
- bool (*can_switch)(struct pci_dev *dev)) { return 0; }
+ const struct vga_switcheroo_client_ops *ops) { return 0; }
static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
+static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops,
+ int id, bool active) { return 0; }
static inline void vga_switcheroo_unregister_handler(void) {}
static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }