[kernel] r19786 - in dists/sid/linux/debian: . config patches patches/bugfix/ia64 patches/bugfix/x86 patches/features/all/drm patches/features/all/fermi-accel

Ben Hutchings benh at alioth.debian.org
Wed Feb 6 03:52:59 UTC 2013


Author: benh
Date: Wed Feb  6 03:52:57 2013
New Revision: 19786

Log:
Backport drm and agp subsystems from Linux 3.4.29 (closes: #687442)

- [x86] i915: Fixes freezes on Ivy Bridge (Closes: #689268)
- nouveau: Support for newer nvidia chipsets (Closes: #690284)
- radeon: Support for HD7000 'Southern Islands' chips

Mostly by Julien Cristau, but I've made some changes:
- Added a script to refresh the main patch
- Put upstream commit references in the usual format
- Instead of reverting the use of kmalloc_array(), cherry-picked its
  addition, as it helps to avoid integer overflow vulnerabilities
- Moved all other DRM patches after this, and refreshed them
- Updated debian/config/defines to ignore the ABI changes, because OOT
  GPU drivers don't use the DRM (or intel-gtt) API

Added:
   dists/sid/linux/debian/patches/bugfix/x86/drm-i915-kick-any-firmware-framebuffers-before-claim.patch
   dists/sid/linux/debian/patches/features/all/drm/
   dists/sid/linux/debian/patches/features/all/drm/Remove-gma500-driver-from-staging.patch
   dists/sid/linux/debian/patches/features/all/drm/Revert-VM-add-vm_mmap-helper-function.patch
   dists/sid/linux/debian/patches/features/all/drm/Revert-drm-base-prime-dma-buf-support-v5.patch
   dists/sid/linux/debian/patches/features/all/drm/drm-3.4.patch
   dists/sid/linux/debian/patches/features/all/drm/drm-i915-revert-switch-to-simple_open.patch
   dists/sid/linux/debian/patches/features/all/drm/drm-revert-part-of-2c9ede55ecec58099b72e4bb8eab719f3.patch
   dists/sid/linux/debian/patches/features/all/drm/genpatch.py   (contents, props changed)
   dists/sid/linux/debian/patches/features/all/drm/i2c-export-bit-banging-algo-functions.patch
   dists/sid/linux/debian/patches/features/all/drm/revert-vm-add-vm_munmap-helper-function.patch
   dists/sid/linux/debian/patches/features/all/drm/slab-introduce-kmalloc_array.patch
   dists/sid/linux/debian/patches/features/all/drm/swiotlb-Expose-swiotlb_nr_tlb-function-to-modules.patch
Deleted:
   dists/sid/linux/debian/patches/features/all/fermi-accel/
Modified:
   dists/sid/linux/debian/changelog
   dists/sid/linux/debian/config/defines
   dists/sid/linux/debian/patches/bugfix/ia64/nouveau-ACPI-support-is-dependent-on-X86.patch
   dists/sid/linux/debian/patches/bugfix/x86/drm-i915-Only-kick-out-vesafb-if-we-takeover-the-fbc.patch
   dists/sid/linux/debian/patches/bugfix/x86/drm-i915-add-quirk_invert_brightness-for-ncr-machine.patch
   dists/sid/linux/debian/patches/series

Modified: dists/sid/linux/debian/changelog
==============================================================================
--- dists/sid/linux/debian/changelog	Sat Feb  2 01:19:29 2013	(r19785)
+++ dists/sid/linux/debian/changelog	Wed Feb  6 03:52:57 2013	(r19786)
@@ -116,6 +116,12 @@
   * Bump python build-dep, needed since the switch from local SortedDict
     to collections.OrderedDict (new in version 2.7).
 
+  [ Julien Cristau ]
+  * Backport drm and agp subsystems from Linux 3.4.29 (closes: #687442)
+    - [x86] i915: Fixes freezes on Ivy Bridge (Closes: #689268)
+    - nouveau: Support for newer nvidia chipsets (Closes: #690284)
+    - radeon: Support for HD7000 'Southern Islands' chips
+
  -- Ben Hutchings <ben at decadent.org.uk>  Thu, 27 Dec 2012 02:17:44 +0100
 
 linux (3.2.35-2) unstable; urgency=low

Modified: dists/sid/linux/debian/config/defines
==============================================================================
--- dists/sid/linux/debian/config/defines	Sat Feb  2 01:19:29 2013	(r19785)
+++ dists/sid/linux/debian/config/defines	Wed Feb  6 03:52:57 2013	(r19786)
@@ -22,6 +22,11 @@
  xprt_*
 # Inline function, did not need to be exported at all
  get_write_access
+# AGP and DRM were bumped to 3.4, but no-one tries to use these from OOT
+ module:drivers/gpu/drm/*
+ intel_agp_enabled
+ intel_gmch_*
+ intel_gtt_*
 
 [base]
 arches:

Modified: dists/sid/linux/debian/patches/bugfix/ia64/nouveau-ACPI-support-is-dependent-on-X86.patch
==============================================================================
--- dists/sid/linux/debian/patches/bugfix/ia64/nouveau-ACPI-support-is-dependent-on-X86.patch	Sat Feb  2 01:19:29 2013	(r19785)
+++ dists/sid/linux/debian/patches/bugfix/ia64/nouveau-ACPI-support-is-dependent-on-X86.patch	Wed Feb  6 03:52:57 2013	(r19786)
@@ -13,10 +13,10 @@
  drivers/gpu/drm/nouveau/nouveau_drv.h |    2 +-
  3 files changed, 5 insertions(+), 3 deletions(-)
 
-diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
-index ca16399..5a85697 100644
---- a/drivers/gpu/drm/nouveau/Kconfig
-+++ b/drivers/gpu/drm/nouveau/Kconfig
+Index: linux/drivers/gpu/drm/nouveau/Kconfig
+===================================================================
+--- linux.orig/drivers/gpu/drm/nouveau/Kconfig
++++ linux/drivers/gpu/drm/nouveau/Kconfig
 @@ -11,8 +11,8 @@ config DRM_NOUVEAU
  	select FRAMEBUFFER_CONSOLE if !EXPERT
  	select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
@@ -25,14 +25,14 @@
 -	select MXM_WMI if ACPI
 +	select ACPI_WMI if ACPI && X86
 +	select MXM_WMI if ACPI && X86
+ 	select POWER_SUPPLY
  	help
  	  Choose this option for open-source nVidia support.
- 
-diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
-index 0583677..797b808 100644
---- a/drivers/gpu/drm/nouveau/Makefile
-+++ b/drivers/gpu/drm/nouveau/Makefile
-@@ -37,6 +37,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
+Index: linux/drivers/gpu/drm/nouveau/Makefile
+===================================================================
+--- linux.orig/drivers/gpu/drm/nouveau/Makefile
++++ linux/drivers/gpu/drm/nouveau/Makefile
+@@ -42,6 +42,8 @@ nouveau-y := nouveau_drv.o nouveau_state
  nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
  nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
  nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
@@ -41,11 +41,11 @@
 +endif
  
  obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
-diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
-index 9c56331..9ea5a17 100644
---- a/drivers/gpu/drm/nouveau/nouveau_drv.h
-+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
-@@ -968,7 +968,7 @@ extern int  nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
+Index: linux/drivers/gpu/drm/nouveau/nouveau_drv.h
+===================================================================
+--- linux.orig/drivers/gpu/drm/nouveau/nouveau_drv.h
++++ linux/drivers/gpu/drm/nouveau/nouveau_drv.h
+@@ -1095,7 +1095,7 @@ extern int  nouveau_dma_wait(struct nouv
  
  /* nouveau_acpi.c */
  #define ROM_BIOS_PAGE 4096
@@ -53,7 +53,4 @@
 +#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
  void nouveau_register_dsm_handler(void);
  void nouveau_unregister_dsm_handler(void);
- int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
--- 
-1.7.5.3
-
+ void nouveau_switcheroo_optimus_dsm(void);

Modified: dists/sid/linux/debian/patches/bugfix/x86/drm-i915-Only-kick-out-vesafb-if-we-takeover-the-fbc.patch
==============================================================================
--- dists/sid/linux/debian/patches/bugfix/x86/drm-i915-Only-kick-out-vesafb-if-we-takeover-the-fbc.patch	Sat Feb  2 01:19:29 2013	(r19785)
+++ dists/sid/linux/debian/patches/bugfix/x86/drm-i915-Only-kick-out-vesafb-if-we-takeover-the-fbc.patch	Wed Feb  6 03:52:57 2013	(r19786)
@@ -23,7 +23,7 @@
 
 --- a/drivers/gpu/drm/i915/i915_dma.c
 +++ b/drivers/gpu/drm/i915/i915_dma.c
-@@ -1955,7 +1955,8 @@ int i915_driver_load(struct drm_device *
+@@ -1999,7 +1999,8 @@ int i915_driver_load(struct drm_device *
  		goto put_bridge;
  	}
  
@@ -31,5 +31,5 @@
 +	if (drm_core_check_feature(dev, DRIVER_MODESET))
 +		i915_kick_out_firmware_fb(dev_priv);
  
- 	/* overlay on gen2 is broken and can't address above 1G */
- 	if (IS_GEN2(dev))
+ 	pci_set_master(dev->pdev);
+ 

Modified: dists/sid/linux/debian/patches/bugfix/x86/drm-i915-add-quirk_invert_brightness-for-ncr-machine.patch
==============================================================================
--- dists/sid/linux/debian/patches/bugfix/x86/drm-i915-add-quirk_invert_brightness-for-ncr-machine.patch	Sat Feb  2 01:19:29 2013	(r19785)
+++ dists/sid/linux/debian/patches/bugfix/x86/drm-i915-add-quirk_invert_brightness-for-ncr-machine.patch	Wed Feb  6 03:52:57 2013	(r19786)
@@ -29,7 +29,7 @@
  #include <linux/module.h>
  #include <linux/input.h>
  #include <linux/i2c.h>
-@@ -8839,6 +8840,34 @@ struct intel_quirk {
+@@ -9172,6 +9173,34 @@ struct intel_quirk {
  	void (*hook)(struct drm_device *dev);
  };
  
@@ -62,9 +62,9 @@
 +};
 +
  struct intel_quirk intel_quirks[] = {
- 	/* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
- 	{ 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
-@@ -8886,6 +8915,10 @@ static void intel_init_quirks(struct drm
+ 	/* HP Mini needs pipe A force quirk (LP: #322104) */
+ 	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
+@@ -9217,6 +9246,10 @@ static void intel_init_quirks(struct drm
  		     q->subsystem_device == PCI_ANY_ID))
  			q->hook(dev);
  	}

Added: dists/sid/linux/debian/patches/bugfix/x86/drm-i915-kick-any-firmware-framebuffers-before-claim.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/bugfix/x86/drm-i915-kick-any-firmware-framebuffers-before-claim.patch	Wed Feb  6 03:52:57 2013	(r19786)
@@ -0,0 +1,115 @@
+From: Daniel Vetter <daniel.vetter at ffwll.ch>
+Date: Sun, 1 Jul 2012 17:09:42 +0200
+Subject: drm/i915: kick any firmware framebuffers before claiming the gtt
+
+commit 9f846a16d213523fbe6daea17e20df6b8ac5a1e5 upstream.
+
+Especially vesafb likes to map everything as uc- (yikes), and if that
+mapping hangs around still while we try to map the gtt as wc the
+kernel will downgrade our request to uc-, resulting in abyssal
+performance.
+
+Unfortunately we can't do this as early as readon does (i.e. as the
+first thing we do when initializing the hw) because our fb/mmio space
+region moves around on a per-gen basis. So I've had to move it below
+the gtt initialization, but that seems to work, too. The important
+thing is that we do this before we set up the gtt wc mapping.
+
+Now an altogether different question is why people compile their
+kernels with vesafb enabled, but I guess making things just work isn't
+bad per se ...
+
+v2:
+- s/radeondrmfb/inteldrmfb/
+- fix up error handling
+
+v3: Kill #ifdef X86, this is Intel after all. Noticed by Ben Widawsky.
+
+v4: Jani Nikula complained about the pointless bool primary
+initialization.
+
+v5: Don't oops if we can't allocate, noticed by Chris Wilson.
+
+v6: Resolve conflicts with agp rework and fixup whitespace.
+
+This is commit e188719a2891f01b3100d in drm-next.
+
+Backport to 3.5 -fixes queue requested by Dave Airlie - due to grub
+using vesa on fedora their initrd seems to load vesafb before loading
+the real kms driver. So tons more people actually experience a
+dead-slow gpu. Hence also the Cc: stable.
+
+Reported-and-tested-by: "Kilarski, Bernard R" <bernard.r.kilarski at intel.com>
+Reviewed-by: Chris Wilson <chris at chris-wilson.co.uk>
+Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
+Signed-off-by: Dave Airlie <airlied at redhat.com>
+[jcristau: fix up context for 3.4]
+---
+ drivers/gpu/drm/i915/i915_dma.c |   37 ++++++++++++++++++++++++++++++-------
+ 1 file changed, 30 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index f947926..36822b9 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
+ 	}
+ }
+ 
++static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
++{
++	struct apertures_struct *ap;
++	struct pci_dev *pdev = dev_priv->dev->pdev;
++	bool primary;
++
++	ap = alloc_apertures(1);
++	if (!ap)
++		return;
++
++	ap->ranges[0].base = dev_priv->dev->agp->base;
++	ap->ranges[0].size =
++		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
++	primary =
++		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
++
++	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
++
++	kfree(ap);
++}
++
+ /**
+  * i915_driver_load - setup chip and create an initial config
+  * @dev: DRM device
+@@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 		goto free_priv;
+ 	}
+ 
++	dev_priv->mm.gtt = intel_gtt_get();
++	if (!dev_priv->mm.gtt) {
++		DRM_ERROR("Failed to initialize GTT\n");
++		ret = -ENODEV;
++		goto put_bridge;
++	}
++
++	i915_kick_out_firmware_fb(dev_priv);
++
+ 	pci_set_master(dev->pdev);
+ 
+ 	/* overlay on gen2 is broken and can't address above 1G */
+@@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 		goto put_bridge;
+ 	}
+ 
+-	dev_priv->mm.gtt = intel_gtt_get();
+-	if (!dev_priv->mm.gtt) {
+-		DRM_ERROR("Failed to initialize GTT\n");
+-		ret = -ENODEV;
+-		goto out_rmmap;
+-	}
+-
+ 	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ 
+ 	dev_priv->mm.gtt_mapping =
+-- 
+1.7.10.4
+

Added: dists/sid/linux/debian/patches/features/all/drm/Remove-gma500-driver-from-staging.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/features/all/drm/Remove-gma500-driver-from-staging.patch	Wed Feb  6 03:52:57 2013	(r19786)
@@ -0,0 +1,31414 @@
+From: Julien Cristau <jcristau at debian.org>
+Date: Sun, 24 Jun 2012 12:03:53 +0200
+Subject: [PATCH 6/7] Remove gma500 driver from staging
+
+commit b7cdd9e6323af368e26121c5b791eddc78e79fea upstream.
+
+It moved to the main tree
+---
+ drivers/staging/Kconfig                       |    2 -
+ drivers/staging/Makefile                      |    1 -
+ drivers/staging/gma500/Kconfig                |   33 -
+ drivers/staging/gma500/Makefile               |   52 -
+ drivers/staging/gma500/TODO                   |   15 -
+ drivers/staging/gma500/accel_2d.c             |  414 -------
+ drivers/staging/gma500/backlight.c            |   49 -
+ drivers/staging/gma500/cdv_device.c           |  350 ------
+ drivers/staging/gma500/cdv_device.h           |   36 -
+ drivers/staging/gma500/cdv_intel_crt.c        |  326 ------
+ drivers/staging/gma500/cdv_intel_display.c    | 1508 -------------------------
+ drivers/staging/gma500/cdv_intel_hdmi.c       |  376 ------
+ drivers/staging/gma500/cdv_intel_lvds.c       |  721 ------------
+ drivers/staging/gma500/displays/hdmi.h        |   33 -
+ drivers/staging/gma500/displays/pyr_cmd.h     |   34 -
+ drivers/staging/gma500/displays/pyr_vid.h     |   34 -
+ drivers/staging/gma500/displays/tmd_cmd.h     |   34 -
+ drivers/staging/gma500/displays/tmd_vid.h     |   34 -
+ drivers/staging/gma500/displays/tpo_cmd.h     |   35 -
+ drivers/staging/gma500/displays/tpo_vid.h     |   33 -
+ drivers/staging/gma500/framebuffer.c          |  849 --------------
+ drivers/staging/gma500/framebuffer.h          |   48 -
+ drivers/staging/gma500/gem.c                  |  292 -----
+ drivers/staging/gma500/gem_glue.c             |   89 --
+ drivers/staging/gma500/gem_glue.h             |    2 -
+ drivers/staging/gma500/gtt.c                  |  553 ---------
+ drivers/staging/gma500/gtt.h                  |   64 --
+ drivers/staging/gma500/intel_bios.c           |  303 -----
+ drivers/staging/gma500/intel_bios.h           |  430 -------
+ drivers/staging/gma500/intel_i2c.c            |  170 ---
+ drivers/staging/gma500/intel_opregion.c       |   81 --
+ drivers/staging/gma500/mdfld_device.c         |  714 ------------
+ drivers/staging/gma500/mdfld_dsi_dbi.c        |  761 -------------
+ drivers/staging/gma500/mdfld_dsi_dbi.h        |  173 ---
+ drivers/staging/gma500/mdfld_dsi_dbi_dpu.c    |  778 -------------
+ drivers/staging/gma500/mdfld_dsi_dbi_dpu.h    |  154 ---
+ drivers/staging/gma500/mdfld_dsi_dpi.c        |  805 -------------
+ drivers/staging/gma500/mdfld_dsi_dpi.h        |   78 --
+ drivers/staging/gma500/mdfld_dsi_output.c     | 1014 -----------------
+ drivers/staging/gma500/mdfld_dsi_output.h     |  138 ---
+ drivers/staging/gma500/mdfld_dsi_pkg_sender.c | 1484 ------------------------
+ drivers/staging/gma500/mdfld_dsi_pkg_sender.h |  184 ---
+ drivers/staging/gma500/mdfld_intel_display.c  | 1404 -----------------------
+ drivers/staging/gma500/mdfld_msic.h           |   31 -
+ drivers/staging/gma500/mdfld_output.c         |  171 ---
+ drivers/staging/gma500/mdfld_output.h         |   41 -
+ drivers/staging/gma500/mdfld_pyr_cmd.c        |  558 ---------
+ drivers/staging/gma500/mdfld_tmd_vid.c        |  206 ----
+ drivers/staging/gma500/mdfld_tpo_cmd.c        |  509 ---------
+ drivers/staging/gma500/mdfld_tpo_vid.c        |  140 ---
+ drivers/staging/gma500/medfield.h             |  268 -----
+ drivers/staging/gma500/mid_bios.c             |  270 -----
+ drivers/staging/gma500/mid_bios.h             |   21 -
+ drivers/staging/gma500/mmu.c                  |  858 --------------
+ drivers/staging/gma500/mrst.h                 |  252 -----
+ drivers/staging/gma500/mrst_crtc.c            |  604 ----------
+ drivers/staging/gma500/mrst_device.c          |  634 -----------
+ drivers/staging/gma500/mrst_hdmi.c            |  852 --------------
+ drivers/staging/gma500/mrst_hdmi_i2c.c        |  328 ------
+ drivers/staging/gma500/mrst_lvds.c            |  407 -------
+ drivers/staging/gma500/power.c                |  318 ------
+ drivers/staging/gma500/power.h                |   67 --
+ drivers/staging/gma500/psb_device.c           |  321 ------
+ drivers/staging/gma500/psb_drm.h              |  219 ----
+ drivers/staging/gma500/psb_drv.c              | 1229 --------------------
+ drivers/staging/gma500/psb_drv.h              |  952 ----------------
+ drivers/staging/gma500/psb_intel_display.c    | 1429 -----------------------
+ drivers/staging/gma500/psb_intel_display.h    |   28 -
+ drivers/staging/gma500/psb_intel_drv.h        |  230 ----
+ drivers/staging/gma500/psb_intel_lvds.c       |  854 --------------
+ drivers/staging/gma500/psb_intel_modes.c      |   77 --
+ drivers/staging/gma500/psb_intel_reg.h        | 1235 --------------------
+ drivers/staging/gma500/psb_intel_sdvo.c       | 1293 ---------------------
+ drivers/staging/gma500/psb_intel_sdvo_regs.h  |  338 ------
+ drivers/staging/gma500/psb_irq.c              |  627 ----------
+ drivers/staging/gma500/psb_irq.h              |   45 -
+ drivers/staging/gma500/psb_lid.c              |   88 --
+ drivers/staging/gma500/psb_reg.h              |  582 ----------
+ 78 files changed, 30770 deletions(-)
+ delete mode 100644 drivers/staging/gma500/Kconfig
+ delete mode 100644 drivers/staging/gma500/Makefile
+ delete mode 100644 drivers/staging/gma500/TODO
+ delete mode 100644 drivers/staging/gma500/accel_2d.c
+ delete mode 100644 drivers/staging/gma500/backlight.c
+ delete mode 100644 drivers/staging/gma500/cdv_device.c
+ delete mode 100644 drivers/staging/gma500/cdv_device.h
+ delete mode 100644 drivers/staging/gma500/cdv_intel_crt.c
+ delete mode 100644 drivers/staging/gma500/cdv_intel_display.c
+ delete mode 100644 drivers/staging/gma500/cdv_intel_hdmi.c
+ delete mode 100644 drivers/staging/gma500/cdv_intel_lvds.c
+ delete mode 100644 drivers/staging/gma500/displays/hdmi.h
+ delete mode 100644 drivers/staging/gma500/displays/pyr_cmd.h
+ delete mode 100644 drivers/staging/gma500/displays/pyr_vid.h
+ delete mode 100644 drivers/staging/gma500/displays/tmd_cmd.h
+ delete mode 100644 drivers/staging/gma500/displays/tmd_vid.h
+ delete mode 100644 drivers/staging/gma500/displays/tpo_cmd.h
+ delete mode 100644 drivers/staging/gma500/displays/tpo_vid.h
+ delete mode 100644 drivers/staging/gma500/framebuffer.c
+ delete mode 100644 drivers/staging/gma500/framebuffer.h
+ delete mode 100644 drivers/staging/gma500/gem.c
+ delete mode 100644 drivers/staging/gma500/gem_glue.c
+ delete mode 100644 drivers/staging/gma500/gem_glue.h
+ delete mode 100644 drivers/staging/gma500/gtt.c
+ delete mode 100644 drivers/staging/gma500/gtt.h
+ delete mode 100644 drivers/staging/gma500/intel_bios.c
+ delete mode 100644 drivers/staging/gma500/intel_bios.h
+ delete mode 100644 drivers/staging/gma500/intel_i2c.c
+ delete mode 100644 drivers/staging/gma500/intel_opregion.c
+ delete mode 100644 drivers/staging/gma500/mdfld_device.c
+ delete mode 100644 drivers/staging/gma500/mdfld_dsi_dbi.c
+ delete mode 100644 drivers/staging/gma500/mdfld_dsi_dbi.h
+ delete mode 100644 drivers/staging/gma500/mdfld_dsi_dbi_dpu.c
+ delete mode 100644 drivers/staging/gma500/mdfld_dsi_dbi_dpu.h
+ delete mode 100644 drivers/staging/gma500/mdfld_dsi_dpi.c
+ delete mode 100644 drivers/staging/gma500/mdfld_dsi_dpi.h
+ delete mode 100644 drivers/staging/gma500/mdfld_dsi_output.c
+ delete mode 100644 drivers/staging/gma500/mdfld_dsi_output.h
+ delete mode 100644 drivers/staging/gma500/mdfld_dsi_pkg_sender.c
+ delete mode 100644 drivers/staging/gma500/mdfld_dsi_pkg_sender.h
+ delete mode 100644 drivers/staging/gma500/mdfld_intel_display.c
+ delete mode 100644 drivers/staging/gma500/mdfld_msic.h
+ delete mode 100644 drivers/staging/gma500/mdfld_output.c
+ delete mode 100644 drivers/staging/gma500/mdfld_output.h
+ delete mode 100644 drivers/staging/gma500/mdfld_pyr_cmd.c
+ delete mode 100644 drivers/staging/gma500/mdfld_tmd_vid.c
+ delete mode 100644 drivers/staging/gma500/mdfld_tpo_cmd.c
+ delete mode 100644 drivers/staging/gma500/mdfld_tpo_vid.c
+ delete mode 100644 drivers/staging/gma500/medfield.h
+ delete mode 100644 drivers/staging/gma500/mid_bios.c
+ delete mode 100644 drivers/staging/gma500/mid_bios.h
+ delete mode 100644 drivers/staging/gma500/mmu.c
+ delete mode 100644 drivers/staging/gma500/mrst.h
+ delete mode 100644 drivers/staging/gma500/mrst_crtc.c
+ delete mode 100644 drivers/staging/gma500/mrst_device.c
+ delete mode 100644 drivers/staging/gma500/mrst_hdmi.c
+ delete mode 100644 drivers/staging/gma500/mrst_hdmi_i2c.c
+ delete mode 100644 drivers/staging/gma500/mrst_lvds.c
+ delete mode 100644 drivers/staging/gma500/power.c
+ delete mode 100644 drivers/staging/gma500/power.h
+ delete mode 100644 drivers/staging/gma500/psb_device.c
+ delete mode 100644 drivers/staging/gma500/psb_drm.h
+ delete mode 100644 drivers/staging/gma500/psb_drv.c
+ delete mode 100644 drivers/staging/gma500/psb_drv.h
+ delete mode 100644 drivers/staging/gma500/psb_intel_display.c
+ delete mode 100644 drivers/staging/gma500/psb_intel_display.h
+ delete mode 100644 drivers/staging/gma500/psb_intel_drv.h
+ delete mode 100644 drivers/staging/gma500/psb_intel_lvds.c
+ delete mode 100644 drivers/staging/gma500/psb_intel_modes.c
+ delete mode 100644 drivers/staging/gma500/psb_intel_reg.h
+ delete mode 100644 drivers/staging/gma500/psb_intel_sdvo.c
+ delete mode 100644 drivers/staging/gma500/psb_intel_sdvo_regs.h
+ delete mode 100644 drivers/staging/gma500/psb_irq.c
+ delete mode 100644 drivers/staging/gma500/psb_irq.h
+ delete mode 100644 drivers/staging/gma500/psb_lid.c
+ delete mode 100644 drivers/staging/gma500/psb_reg.h
+
+diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
+index 25cdff3..1c8d977 100644
+--- a/drivers/staging/Kconfig
++++ b/drivers/staging/Kconfig
+@@ -124,8 +124,6 @@ source "drivers/staging/cptm1217/Kconfig"
+ 
+ source "drivers/staging/ste_rmi4/Kconfig"
+ 
+-source "drivers/staging/gma500/Kconfig"
+-
+ source "drivers/staging/mei/Kconfig"
+ 
+ source "drivers/staging/nvec/Kconfig"
+diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
+index a25f3f2..1b78e0c 100644
+--- a/drivers/staging/Makefile
++++ b/drivers/staging/Makefile
+@@ -54,6 +54,5 @@ obj-$(CONFIG_SND_INTEL_SST)	+= intel_sst/
+ obj-$(CONFIG_SPEAKUP)		+= speakup/
+ obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217)	+= cptm1217/
+ obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4)	+= ste_rmi4/
+-obj-$(CONFIG_DRM_PSB)		+= gma500/
+ obj-$(CONFIG_INTEL_MEI)		+= mei/
+ obj-$(CONFIG_MFD_NVEC)		+= nvec/
+diff --git a/drivers/staging/gma500/Kconfig b/drivers/staging/gma500/Kconfig
+deleted file mode 100644
+index bfe2166..0000000
+--- a/drivers/staging/gma500/Kconfig
++++ /dev/null
+@@ -1,33 +0,0 @@
+-config DRM_PSB
+-	tristate "Intel GMA5/600 KMS Framebuffer"
+-	depends on DRM && PCI && X86
+-	select FB_CFB_COPYAREA
+-        select FB_CFB_FILLRECT
+-        select FB_CFB_IMAGEBLIT
+-        select DRM_KMS_HELPER
+-        select DRM_TTM
+-	help
+-	  Say yes for an experimental 2D KMS framebuffer driver for the
+-	  Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
+-	  devices.
+-
+-config DRM_PSB_MRST
+-	bool "Intel GMA600 support (Experimental)"
+-	depends on DRM_PSB
+-	help
+-	  Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
+-	  platforms with LVDS ports. HDMI and MIPI are not currently
+-	  supported.
+-
+-config DRM_PSB_MFLD
+-	bool "Intel Medfield support (Experimental)"
+-	depends on DRM_PSB
+-	help
+-	  Say yes to include support for Intel Medfield platforms with MIPI
+-	  interfaces.
+-	
+-config DRM_PSB_CDV
+-	bool "Intel Cedarview support (Experimental)"
+-	depends on DRM_PSB
+-	help
+-	  Say yes to include support for Intel Cedarview platforms
+diff --git a/drivers/staging/gma500/Makefile b/drivers/staging/gma500/Makefile
+deleted file mode 100644
+index c729868..0000000
+--- a/drivers/staging/gma500/Makefile
++++ /dev/null
+@@ -1,52 +0,0 @@
+-#
+-#	KMS driver for the GMA500
+-#
+-ccflags-y += -Iinclude/drm
+-
+-psb_gfx-y += gem_glue.o \
+-	  accel_2d.o \
+-	  backlight.o \
+-	  framebuffer.o \
+-	  gem.o \
+-	  gtt.o \
+-	  intel_bios.o \
+-	  intel_i2c.o \
+-	  intel_opregion.o \
+-	  mmu.o \
+-	  power.o \
+-	  psb_drv.o \
+-	  psb_intel_display.o \
+-	  psb_intel_lvds.o \
+-	  psb_intel_modes.o \
+-	  psb_intel_sdvo.o \
+-	  psb_lid.o \
+-	  psb_irq.o \
+-	  psb_device.o \
+-	  mid_bios.o
+-
+-psb_gfx-$(CONFIG_DRM_PSB_CDV) +=  cdv_device.o \
+-	  cdv_intel_crt.o \
+-	  cdv_intel_display.o \
+-	  cdv_intel_hdmi.o \
+-	  cdv_intel_lvds.o
+-
+-psb_gfx-$(CONFIG_DRM_PSB_MRST) += mrst_device.o \
+-	  mrst_crtc.o \
+-	  mrst_lvds.o \
+-	  mrst_hdmi.o \
+-	  mrst_hdmi_i2c.o
+-
+-psb_gfx-$(CONFIG_DRM_PSB_MFLD) += mdfld_device.o \
+-	  mdfld_output.o \
+-	  mdfld_pyr_cmd.o \
+-	  mdfld_tmd_vid.o \
+-	  mdfld_tpo_cmd.o \
+-	  mdfld_tpo_vid.o \
+-	  mdfld_dsi_pkg_sender.o \
+-	  mdfld_dsi_dpi.o \
+-	  mdfld_dsi_output.o \
+-	  mdfld_dsi_dbi.o \
+-	  mdfld_dsi_dbi_dpu.o \
+-	  mdfld_intel_display.o
+-
+-obj-$(CONFIG_DRM_PSB) += psb_gfx.o
+diff --git a/drivers/staging/gma500/TODO b/drivers/staging/gma500/TODO
+deleted file mode 100644
+index fc83615..0000000
+--- a/drivers/staging/gma500/TODO
++++ /dev/null
+@@ -1,15 +0,0 @@
+--	Sort out the power management side. Not important for Poulsbo but
+-	matters for Moorestown/Medfield
+--	Debug Oaktrail/Moorestown support (single pipe, no BIOS on mrst,
+-					some other differences)
+--	Add 2D acceleration via console and DRM
+--	Add scrolling acceleration using the GTT to do remapping on the main
+-	framebuffer.
+--	HDMI testing
+--	Oaktrail HDMI and other features
+--	Oaktrail MIPI
+--	Medfield needs a lot of further love
+-
+-As per kernel policy and the in the interest of the safety of various
+-kittens there is no support or plans to add hooks for the closed user space
+-stuff.
+diff --git a/drivers/staging/gma500/accel_2d.c b/drivers/staging/gma500/accel_2d.c
+deleted file mode 100644
+index 114b99a..0000000
+--- a/drivers/staging/gma500/accel_2d.c
++++ /dev/null
+@@ -1,414 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2007-2011, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+- * develop this driver.
+- *
+- **************************************************************************/
+-
+-#include <linux/module.h>
+-#include <linux/kernel.h>
+-#include <linux/errno.h>
+-#include <linux/string.h>
+-#include <linux/mm.h>
+-#include <linux/tty.h>
+-#include <linux/slab.h>
+-#include <linux/delay.h>
+-#include <linux/fb.h>
+-#include <linux/init.h>
+-#include <linux/console.h>
+-
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-#include <drm/drm_crtc.h>
+-
+-#include "psb_drv.h"
+-#include "psb_reg.h"
+-#include "framebuffer.h"
+-
+-/**
+- *	psb_spank		-	reset the 2D engine
+- *	@dev_priv: our PSB DRM device
+- *
+- *	Soft reset the graphics engine and then reload the necessary registers.
+- *	We use this at initialisation time but it will become relevant for
+- *	accelerated X later
+- */
+-void psb_spank(struct drm_psb_private *dev_priv)
+-{
+-	PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
+-		_PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
+-		_PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
+-		_PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
+-	PSB_RSGX32(PSB_CR_SOFT_RESET);
+-
+-	msleep(1);
+-
+-	PSB_WSGX32(0, PSB_CR_SOFT_RESET);
+-	wmb();
+-	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
+-		   PSB_CR_BIF_CTRL);
+-	wmb();
+-	(void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+-
+-	msleep(1);
+-	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
+-		   PSB_CR_BIF_CTRL);
+-	(void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+-	PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+-}
+-
+-/**
+- *	psb2_2d_wait_available	-	wait for FIFO room
+- *	@dev_priv: our DRM device
+- *	@size: size (in dwords) of the command we want to issue
+- *
+- *	Wait until there is room to load the FIFO with our data. If the
+- *	device is not responding then reset it
+- */
+-static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
+-			  unsigned size)
+-{
+-	uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+-	unsigned long t = jiffies + HZ;
+-
+-	while (avail < size) {
+-		avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
+-		if (time_after(jiffies, t)) {
+-			psb_spank(dev_priv);
+-			return -EIO;
+-		}
+-	}
+-	return 0;
+-}
+-
+-/**
+- *	psb_2d_submit		-	submit a 2D command
+- *	@dev_priv: our DRM device
+- *	@cmdbuf: command to issue
+- *	@size: length (in dwords)
+- *
+- *	Issue one or more 2D commands to the accelerator. This needs to be
+- *	serialized later when we add the GEM interfaces for acceleration
+- */
+-static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
+-								unsigned size)
+-{
+-	int ret = 0;
+-	int i;
+-	unsigned submit_size;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&dev_priv->lock_2d, flags);
+-	while (size > 0) {
+-		submit_size = (size < 0x60) ? size : 0x60;
+-		size -= submit_size;
+-		ret = psb_2d_wait_available(dev_priv, submit_size);
+-		if (ret)
+-			break;
+-
+-		submit_size <<= 2;
+-
+-		for (i = 0; i < submit_size; i += 4)
+-			PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
+-
+-		(void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
+-	}
+-	spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
+-	return ret;
+-}
+-
+-
+-/**
+- *	psb_accel_2d_copy_direction	-	compute blit order
+- *	@xdir: X direction of move
+- *	@ydir: Y direction of move
+- *
+- *	Compute the correct order setings to ensure that an overlapping blit
+- *	correctly copies all the pixels.
+- */
+-static u32 psb_accel_2d_copy_direction(int xdir, int ydir)
+-{
+-	if (xdir < 0)
+-		return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
+-						PSB_2D_COPYORDER_TR2BL;
+-	else
+-		return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
+-						PSB_2D_COPYORDER_TL2BR;
+-}
+-
+-/**
+- *	psb_accel_2d_copy		-	accelerated 2D copy
+- *	@dev_priv: our DRM device
+- *	@src_offset in bytes
+- *	@src_stride in bytes
+- *	@src_format psb 2D format defines
+- *	@dst_offset in bytes
+- *	@dst_stride in bytes
+- *	@dst_format psb 2D format defines
+- *	@src_x offset in pixels
+- *	@src_y offset in pixels
+- *	@dst_x offset in pixels
+- *	@dst_y offset in pixels
+- *	@size_x of the copied area
+- *	@size_y of the copied area
+- *
+- *	Format and issue a 2D accelerated copy command.
+- */
+-static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
+-			     uint32_t src_offset, uint32_t src_stride,
+-			     uint32_t src_format, uint32_t dst_offset,
+-			     uint32_t dst_stride, uint32_t dst_format,
+-			     uint16_t src_x, uint16_t src_y,
+-			     uint16_t dst_x, uint16_t dst_y,
+-			     uint16_t size_x, uint16_t size_y)
+-{
+-	uint32_t blit_cmd;
+-	uint32_t buffer[10];
+-	uint32_t *buf;
+-	uint32_t direction;
+-
+-	buf = buffer;
+-
+-	direction =
+-	    psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
+-
+-	if (direction == PSB_2D_COPYORDER_BR2TL ||
+-	    direction == PSB_2D_COPYORDER_TR2BL) {
+-		src_x += size_x - 1;
+-		dst_x += size_x - 1;
+-	}
+-	if (direction == PSB_2D_COPYORDER_BR2TL ||
+-	    direction == PSB_2D_COPYORDER_BL2TR) {
+-		src_y += size_y - 1;
+-		dst_y += size_y - 1;
+-	}
+-
+-	blit_cmd =
+-	    PSB_2D_BLIT_BH |
+-	    PSB_2D_ROT_NONE |
+-	    PSB_2D_DSTCK_DISABLE |
+-	    PSB_2D_SRCCK_DISABLE |
+-	    PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
+-
+-	*buf++ = PSB_2D_FENCE_BH;
+-	*buf++ =
+-	    PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
+-					       PSB_2D_DST_STRIDE_SHIFT);
+-	*buf++ = dst_offset;
+-	*buf++ =
+-	    PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
+-					       PSB_2D_SRC_STRIDE_SHIFT);
+-	*buf++ = src_offset;
+-	*buf++ =
+-	    PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
+-	    (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
+-	*buf++ = blit_cmd;
+-	*buf++ =
+-	    (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
+-						  PSB_2D_DST_YSTART_SHIFT);
+-	*buf++ =
+-	    (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
+-						  PSB_2D_DST_YSIZE_SHIFT);
+-	*buf++ = PSB_2D_FLUSH_BH;
+-
+-	return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
+-}
+-
+-/**
+- *	psbfb_copyarea_accel	-	copyarea acceleration for /dev/fb
+- *	@info: our framebuffer
+- *	@a: copyarea parameters from the framebuffer core
+- *
+- *	Perform a 2D copy via the accelerator
+- */
+-static void psbfb_copyarea_accel(struct fb_info *info,
+-				 const struct fb_copyarea *a)
+-{
+-	struct psb_fbdev *fbdev = info->par;
+-	struct psb_framebuffer *psbfb = &fbdev->pfb;
+-	struct drm_device *dev = psbfb->base.dev;
+-	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	uint32_t offset;
+-	uint32_t stride;
+-	uint32_t src_format;
+-	uint32_t dst_format;
+-
+-	if (!fb)
+-		return;
+-
+-	offset = psbfb->gtt->offset;
+-	stride = fb->pitch;
+-
+-	switch (fb->depth) {
+-	case 8:
+-		src_format = PSB_2D_SRC_332RGB;
+-		dst_format = PSB_2D_DST_332RGB;
+-		break;
+-	case 15:
+-		src_format = PSB_2D_SRC_555RGB;
+-		dst_format = PSB_2D_DST_555RGB;
+-		break;
+-	case 16:
+-		src_format = PSB_2D_SRC_565RGB;
+-		dst_format = PSB_2D_DST_565RGB;
+-		break;
+-	case 24:
+-	case 32:
+-		/* this is wrong but since we don't do blending its okay */
+-		src_format = PSB_2D_SRC_8888ARGB;
+-		dst_format = PSB_2D_DST_8888ARGB;
+-		break;
+-	default:
+-		/* software fallback */
+-		cfb_copyarea(info, a);
+-		return;
+-	}
+-
+-	if (!gma_power_begin(dev, false)) {
+-		cfb_copyarea(info, a);
+-		return;
+-	}
+-	psb_accel_2d_copy(dev_priv,
+-			  offset, stride, src_format,
+-			  offset, stride, dst_format,
+-			  a->sx, a->sy, a->dx, a->dy, a->width, a->height);
+-	gma_power_end(dev);
+-}
+-
+-/**
+- *	psbfb_copyarea	-	2D copy interface
+- *	@info: our framebuffer
+- *	@region: region to copy
+- *
+- *	Copy an area of the framebuffer console either by the accelerator
+- *	or directly using the cfb helpers according to the request
+- */
+-void psbfb_copyarea(struct fb_info *info,
+-			   const struct fb_copyarea *region)
+-{
+-	if (unlikely(info->state != FBINFO_STATE_RUNNING))
+-		return;
+-
+-	/* Avoid the 8 pixel erratum */
+-	if (region->width == 8 || region->height == 8 ||
+-		(info->flags & FBINFO_HWACCEL_DISABLED))
+-		return cfb_copyarea(info, region);
+-
+-	psbfb_copyarea_accel(info, region);
+-}
+-
+-/**
+- *	psbfb_sync	-	synchronize 2D
+- *	@info: our framebuffer
+- *
+- *	Wait for the 2D engine to quiesce so that we can do CPU
+- *	access to the framebuffer again
+- */
+-int psbfb_sync(struct fb_info *info)
+-{
+-	struct psb_fbdev *fbdev = info->par;
+-	struct psb_framebuffer *psbfb = &fbdev->pfb;
+-	struct drm_device *dev = psbfb->base.dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	unsigned long _end = jiffies + DRM_HZ;
+-	int busy = 0;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&dev_priv->lock_2d, flags);
+-	/*
+-	 * First idle the 2D engine.
+-	 */
+-
+-	if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
+-	    ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
+-		goto out;
+-
+-	do {
+-		busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+-		cpu_relax();
+-	} while (busy && !time_after_eq(jiffies, _end));
+-
+-	if (busy)
+-		busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
+-	if (busy)
+-		goto out;
+-
+-	do {
+-		busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+-						_PSB_C2B_STATUS_BUSY) != 0);
+-		cpu_relax();
+-	} while (busy && !time_after_eq(jiffies, _end));
+-	if (busy)
+-		busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
+-					_PSB_C2B_STATUS_BUSY) != 0);
+-
+-out:
+-	spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
+-	return (busy) ? -EBUSY : 0;
+-}
+-
+-int psb_accel_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct drm_psb_2d_op *op = data;
+-	u32 *op_ptr = &op->cmd[0];
+-	int i;
+-	struct drm_gem_object *obj;
+-	struct gtt_range *gtt;
+-	int err = -EINVAL;
+-
+-	if (!dev_priv->ops->accel_2d)
+-		return -EOPNOTSUPP;
+-	if (op->size > PSB_2D_OP_BUFLEN)
+-		return -EINVAL;
+-
+-	/* The GEM object being used. We need to support separate src/dst/etc
+-	   in the end but for now keep them all the same */
+-	obj = drm_gem_object_lookup(dev, file, op->src);
+-	if (obj == NULL)
+-		return -ENOENT;
+-	gtt = container_of(obj, struct gtt_range, gem);
+-
+-	if (psb_gtt_pin(gtt) < 0)
+-		goto bad_2;
+-	for (i = 0; i < op->size; i++, op_ptr++) {
+-		u32 r = *op_ptr & 0xF0000000;
+-		/* Fill in the GTT offsets for the command buffer */
+-		if (r == PSB_2D_SRC_SURF_BH ||
+-			r == PSB_2D_DST_SURF_BH ||
+-			r == PSB_2D_MASK_SURF_BH ||
+-			r == PSB_2D_PAT_SURF_BH) {
+-			i++;
+-			op_ptr++;
+-			if (i == op->size)
+-				goto bad;
+-			if (*op_ptr)
+-				goto bad;
+-			*op_ptr = gtt->offset;
+-			continue;
+-		}
+-	}
+-	psbfb_2d_submit(dev_priv, op->cmd, op->size);
+-	err = 0;
+-bad:
+-	psb_gtt_unpin(gtt);
+-bad_2:
+-	drm_gem_object_unreference(obj);
+-	return err;
+-}
+diff --git a/drivers/staging/gma500/backlight.c b/drivers/staging/gma500/backlight.c
+deleted file mode 100644
+index 2079395..0000000
+--- a/drivers/staging/gma500/backlight.c
++++ /dev/null
+@@ -1,49 +0,0 @@
+-/*
+- * GMA500 Backlight Interface
+- *
+- * Copyright (c) 2009-2011, Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors: Eric Knopp
+- *
+- */
+-
+-#include "psb_drv.h"
+-#include "psb_intel_reg.h"
+-#include "psb_intel_drv.h"
+-#include "intel_bios.h"
+-#include "power.h"
+-
+-int gma_backlight_init(struct drm_device *dev)
+-{
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	return dev_priv->ops->backlight_init(dev);
+-#else
+-	return 0;
+-#endif
+-}
+-
+-void gma_backlight_exit(struct drm_device *dev)
+-{
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	if (dev_priv->backlight_device) {
+-		dev_priv->backlight_device->props.brightness = 0;
+-		backlight_update_status(dev_priv->backlight_device);
+-		backlight_device_unregister(dev_priv->backlight_device);
+-	}
+-#endif
+-}
+diff --git a/drivers/staging/gma500/cdv_device.c b/drivers/staging/gma500/cdv_device.c
+deleted file mode 100644
+index 8ec10ca..0000000
+--- a/drivers/staging/gma500/cdv_device.c
++++ /dev/null
+@@ -1,350 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2011, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- **************************************************************************/
+-
+-#include <linux/backlight.h>
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-#include "psb_drm.h"
+-#include "psb_drv.h"
+-#include "psb_reg.h"
+-#include "psb_intel_reg.h"
+-#include "intel_bios.h"
+-#include "cdv_device.h"
+-
+-#define VGA_SR_INDEX		0x3c4
+-#define VGA_SR_DATA		0x3c5
+-
+-static void cdv_disable_vga(struct drm_device *dev)
+-{
+-	u8 sr1;
+-	u32 vga_reg;
+-
+-	vga_reg = VGACNTRL;
+-
+-	outb(1, VGA_SR_INDEX);
+-	sr1 = inb(VGA_SR_DATA);
+-	outb(sr1 | 1<<5, VGA_SR_DATA);
+-	udelay(300);
+-
+-	REG_WRITE(vga_reg, VGA_DISP_DISABLE);
+-	REG_READ(vga_reg);
+-}
+-
+-static int cdv_output_init(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	cdv_disable_vga(dev);
+-
+-	cdv_intel_crt_init(dev, &dev_priv->mode_dev);
+-	cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
+-
+-	/* These bits indicate HDMI not SDVO on CDV, but we don't yet support
+-	   the HDMI interface */
+-	if (REG_READ(SDVOB) & SDVO_DETECTED)
+-		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
+-	if (REG_READ(SDVOC) & SDVO_DETECTED)
+-		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
+-	return 0;
+-}
+-
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-
+-/*
+- *	Poulsbo Backlight Interfaces
+- */
+-
+-#define BLC_PWM_PRECISION_FACTOR 100	/* 10000000 */
+-#define BLC_PWM_FREQ_CALC_CONSTANT 32
+-#define MHz 1000000
+-
+-#define PSB_BLC_PWM_PRECISION_FACTOR    10
+-#define PSB_BLC_MAX_PWM_REG_FREQ        0xFFFE
+-#define PSB_BLC_MIN_PWM_REG_FREQ        0x2
+-
+-#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+-#define PSB_BACKLIGHT_PWM_CTL_SHIFT	(16)
+-
+-static int cdv_brightness;
+-static struct backlight_device *cdv_backlight_device;
+-
+-static int cdv_get_brightness(struct backlight_device *bd)
+-{
+-	/* return locally cached var instead of HW read (due to DPST etc.) */
+-	/* FIXME: ideally return actual value in case firmware fiddled with
+-	   it */
+-	return cdv_brightness;
+-}
+-
+-
+-static int cdv_backlight_setup(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	unsigned long core_clock;
+-	/* u32 bl_max_freq; */
+-	/* unsigned long value; */
+-	u16 bl_max_freq;
+-	uint32_t value;
+-	uint32_t blc_pwm_precision_factor;
+-
+-	/* get bl_max_freq and pol from dev_priv*/
+-	if (!dev_priv->lvds_bl) {
+-		dev_err(dev->dev, "Has no valid LVDS backlight info\n");
+-		return -ENOENT;
+-	}
+-	bl_max_freq = dev_priv->lvds_bl->freq;
+-	blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+-
+-	core_clock = dev_priv->core_freq;
+-
+-	value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+-	value *= blc_pwm_precision_factor;
+-	value /= bl_max_freq;
+-	value /= blc_pwm_precision_factor;
+-
+-	if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
+-		 value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
+-				return -ERANGE;
+-	else {
+-		/* FIXME */
+-	}
+-	return 0;
+-}
+-
+-static int cdv_set_brightness(struct backlight_device *bd)
+-{
+-	int level = bd->props.brightness;
+-
+-	/* Percentage 1-100% being valid */
+-	if (level < 1)
+-		level = 1;
+-
+-	/*cdv_intel_lvds_set_brightness(dev, level); FIXME */
+-	cdv_brightness = level;
+-	return 0;
+-}
+-
+-static const struct backlight_ops cdv_ops = {
+-	.get_brightness = cdv_get_brightness,
+-	.update_status  = cdv_set_brightness,
+-};
+-
+-static int cdv_backlight_init(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	int ret;
+-	struct backlight_properties props;
+-
+-	memset(&props, 0, sizeof(struct backlight_properties));
+-	props.max_brightness = 100;
+-	props.type = BACKLIGHT_PLATFORM;
+-
+-	cdv_backlight_device = backlight_device_register("psb-bl",
+-					NULL, (void *)dev, &cdv_ops, &props);
+-	if (IS_ERR(cdv_backlight_device))
+-		return PTR_ERR(cdv_backlight_device);
+-
+-	ret = cdv_backlight_setup(dev);
+-	if (ret < 0) {
+-		backlight_device_unregister(cdv_backlight_device);
+-		cdv_backlight_device = NULL;
+-		return ret;
+-	}
+-	cdv_backlight_device->props.brightness = 100;
+-	cdv_backlight_device->props.max_brightness = 100;
+-	backlight_update_status(cdv_backlight_device);
+-	dev_priv->backlight_device = cdv_backlight_device;
+-	return 0;
+-}
+-
+-#endif
+-
+-/*
+- *	Provide the Cedarview specific chip logic and low level methods
+- *	for power management
+- *
+- *	FIXME: we need to implement the apm/ospm base management bits
+- *	for this and the MID devices.
+- */
+-
+-static inline u32 CDV_MSG_READ32(uint port, uint offset)
+-{
+-	int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+-	uint32_t ret_val = 0;
+-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+-	pci_write_config_dword(pci_root, 0xD0, mcr);
+-	pci_read_config_dword(pci_root, 0xD4, &ret_val);
+-	pci_dev_put(pci_root);
+-	return ret_val;
+-}
+-
+-static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
+-{
+-	int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+-	pci_write_config_dword(pci_root, 0xD4, value);
+-	pci_write_config_dword(pci_root, 0xD0, mcr);
+-	pci_dev_put(pci_root);
+-}
+-
+-#define PSB_APM_CMD			0x0
+-#define PSB_APM_STS			0x04
+-#define PSB_PM_SSC			0x20
+-#define PSB_PM_SSS			0x30
+-#define PSB_PWRGT_GFX_MASK		0x3
+-#define CDV_PWRGT_DISPLAY_CNTR		0x000fc00c
+-#define CDV_PWRGT_DISPLAY_STS		0x000fc00c
+-
+-static void cdv_init_pm(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 pwr_cnt;
+-	int i;
+-
+-	dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+-							PSB_APMBA) & 0xFFFF;
+-	dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+-							PSB_OSPMBA) & 0xFFFF;
+-
+-	/* Force power on for now */
+-	pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
+-	pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
+-
+-	outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
+-	for (i = 0; i < 5; i++) {
+-		u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
+-		if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0)
+-			break;
+-		udelay(10);
+-	}
+-	pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
+-	pwr_cnt &= ~CDV_PWRGT_DISPLAY_CNTR;
+-	outl(pwr_cnt, dev_priv->ospm_base + PSB_PM_SSC);
+-	for (i = 0; i < 5; i++) {
+-		u32 pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+-		if ((pwr_sts & CDV_PWRGT_DISPLAY_STS) == 0)
+-			break;
+-		udelay(10);
+-	}
+-}
+-
+-/**
+- *	cdv_save_display_registers	-	save registers lost on suspend
+- *	@dev: our DRM device
+- *
+- *	Save the state we need in order to be able to restore the interface
+- *	upon resume from suspend
+- *
+- *	FIXME: review
+- */
+-static int cdv_save_display_registers(struct drm_device *dev)
+-{
+-	return 0;
+-}
+-
+-/**
+- *	cdv_restore_display_registers	-	restore lost register state
+- *	@dev: our DRM device
+- *
+- *	Restore register state that was lost during suspend and resume.
+- *
+- *	FIXME: review
+- */
+-static int cdv_restore_display_registers(struct drm_device *dev)
+-{
+-	return 0;
+-}
+-
+-static int cdv_power_down(struct drm_device *dev)
+-{
+-	return 0;
+-}
+-
+-static int cdv_power_up(struct drm_device *dev)
+-{
+-	return 0;
+-}
+-
+-/* FIXME ? - shared with Poulsbo */
+-static void cdv_get_core_freq(struct drm_device *dev)
+-{
+-	uint32_t clock;
+-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+-	pci_read_config_dword(pci_root, 0xD4, &clock);
+-	pci_dev_put(pci_root);
+-
+-	switch (clock & 0x07) {
+-	case 0:
+-		dev_priv->core_freq = 100;
+-		break;
+-	case 1:
+-		dev_priv->core_freq = 133;
+-		break;
+-	case 2:
+-		dev_priv->core_freq = 150;
+-		break;
+-	case 3:
+-		dev_priv->core_freq = 178;
+-		break;
+-	case 4:
+-		dev_priv->core_freq = 200;
+-		break;
+-	case 5:
+-	case 6:
+-	case 7:
+-		dev_priv->core_freq = 266;
+-	default:
+-		dev_priv->core_freq = 0;
+-	}
+-}
+-
+-static int cdv_chip_setup(struct drm_device *dev)
+-{
+-	cdv_get_core_freq(dev);
+-	gma_intel_opregion_init(dev);
+-	psb_intel_init_bios(dev);
+-	return 0;
+-}
+-
+-/* CDV is much like Poulsbo but has MID like SGX offsets and PM */
+-
+-const struct psb_ops cdv_chip_ops = {
+-	.name = "Cedartrail",
+-	.accel_2d = 0,
+-	.pipes = 2,
+-	.sgx_offset = MRST_SGX_OFFSET,
+-	.chip_setup = cdv_chip_setup,
+-
+-	.crtc_helper = &cdv_intel_helper_funcs,
+-	.crtc_funcs = &cdv_intel_crtc_funcs,
+-
+-	.output_init = cdv_output_init,
+-
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-	.backlight_init = cdv_backlight_init,
+-#endif
+-
+-	.init_pm = cdv_init_pm,
+-	.save_regs = cdv_save_display_registers,
+-	.restore_regs = cdv_restore_display_registers,
+-	.power_down = cdv_power_down,
+-	.power_up = cdv_power_up,
+-};
+diff --git a/drivers/staging/gma500/cdv_device.h b/drivers/staging/gma500/cdv_device.h
+deleted file mode 100644
+index 2a88b7b..0000000
+--- a/drivers/staging/gma500/cdv_device.h
++++ /dev/null
+@@ -1,36 +0,0 @@
+-/*
+- * Copyright © 2011 Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc., 
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- */
+-
+-extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
+-extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
+-extern void cdv_intel_crt_init(struct drm_device *dev,
+-			struct psb_intel_mode_device *mode_dev);
+-extern void cdv_intel_lvds_init(struct drm_device *dev,
+-			struct psb_intel_mode_device *mode_dev);
+-extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev,
+-			int reg);
+-extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+-					     struct drm_crtc *crtc);
+-
+-extern inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
+-{
+-	/* Wait for 20ms, i.e. one cycle at 50hz. */
+-        /* FIXME: msleep ?? */
+-	mdelay(20);
+-}
+-
+-
+diff --git a/drivers/staging/gma500/cdv_intel_crt.c b/drivers/staging/gma500/cdv_intel_crt.c
+deleted file mode 100644
+index efda63b..0000000
+--- a/drivers/staging/gma500/cdv_intel_crt.c
++++ /dev/null
+@@ -1,326 +0,0 @@
+-/*
+- * Copyright © 2006-2007 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- *	Eric Anholt <eric at anholt.net>
+- */
+-
+-#include <linux/i2c.h>
+-#include <drm/drmP.h>
+-
+-#include "intel_bios.h"
+-#include "psb_drv.h"
+-#include "psb_intel_drv.h"
+-#include "psb_intel_reg.h"
+-#include "power.h"
+-#include <linux/pm_runtime.h>
+-
+-
+-static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	u32 temp, reg;
+-	reg = ADPA;
+-
+-	temp = REG_READ(reg);
+-	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+-	temp &= ~ADPA_DAC_ENABLE;
+-
+-	switch (mode) {
+-	case DRM_MODE_DPMS_ON:
+-		temp |= ADPA_DAC_ENABLE;
+-		break;
+-	case DRM_MODE_DPMS_STANDBY:
+-		temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
+-		break;
+-	case DRM_MODE_DPMS_SUSPEND:
+-		temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
+-		break;
+-	case DRM_MODE_DPMS_OFF:
+-		temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+-		break;
+-	}
+-
+-	REG_WRITE(reg, temp);
+-}
+-
+-static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
+-				struct drm_display_mode *mode)
+-{
+-	int max_clock = 0;
+-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+-		return MODE_NO_DBLESCAN;
+-
+-	/* The lowest clock for CDV is 20000KHz */
+-	if (mode->clock < 20000)
+-		return MODE_CLOCK_LOW;
+-
+-	/* The max clock for CDV is 355 instead of 400 */
+-	max_clock = 355000;
+-	if (mode->clock > max_clock)
+-		return MODE_CLOCK_HIGH;
+-
+-	if (mode->hdisplay > 1680 || mode->vdisplay > 1050)
+-		return MODE_PANEL;
+-
+-	return MODE_OK;
+-}
+-
+-static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
+-				 struct drm_display_mode *mode,
+-				 struct drm_display_mode *adjusted_mode)
+-{
+-	return true;
+-}
+-
+-static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
+-			       struct drm_display_mode *mode,
+-			       struct drm_display_mode *adjusted_mode)
+-{
+-
+-	struct drm_device *dev = encoder->dev;
+-	struct drm_crtc *crtc = encoder->crtc;
+-	struct psb_intel_crtc *psb_intel_crtc =
+-					to_psb_intel_crtc(crtc);
+-	int dpll_md_reg;
+-	u32 adpa, dpll_md;
+-	u32 adpa_reg;
+-
+-	if (psb_intel_crtc->pipe == 0)
+-		dpll_md_reg = DPLL_A_MD;
+-	else
+-		dpll_md_reg = DPLL_B_MD;
+-
+-	adpa_reg = ADPA;
+-
+-	/*
+-	 * Disable separate mode multiplier used when cloning SDVO to CRT
+-	 * XXX this needs to be adjusted when we really are cloning
+-	 */
+-	{
+-		dpll_md = REG_READ(dpll_md_reg);
+-		REG_WRITE(dpll_md_reg,
+-			   dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+-	}
+-
+-	adpa = 0;
+-	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+-		adpa |= ADPA_HSYNC_ACTIVE_HIGH;
+-	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+-		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
+-
+-	if (psb_intel_crtc->pipe == 0)
+-		adpa |= ADPA_PIPE_A_SELECT;
+-	else
+-		adpa |= ADPA_PIPE_B_SELECT;
+-
+-	REG_WRITE(adpa_reg, adpa);
+-}
+-
+-
+-/**
+- * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
+- *
+- * \return true if CRT is connected.
+- * \return false if CRT is disconnected.
+- */
+-static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
+-								bool force)
+-{
+-	struct drm_device *dev = connector->dev;
+-	u32 hotplug_en;
+-	int i, tries = 0, ret = false;
+-	u32 adpa_orig;
+-
+-	/* disable the DAC when doing the hotplug detection */
+-
+-	adpa_orig = REG_READ(ADPA);
+-
+-	REG_WRITE(ADPA, adpa_orig & ~(ADPA_DAC_ENABLE));
+-
+-	/*
+-	 * On a CDV thep, CRT detect sequence need to be done twice
+-	 * to get a reliable result.
+-	 */
+-	tries = 2;
+-
+-	hotplug_en = REG_READ(PORT_HOTPLUG_EN);
+-	hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
+-	hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
+-
+-	hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+-	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+-
+-	for (i = 0; i < tries ; i++) {
+-		unsigned long timeout;
+-		/* turn on the FORCE_DETECT */
+-		REG_WRITE(PORT_HOTPLUG_EN, hotplug_en);
+-		timeout = jiffies + msecs_to_jiffies(1000);
+-		/* wait for FORCE_DETECT to go off */
+-		do {
+-			if (!(REG_READ(PORT_HOTPLUG_EN) &
+-					CRT_HOTPLUG_FORCE_DETECT))
+-				break;
+-			msleep(1);
+-		} while (time_after(timeout, jiffies));
+-	}
+-
+-	if ((REG_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
+-	    CRT_HOTPLUG_MONITOR_NONE)
+-		ret = true;
+-
+-	/* Restore the saved ADPA */
+-	REG_WRITE(ADPA, adpa_orig);
+-	return ret;
+-}
+-
+-static enum drm_connector_status cdv_intel_crt_detect(
+-				struct drm_connector *connector, bool force)
+-{
+-	if (cdv_intel_crt_detect_hotplug(connector, force))
+-		return connector_status_connected;
+-	else
+-		return connector_status_disconnected;
+-}
+-
+-static void cdv_intel_crt_destroy(struct drm_connector *connector)
+-{
+-	struct psb_intel_output *intel_output = to_psb_intel_output(connector);
+-
+-	psb_intel_i2c_destroy(intel_output->ddc_bus);
+-	drm_sysfs_connector_remove(connector);
+-	drm_connector_cleanup(connector);
+-	kfree(connector);
+-}
+-
+-static int cdv_intel_crt_get_modes(struct drm_connector *connector)
+-{
+-	struct psb_intel_output *intel_output =
+-				to_psb_intel_output(connector);
+-	return psb_intel_ddc_get_modes(intel_output);
+-}
+-
+-static int cdv_intel_crt_set_property(struct drm_connector *connector,
+-				  struct drm_property *property,
+-				  uint64_t value)
+-{
+-	return 0;
+-}
+-
+-/*
+- * Routines for controlling stuff on the analog port
+- */
+-
+-static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
+-	.dpms = cdv_intel_crt_dpms,
+-	.mode_fixup = cdv_intel_crt_mode_fixup,
+-	.prepare = psb_intel_encoder_prepare,
+-	.commit = psb_intel_encoder_commit,
+-	.mode_set = cdv_intel_crt_mode_set,
+-};
+-
+-static const struct drm_connector_funcs cdv_intel_crt_connector_funcs = {
+-	.dpms = drm_helper_connector_dpms,
+-	.detect = cdv_intel_crt_detect,
+-	.fill_modes = drm_helper_probe_single_connector_modes,
+-	.destroy = cdv_intel_crt_destroy,
+-	.set_property = cdv_intel_crt_set_property,
+-};
+-
+-static const struct drm_connector_helper_funcs
+-				cdv_intel_crt_connector_helper_funcs = {
+-	.mode_valid = cdv_intel_crt_mode_valid,
+-	.get_modes = cdv_intel_crt_get_modes,
+-	.best_encoder = psb_intel_best_encoder,
+-};
+-
+-static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
+-{
+-	drm_encoder_cleanup(encoder);
+-}
+-
+-static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
+-	.destroy = cdv_intel_crt_enc_destroy,
+-};
+-
+-void cdv_intel_crt_init(struct drm_device *dev,
+-			struct psb_intel_mode_device *mode_dev)
+-{
+-
+-	struct psb_intel_output *psb_intel_output;
+-	struct drm_connector *connector;
+-	struct drm_encoder *encoder;
+-
+-	u32 i2c_reg;
+-
+-	psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
+-	if (!psb_intel_output)
+-		return;
+-
+-	psb_intel_output->mode_dev = mode_dev;
+-	connector = &psb_intel_output->base;
+-	drm_connector_init(dev, connector,
+-		&cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+-
+-	encoder = &psb_intel_output->enc;
+-	drm_encoder_init(dev, encoder,
+-		&cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
+-
+-	drm_mode_connector_attach_encoder(&psb_intel_output->base,
+-					  &psb_intel_output->enc);
+-
+-	/* Set up the DDC bus. */
+-	i2c_reg = GPIOA;
+-	/* Remove the following code for CDV */
+-	/*
+-	if (dev_priv->crt_ddc_bus != 0)
+-		i2c_reg = dev_priv->crt_ddc_bus;
+-	}*/
+-	psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
+-						i2c_reg, "CRTDDC_A");
+-	if (!psb_intel_output->ddc_bus) {
+-		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
+-			   "failed.\n");
+-		goto failed_ddc;
+-	}
+-
+-	psb_intel_output->type = INTEL_OUTPUT_ANALOG;
+-	/*
+-	psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
+-	psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
+-	*/
+-	connector->interlace_allowed = 0;
+-	connector->doublescan_allowed = 0;
+-
+-	drm_encoder_helper_add(encoder, &cdv_intel_crt_helper_funcs);
+-	drm_connector_helper_add(connector,
+-					&cdv_intel_crt_connector_helper_funcs);
+-
+-	drm_sysfs_connector_add(connector);
+-
+-	return;
+-failed_ddc:
+-	drm_encoder_cleanup(&psb_intel_output->enc);
+-	drm_connector_cleanup(&psb_intel_output->base);
+-	kfree(psb_intel_output);
+-	return;
+-}
+diff --git a/drivers/staging/gma500/cdv_intel_display.c b/drivers/staging/gma500/cdv_intel_display.c
+deleted file mode 100644
+index 7b97c60..0000000
+--- a/drivers/staging/gma500/cdv_intel_display.c
++++ /dev/null
+@@ -1,1508 +0,0 @@
+-/*
+- * Copyright © 2006-2011 Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors:
+- *	Eric Anholt <eric at anholt.net>
+- */
+-
+-#include <linux/i2c.h>
+-#include <linux/pm_runtime.h>
+-
+-#include <drm/drmP.h>
+-#include "framebuffer.h"
+-#include "psb_drv.h"
+-#include "psb_intel_drv.h"
+-#include "psb_intel_reg.h"
+-#include "psb_intel_display.h"
+-#include "power.h"
+-#include "cdv_device.h"
+-
+-
+-struct cdv_intel_range_t {
+-	int min, max;
+-};
+-
+-struct cdv_intel_p2_t {
+-	int dot_limit;
+-	int p2_slow, p2_fast;
+-};
+-
+-struct cdv_intel_clock_t {
+-	/* given values */
+-	int n;
+-	int m1, m2;
+-	int p1, p2;
+-	/* derived values */
+-	int dot;
+-	int vco;
+-	int m;
+-	int p;
+-};
+-
+-#define INTEL_P2_NUM		      2
+-
+-struct cdv_intel_limit_t {
+-	struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
+-	struct cdv_intel_p2_t p2;
+-};
+-
+-#define CDV_LIMIT_SINGLE_LVDS_96	0
+-#define CDV_LIMIT_SINGLE_LVDS_100	1
+-#define CDV_LIMIT_DAC_HDMI_27		2
+-#define CDV_LIMIT_DAC_HDMI_96		3
+-
+-static const struct cdv_intel_limit_t cdv_intel_limits[] = {
+-	{			/* CDV_SIGNLE_LVDS_96MHz */
+-	 .dot = {.min = 20000, .max = 115500},
+-	 .vco = {.min = 1800000, .max = 3600000},
+-	 .n = {.min = 2, .max = 6},
+-	 .m = {.min = 60, .max = 160},
+-	 .m1 = {.min = 0, .max = 0},
+-	 .m2 = {.min = 58, .max = 158},
+-	 .p = {.min = 28, .max = 140},
+-	 .p1 = {.min = 2, .max = 10},
+-	 .p2 = {.dot_limit = 200000,
+-		.p2_slow = 14, .p2_fast = 14},
+-	 },
+-	{			/* CDV_SINGLE_LVDS_100MHz */
+-	 .dot = {.min = 20000, .max = 115500},
+-	 .vco = {.min = 1800000, .max = 3600000},
+-	 .n = {.min = 2, .max = 6},
+-	 .m = {.min = 60, .max = 160},
+-	 .m1 = {.min = 0, .max = 0},
+-	 .m2 = {.min = 58, .max = 158},
+-	 .p = {.min = 28, .max = 140},
+-	 .p1 = {.min = 2, .max = 10},
+-	 /* The single-channel range is 25-112Mhz, and dual-channel
+-	  * is 80-224Mhz.  Prefer single channel as much as possible.
+-	  */
+-	 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+-	 },
+-	{			/* CDV_DAC_HDMI_27MHz */
+-	 .dot = {.min = 20000, .max = 400000},
+-	 .vco = {.min = 1809000, .max = 3564000},
+-	 .n = {.min = 1, .max = 1},
+-	 .m = {.min = 67, .max = 132},
+-	 .m1 = {.min = 0, .max = 0},
+-	 .m2 = {.min = 65, .max = 130},
+-	 .p = {.min = 5, .max = 90},
+-	 .p1 = {.min = 1, .max = 9},
+-	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+-	 },
+-	{			/* CDV_DAC_HDMI_96MHz */
+-	 .dot = {.min = 20000, .max = 400000},
+-	 .vco = {.min = 1800000, .max = 3600000},
+-	 .n = {.min = 2, .max = 6},
+-	 .m = {.min = 60, .max = 160},
+-	 .m1 = {.min = 0, .max = 0},
+-	 .m2 = {.min = 58, .max = 158},
+-	 .p = {.min = 5, .max = 100},
+-	 .p1 = {.min = 1, .max = 10},
+-	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
+-	 },
+-};
+-
+-#define _wait_for(COND, MS, W) ({ \
+-	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
+-	int ret__ = 0;							\
+-	while (!(COND)) {						\
+-		if (time_after(jiffies, timeout__)) {			\
+-			ret__ = -ETIMEDOUT;				\
+-			break;						\
+-		}							\
+-		if (W && !in_dbg_master())				\
+-			msleep(W);					\
+-	}								\
+-	ret__;								\
+-})
+-
+-#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+-
+-
+-static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
+-{
+-	int ret;
+-
+-	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+-	if (ret) {
+-		DRM_ERROR("timeout waiting for SB to idle before read\n");
+-		return ret;
+-	}
+-
+-	REG_WRITE(SB_ADDR, reg);
+-	REG_WRITE(SB_PCKT,
+-		   SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
+-		   SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+-		   SET_FIELD(0xf, SB_BYTE_ENABLE));
+-
+-	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+-	if (ret) {
+-		DRM_ERROR("timeout waiting for SB to idle after read\n");
+-		return ret;
+-	}
+-
+-	*val = REG_READ(SB_DATA);
+-
+-	return 0;
+-}
+-
+-static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
+-{
+-	int ret;
+-	static bool dpio_debug = true;
+-	u32 temp;
+-
+-	if (dpio_debug) {
+-		if (cdv_sb_read(dev, reg, &temp) == 0)
+-			DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
+-		DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
+-	}
+-
+-	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+-	if (ret) {
+-		DRM_ERROR("timeout waiting for SB to idle before write\n");
+-		return ret;
+-	}
+-
+-	REG_WRITE(SB_ADDR, reg);
+-	REG_WRITE(SB_DATA, val);
+-	REG_WRITE(SB_PCKT,
+-		   SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
+-		   SET_FIELD(SB_DEST_DPLL, SB_DEST) |
+-		   SET_FIELD(0xf, SB_BYTE_ENABLE));
+-
+-	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
+-	if (ret) {
+-		DRM_ERROR("timeout waiting for SB to idle after write\n");
+-		return ret;
+-	}
+-
+-	if (dpio_debug) {
+-		if (cdv_sb_read(dev, reg, &temp) == 0)
+-			DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
+-	}
+-
+-	return 0;
+-}
+-
+-/* Reset the DPIO configuration register.  The BIOS does this at every
+- * mode set.
+- */
+-static void cdv_sb_reset(struct drm_device *dev)
+-{
+-
+-	REG_WRITE(DPIO_CFG, 0);
+-	REG_READ(DPIO_CFG);
+-	REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
+-}
+-
+-/* Unlike most Intel display engines, on Cedarview the DPLL registers
+- * are behind this sideband bus.  They must be programmed while the
+- * DPLL reference clock is on in the DPLL control register, but before
+- * the DPLL is enabled in the DPLL control register.
+- */
+-static int
+-cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
+-			       struct cdv_intel_clock_t *clock)
+-{
+-	struct psb_intel_crtc *psb_crtc =
+-				to_psb_intel_crtc(crtc);
+-	int pipe = psb_crtc->pipe;
+-	u32 m, n_vco, p;
+-	int ret = 0;
+-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+-	u32 ref_value;
+-
+-	cdv_sb_reset(dev);
+-
+-	if ((REG_READ(dpll_reg) & DPLL_SYNCLOCK_ENABLE) == 0) {
+-		DRM_ERROR("Attempting to set DPLL with refclk disabled\n");
+-		return -EBUSY;
+-	}
+-
+-	/* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
+-	ref_value = 0x68A701;
+-
+-	cdv_sb_write(dev, SB_REF_SFR(pipe), ref_value);
+-
+-	/* We don't know what the other fields of these regs are, so
+-	 * leave them in place.
+-	 */
+-	ret = cdv_sb_read(dev, SB_M(pipe), &m);
+-	if (ret)
+-		return ret;
+-	m &= ~SB_M_DIVIDER_MASK;
+-	m |= ((clock->m2) << SB_M_DIVIDER_SHIFT);
+-	ret = cdv_sb_write(dev, SB_M(pipe), m);
+-	if (ret)
+-		return ret;
+-
+-	ret = cdv_sb_read(dev, SB_N_VCO(pipe), &n_vco);
+-	if (ret)
+-		return ret;
+-
+-	/* Follow the BIOS to program the N_DIVIDER REG */
+-	n_vco &= 0xFFFF;
+-	n_vco |= 0x107;
+-	n_vco &= ~(SB_N_VCO_SEL_MASK |
+-		   SB_N_DIVIDER_MASK |
+-		   SB_N_CB_TUNE_MASK);
+-
+-	n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT);
+-
+-	if (clock->vco < 2250000) {
+-		n_vco |= (2 << SB_N_CB_TUNE_SHIFT);
+-		n_vco |= (0 << SB_N_VCO_SEL_SHIFT);
+-	} else if (clock->vco < 2750000) {
+-		n_vco |= (1 << SB_N_CB_TUNE_SHIFT);
+-		n_vco |= (1 << SB_N_VCO_SEL_SHIFT);
+-	} else if (clock->vco < 3300000) {
+-		n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+-		n_vco |= (2 << SB_N_VCO_SEL_SHIFT);
+-	} else {
+-		n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
+-		n_vco |= (3 << SB_N_VCO_SEL_SHIFT);
+-	}
+-
+-	ret = cdv_sb_write(dev, SB_N_VCO(pipe), n_vco);
+-	if (ret)
+-		return ret;
+-
+-	ret = cdv_sb_read(dev, SB_P(pipe), &p);
+-	if (ret)
+-		return ret;
+-	p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
+-	p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
+-	switch (clock->p2) {
+-	case 5:
+-		p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
+-		break;
+-	case 10:
+-		p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
+-		break;
+-	case 14:
+-		p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
+-		break;
+-	case 7:
+-		p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
+-		break;
+-	default:
+-		DRM_ERROR("Bad P2 clock: %d\n", clock->p2);
+-		return -EINVAL;
+-	}
+-	ret = cdv_sb_write(dev, SB_P(pipe), p);
+-	if (ret)
+-		return ret;
+-
+-	/* always Program the Lane Register for the Pipe A*/
+-	if (pipe == 0) {
+-		/* Program the Lane0/1 for HDMI B */
+-		u32 lane_reg, lane_value;
+-
+-		lane_reg = PSB_LANE0;
+-		cdv_sb_read(dev, lane_reg, &lane_value);
+-		lane_value &= ~(LANE_PLL_MASK);
+-		lane_value |= LANE_PLL_ENABLE;
+-		cdv_sb_write(dev, lane_reg, lane_value);
+-
+-		lane_reg = PSB_LANE1;
+-		cdv_sb_read(dev, lane_reg, &lane_value);
+-		lane_value &= ~(LANE_PLL_MASK);
+-		lane_value |= LANE_PLL_ENABLE;
+-		cdv_sb_write(dev, lane_reg, lane_value);
+-
+-		/* Program the Lane2/3 for HDMI C */
+-		lane_reg = PSB_LANE2;
+-		cdv_sb_read(dev, lane_reg, &lane_value);
+-		lane_value &= ~(LANE_PLL_MASK);
+-		lane_value |= LANE_PLL_ENABLE;
+-		cdv_sb_write(dev, lane_reg, lane_value);
+-
+-		lane_reg = PSB_LANE3;
+-		cdv_sb_read(dev, lane_reg, &lane_value);
+-		lane_value &= ~(LANE_PLL_MASK);
+-		lane_value |= LANE_PLL_ENABLE;
+-		cdv_sb_write(dev, lane_reg, lane_value);
+-	}
+-
+-	return 0;
+-}
+-
+-/*
+- * Returns whether any output on the specified pipe is of the specified type
+- */
+-bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct drm_mode_config *mode_config = &dev->mode_config;
+-	struct drm_connector *l_entry;
+-
+-	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+-		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+-			struct psb_intel_output *psb_intel_output =
+-			    to_psb_intel_output(l_entry);
+-			if (psb_intel_output->type == type)
+-				return true;
+-		}
+-	}
+-	return false;
+-}
+-
+-static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
+-							int refclk)
+-{
+-	const struct cdv_intel_limit_t *limit;
+-	if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+-		/*
+-		 * Now only single-channel LVDS is supported on CDV. If it is
+-		 * incorrect, please add the dual-channel LVDS.
+-		 */
+-		if (refclk == 96000)
+-			limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
+-		else
+-			limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
+-	} else {
+-		if (refclk == 27000)
+-			limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
+-		else
+-			limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_96];
+-	}
+-	return limit;
+-}
+-
+-/* m1 is reserved as 0 in CDV, n is a ring counter */
+-static void cdv_intel_clock(struct drm_device *dev,
+-			int refclk, struct cdv_intel_clock_t *clock)
+-{
+-	clock->m = clock->m2 + 2;
+-	clock->p = clock->p1 * clock->p2;
+-	clock->vco = (refclk * clock->m) / clock->n;
+-	clock->dot = clock->vco / clock->p;
+-}
+-
+-
+-#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
+-static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
+-				const struct cdv_intel_limit_t *limit,
+-			       struct cdv_intel_clock_t *clock)
+-{
+-	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+-		INTELPllInvalid("p1 out of range\n");
+-	if (clock->p < limit->p.min || limit->p.max < clock->p)
+-		INTELPllInvalid("p out of range\n");
+-	/* unnecessary to check the range of m(m1/M2)/n again */
+-	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+-		INTELPllInvalid("vco out of range\n");
+-	/* XXX: We may need to be checking "Dot clock"
+-	 * depending on the multiplier, connector, etc.,
+-	 * rather than just a single range.
+-	 */
+-	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+-		INTELPllInvalid("dot out of range\n");
+-
+-	return true;
+-}
+-
+-static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
+-				int refclk,
+-				struct cdv_intel_clock_t *best_clock)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct cdv_intel_clock_t clock;
+-	const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
+-	int err = target;
+-
+-
+-	if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+-	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+-		/*
+-		 * For LVDS, if the panel is on, just rely on its current
+-		 * settings for dual-channel.  We haven't figured out how to
+-		 * reliably set up different single/dual channel state, if we
+-		 * even can.
+-		 */
+-		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+-		    LVDS_CLKB_POWER_UP)
+-			clock.p2 = limit->p2.p2_fast;
+-		else
+-			clock.p2 = limit->p2.p2_slow;
+-	} else {
+-		if (target < limit->p2.dot_limit)
+-			clock.p2 = limit->p2.p2_slow;
+-		else
+-			clock.p2 = limit->p2.p2_fast;
+-	}
+-
+-	memset(best_clock, 0, sizeof(*best_clock));
+-	clock.m1 = 0;
+-	/* m1 is reserved as 0 in CDV, n is a ring counter.
+-	   So skip the m1 loop */
+-	for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
+-		for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
+-					     clock.m2++) {
+-			for (clock.p1 = limit->p1.min;
+-					clock.p1 <= limit->p1.max;
+-					clock.p1++) {
+-				int this_err;
+-
+-				cdv_intel_clock(dev, refclk, &clock);
+-
+-				if (!cdv_intel_PLL_is_valid(crtc,
+-								limit, &clock))
+-						continue;
+-
+-				this_err = abs(clock.dot - target);
+-				if (this_err < err) {
+-					*best_clock = clock;
+-					err = this_err;
+-				}
+-			}
+-		}
+-	}
+-
+-	return err != target;
+-}
+-
+-int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
+-			    int x, int y, struct drm_framebuffer *old_fb)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+-	int pipe = psb_intel_crtc->pipe;
+-	unsigned long start, offset;
+-	int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
+-	int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+-	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+-	u32 dspcntr;
+-	int ret = 0;
+-
+-	if (!gma_power_begin(dev, true))
+-		return 0;
+-
+-	/* no fb bound */
+-	if (!crtc->fb) {
+-		dev_err(dev->dev, "No FB bound\n");
+-		goto psb_intel_pipe_cleaner;
+-	}
+-
+-
+-	/* We are displaying this buffer, make sure it is actually loaded
+-	   into the GTT */
+-	ret = psb_gtt_pin(psbfb->gtt);
+-	if (ret < 0)
+-		goto psb_intel_pipe_set_base_exit;
+-	start = psbfb->gtt->offset;
+-	offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+-
+-	REG_WRITE(dspstride, crtc->fb->pitch);
+-
+-	dspcntr = REG_READ(dspcntr_reg);
+-	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+-
+-	switch (crtc->fb->bits_per_pixel) {
+-	case 8:
+-		dspcntr |= DISPPLANE_8BPP;
+-		break;
+-	case 16:
+-		if (crtc->fb->depth == 15)
+-			dspcntr |= DISPPLANE_15_16BPP;
+-		else
+-			dspcntr |= DISPPLANE_16BPP;
+-		break;
+-	case 24:
+-	case 32:
+-		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+-		break;
+-	default:
+-		dev_err(dev->dev, "Unknown color depth\n");
+-		ret = -EINVAL;
+-		goto psb_intel_pipe_set_base_exit;
+-	}
+-	REG_WRITE(dspcntr_reg, dspcntr);
+-
+-	dev_dbg(dev->dev,
+-		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);
+-
+-	REG_WRITE(dspbase, offset);
+-	REG_READ(dspbase);
+-	REG_WRITE(dspsurf, start);
+-	REG_READ(dspsurf);
+-
+-psb_intel_pipe_cleaner:
+-	/* If there was a previous display we can now unpin it */
+-	if (old_fb)
+-		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+-
+-psb_intel_pipe_set_base_exit:
+-	gma_power_end(dev);
+-	return ret;
+-}
+-
+-/**
+- * Sets the power management mode of the pipe and plane.
+- *
+- * This code should probably grow support for turning the cursor off and back
+- * on appropriately at the same time as we're turning the pipe off/on.
+- */
+-static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+-	int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
+-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+-	u32 temp;
+-	bool enabled;
+-
+-	/* XXX: When our outputs are all unaware of DPMS modes other than off
+-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+-	 */
+-	switch (mode) {
+-	case DRM_MODE_DPMS_ON:
+-	case DRM_MODE_DPMS_STANDBY:
+-	case DRM_MODE_DPMS_SUSPEND:
+-		/* Enable the DPLL */
+-		temp = REG_READ(dpll_reg);
+-		if ((temp & DPLL_VCO_ENABLE) == 0) {
+-			REG_WRITE(dpll_reg, temp);
+-			REG_READ(dpll_reg);
+-			/* Wait for the clocks to stabilize. */
+-			udelay(150);
+-			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+-			REG_READ(dpll_reg);
+-			/* Wait for the clocks to stabilize. */
+-			udelay(150);
+-			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+-			REG_READ(dpll_reg);
+-			/* Wait for the clocks to stabilize. */
+-			udelay(150);
+-		}
+-
+-		/* Jim Bish - switch plan and pipe per scott */
+-		/* Enable the plane */
+-		temp = REG_READ(dspcntr_reg);
+-		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+-			REG_WRITE(dspcntr_reg,
+-				  temp | DISPLAY_PLANE_ENABLE);
+-			/* Flush the plane changes */
+-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+-		}
+-
+-		udelay(150);
+-
+-		/* Enable the pipe */
+-		temp = REG_READ(pipeconf_reg);
+-		if ((temp & PIPEACONF_ENABLE) == 0)
+-			REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+-
+-		psb_intel_crtc_load_lut(crtc);
+-
+-		/* Give the overlay scaler a chance to enable
+-		 * if it's on this pipe */
+-		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+-		break;
+-	case DRM_MODE_DPMS_OFF:
+-		/* Give the overlay scaler a chance to disable
+-		 * if it's on this pipe */
+-		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+-
+-		/* Disable the VGA plane that we never use */
+-		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+-
+-		/* Jim Bish - changed pipe/plane here as well. */
+-
+-		/* Wait for vblank for the disable to take effect */
+-		cdv_intel_wait_for_vblank(dev);
+-
+-		/* Next, disable display pipes */
+-		temp = REG_READ(pipeconf_reg);
+-		if ((temp & PIPEACONF_ENABLE) != 0) {
+-			REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+-			REG_READ(pipeconf_reg);
+-		}
+-
+-		/* Wait for vblank for the disable to take effect. */
+-		cdv_intel_wait_for_vblank(dev);
+-
+-		udelay(150);
+-
+-		/* Disable display plane */
+-		temp = REG_READ(dspcntr_reg);
+-		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+-			REG_WRITE(dspcntr_reg,
+-				  temp & ~DISPLAY_PLANE_ENABLE);
+-			/* Flush the plane changes */
+-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+-			REG_READ(dspbase_reg);
+-		}
+-
+-		temp = REG_READ(dpll_reg);
+-		if ((temp & DPLL_VCO_ENABLE) != 0) {
+-			REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+-			REG_READ(dpll_reg);
+-		}
+-
+-		/* Wait for the clocks to turn off. */
+-		udelay(150);
+-		break;
+-	}
+-	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+-	/*Set FIFO Watermarks*/
+-	REG_WRITE(DSPARB, 0x3F3E);
+-}
+-
+-static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
+-{
+-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+-}
+-
+-static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
+-{
+-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+-}
+-
+-void cdv_intel_encoder_prepare(struct drm_encoder *encoder)
+-{
+-	struct drm_encoder_helper_funcs *encoder_funcs =
+-	    encoder->helper_private;
+-	/* lvds has its own version of prepare see cdv_intel_lvds_prepare */
+-	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+-}
+-
+-void cdv_intel_encoder_commit(struct drm_encoder *encoder)
+-{
+-	struct drm_encoder_helper_funcs *encoder_funcs =
+-	    encoder->helper_private;
+-	/* lvds has its own version of commit see cdv_intel_lvds_commit */
+-	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+-}
+-
+-static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+-				  struct drm_display_mode *mode,
+-				  struct drm_display_mode *adjusted_mode)
+-{
+-	return true;
+-}
+-
+-
+-/**
+- * Return the pipe currently connected to the panel fitter,
+- * or -1 if the panel fitter is not present or not in use
+- */
+-static int cdv_intel_panel_fitter_pipe(struct drm_device *dev)
+-{
+-	u32 pfit_control;
+-
+-	pfit_control = REG_READ(PFIT_CONTROL);
+-
+-	/* See if the panel fitter is in use */
+-	if ((pfit_control & PFIT_ENABLE) == 0)
+-		return -1;
+-	return (pfit_control >> 29) & 0x3;
+-}
+-
+-static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
+-			       struct drm_display_mode *mode,
+-			       struct drm_display_mode *adjusted_mode,
+-			       int x, int y,
+-			       struct drm_framebuffer *old_fb)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+-	int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
+-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+-	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+-	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+-	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+-	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+-	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+-	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+-	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+-	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+-	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+-	int refclk;
+-	struct cdv_intel_clock_t clock;
+-	u32 dpll = 0, dspcntr, pipeconf;
+-	bool ok, is_sdvo = false, is_dvo = false;
+-	bool is_crt = false, is_lvds = false, is_tv = false;
+-	bool is_hdmi = false;
+-	struct drm_mode_config *mode_config = &dev->mode_config;
+-	struct drm_connector *connector;
+-
+-	list_for_each_entry(connector, &mode_config->connector_list, head) {
+-		struct psb_intel_output *psb_intel_output =
+-		    to_psb_intel_output(connector);
+-
+-		if (!connector->encoder
+-		    || connector->encoder->crtc != crtc)
+-			continue;
+-
+-		switch (psb_intel_output->type) {
+-		case INTEL_OUTPUT_LVDS:
+-			is_lvds = true;
+-			break;
+-		case INTEL_OUTPUT_SDVO:
+-			is_sdvo = true;
+-			break;
+-		case INTEL_OUTPUT_DVO:
+-			is_dvo = true;
+-			break;
+-		case INTEL_OUTPUT_TVOUT:
+-			is_tv = true;
+-			break;
+-		case INTEL_OUTPUT_ANALOG:
+-			is_crt = true;
+-			break;
+-		case INTEL_OUTPUT_HDMI:
+-			is_hdmi = true;
+-			break;
+-		}
+-	}
+-
+-	refclk = 96000;
+-
+-	/* Hack selection about ref clk for CRT */
+-	/* Select 27MHz as the reference clk for HDMI */
+-	if (is_crt || is_hdmi)
+-		refclk = 27000;
+-
+-	drm_mode_debug_printmodeline(adjusted_mode);
+-
+-	ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+-				 &clock);
+-	if (!ok) {
+-		dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+-		return 0;
+-	}
+-
+-	dpll = DPLL_VGA_MODE_DIS;
+-	if (is_tv) {
+-		/* XXX: just matching BIOS for now */
+-/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
+-		dpll |= 3;
+-	}
+-		dpll |= PLL_REF_INPUT_DREFCLK;
+-
+-	dpll |= DPLL_SYNCLOCK_ENABLE;
+-	dpll |= DPLL_VGA_MODE_DIS;
+-	if (is_lvds)
+-		dpll |= DPLLB_MODE_LVDS;
+-	else
+-		dpll |= DPLLB_MODE_DAC_SERIAL;
+-	/* dpll |= (2 << 11); */
+-
+-	/* setup pipeconf */
+-	pipeconf = REG_READ(pipeconf_reg);
+-
+-	/* Set up the display plane register */
+-	dspcntr = DISPPLANE_GAMMA_ENABLE;
+-
+-	if (pipe == 0)
+-		dspcntr |= DISPPLANE_SEL_PIPE_A;
+-	else
+-		dspcntr |= DISPPLANE_SEL_PIPE_B;
+-
+-	dspcntr |= DISPLAY_PLANE_ENABLE;
+-	pipeconf |= PIPEACONF_ENABLE;
+-
+-	REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
+-	REG_READ(dpll_reg);
+-
+-	cdv_dpll_set_clock_cdv(dev, crtc, &clock);
+-
+-	udelay(150);
+-
+-
+-	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
+-	 * This is an exception to the general rule that mode_set doesn't turn
+-	 * things on.
+-	 */
+-	if (is_lvds) {
+-		u32 lvds = REG_READ(LVDS);
+-
+-		lvds |=
+-		    LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
+-		    LVDS_PIPEB_SELECT;
+-		/* Set the B0-B3 data pairs corresponding to
+-		 * whether we're going to
+-		 * set the DPLLs for dual-channel mode or not.
+-		 */
+-		if (clock.p2 == 7)
+-			lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+-		else
+-			lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+-
+-		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+-		 * appropriately here, but we need to look more
+-		 * thoroughly into how panels behave in the two modes.
+-		 */
+-
+-		REG_WRITE(LVDS, lvds);
+-		REG_READ(LVDS);
+-	}
+-
+-	dpll |= DPLL_VCO_ENABLE;
+-
+-	/* Disable the panel fitter if it was on our pipe */
+-	if (cdv_intel_panel_fitter_pipe(dev) == pipe)
+-		REG_WRITE(PFIT_CONTROL, 0);
+-
+-	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+-	drm_mode_debug_printmodeline(mode);
+-
+-	REG_WRITE(dpll_reg,
+-		(REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
+-	REG_READ(dpll_reg);
+-	/* Wait for the clocks to stabilize. */
+-	udelay(150); /* 42 usec w/o calibration, 110 with.  rounded up. */
+-
+-	if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
+-		dev_err(dev->dev, "Failed to get DPLL lock\n");
+-		return -EBUSY;
+-	}
+-
+-	{
+-		int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+-		REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
+-	}
+-
+-	REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+-		  ((adjusted_mode->crtc_htotal - 1) << 16));
+-	REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+-		  ((adjusted_mode->crtc_hblank_end - 1) << 16));
+-	REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+-		  ((adjusted_mode->crtc_hsync_end - 1) << 16));
+-	REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+-		  ((adjusted_mode->crtc_vtotal - 1) << 16));
+-	REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+-		  ((adjusted_mode->crtc_vblank_end - 1) << 16));
+-	REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+-		  ((adjusted_mode->crtc_vsync_end - 1) << 16));
+-	/* pipesrc and dspsize control the size that is scaled from,
+-	 * which should always be the user's requested size.
+-	 */
+-	REG_WRITE(dspsize_reg,
+-		  ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+-	REG_WRITE(dsppos_reg, 0);
+-	REG_WRITE(pipesrc_reg,
+-		  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+-	REG_WRITE(pipeconf_reg, pipeconf);
+-	REG_READ(pipeconf_reg);
+-
+-	cdv_intel_wait_for_vblank(dev);
+-
+-	REG_WRITE(dspcntr_reg, dspcntr);
+-
+-	/* Flush the plane changes */
+-	{
+-		struct drm_crtc_helper_funcs *crtc_funcs =
+-		    crtc->helper_private;
+-		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+-	}
+-
+-	cdv_intel_wait_for_vblank(dev);
+-
+-	return 0;
+-}
+-
+-/** Loads the palette/gamma unit for the CRTC with the prepared values */
+-void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct drm_psb_private *dev_priv =
+-				(struct drm_psb_private *)dev->dev_private;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int palreg = PALETTE_A;
+-	int i;
+-
+-	/* The clocks have to be on to load the palette. */
+-	if (!crtc->enabled)
+-		return;
+-
+-	switch (psb_intel_crtc->pipe) {
+-	case 0:
+-		break;
+-	case 1:
+-		palreg = PALETTE_B;
+-		break;
+-	case 2:
+-		palreg = PALETTE_C;
+-		break;
+-	default:
+-		dev_err(dev->dev, "Illegal Pipe Number.\n");
+-		return;
+-	}
+-
+-	if (gma_power_begin(dev, false)) {
+-		for (i = 0; i < 256; i++) {
+-			REG_WRITE(palreg + 4 * i,
+-				  ((psb_intel_crtc->lut_r[i] +
+-				  psb_intel_crtc->lut_adj[i]) << 16) |
+-				  ((psb_intel_crtc->lut_g[i] +
+-				  psb_intel_crtc->lut_adj[i]) << 8) |
+-				  (psb_intel_crtc->lut_b[i] +
+-				  psb_intel_crtc->lut_adj[i]));
+-		}
+-		gma_power_end(dev);
+-	} else {
+-		for (i = 0; i < 256; i++) {
+-			dev_priv->save_palette_a[i] =
+-				  ((psb_intel_crtc->lut_r[i] +
+-				  psb_intel_crtc->lut_adj[i]) << 16) |
+-				  ((psb_intel_crtc->lut_g[i] +
+-				  psb_intel_crtc->lut_adj[i]) << 8) |
+-				  (psb_intel_crtc->lut_b[i] +
+-				  psb_intel_crtc->lut_adj[i]);
+-		}
+-
+-	}
+-}
+-
+-/**
+- * Save HW states of giving crtc
+- */
+-static void cdv_intel_crtc_save(struct drm_crtc *crtc)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	/* struct drm_psb_private *dev_priv =
+-			(struct drm_psb_private *)dev->dev_private; */
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+-	int pipeA = (psb_intel_crtc->pipe == 0);
+-	uint32_t paletteReg;
+-	int i;
+-
+-	if (!crtc_state) {
+-		dev_dbg(dev->dev, "No CRTC state found\n");
+-		return;
+-	}
+-
+-	crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
+-	crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
+-	crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
+-	crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
+-	crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
+-	crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
+-	crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
+-	crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
+-	crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
+-	crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
+-	crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
+-	crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
+-	crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+-
+-	/*NOTE: DSPSIZE DSPPOS only for psb*/
+-	crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
+-	crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+-
+-	crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+-
+-	DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+-			crtc_state->saveDSPCNTR,
+-			crtc_state->savePIPECONF,
+-			crtc_state->savePIPESRC,
+-			crtc_state->saveFP0,
+-			crtc_state->saveFP1,
+-			crtc_state->saveDPLL,
+-			crtc_state->saveHTOTAL,
+-			crtc_state->saveHBLANK,
+-			crtc_state->saveHSYNC,
+-			crtc_state->saveVTOTAL,
+-			crtc_state->saveVBLANK,
+-			crtc_state->saveVSYNC,
+-			crtc_state->saveDSPSTRIDE,
+-			crtc_state->saveDSPSIZE,
+-			crtc_state->saveDSPPOS,
+-			crtc_state->saveDSPBASE
+-		);
+-
+-	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+-	for (i = 0; i < 256; ++i)
+-		crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+-}
+-
+-/**
+- * Restore HW states of giving crtc
+- */
+-static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	/* struct drm_psb_private * dev_priv =
+-				(struct drm_psb_private *)dev->dev_private; */
+-	struct psb_intel_crtc *psb_intel_crtc =  to_psb_intel_crtc(crtc);
+-	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+-	/* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
+-	int pipeA = (psb_intel_crtc->pipe == 0);
+-	uint32_t paletteReg;
+-	int i;
+-
+-	if (!crtc_state) {
+-		dev_dbg(dev->dev, "No crtc state\n");
+-		return;
+-	}
+-
+-	DRM_DEBUG(
+-		"current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+-		REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
+-		REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
+-		REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
+-		REG_READ(pipeA ? FPA0 : FPB0),
+-		REG_READ(pipeA ? FPA1 : FPB1),
+-		REG_READ(pipeA ? DPLL_A : DPLL_B),
+-		REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
+-		REG_READ(pipeA ? HBLANK_A : HBLANK_B),
+-		REG_READ(pipeA ? HSYNC_A : HSYNC_B),
+-		REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
+-		REG_READ(pipeA ? VBLANK_A : VBLANK_B),
+-		REG_READ(pipeA ? VSYNC_A : VSYNC_B),
+-		REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
+-		REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
+-		REG_READ(pipeA ? DSPAPOS : DSPBPOS),
+-		REG_READ(pipeA ? DSPABASE : DSPBBASE)
+-		);
+-
+-	DRM_DEBUG(
+-		"saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
+-		crtc_state->saveDSPCNTR,
+-		crtc_state->savePIPECONF,
+-		crtc_state->savePIPESRC,
+-		crtc_state->saveFP0,
+-		crtc_state->saveFP1,
+-		crtc_state->saveDPLL,
+-		crtc_state->saveHTOTAL,
+-		crtc_state->saveHBLANK,
+-		crtc_state->saveHSYNC,
+-		crtc_state->saveVTOTAL,
+-		crtc_state->saveVBLANK,
+-		crtc_state->saveVSYNC,
+-		crtc_state->saveDSPSTRIDE,
+-		crtc_state->saveDSPSIZE,
+-		crtc_state->saveDSPPOS,
+-		crtc_state->saveDSPBASE
+-		);
+-
+-
+-	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+-		REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+-			crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+-		REG_READ(pipeA ? DPLL_A : DPLL_B);
+-		DRM_DEBUG("write dpll: %x\n",
+-				REG_READ(pipeA ? DPLL_A : DPLL_B));
+-		udelay(150);
+-	}
+-
+-	REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
+-	REG_READ(pipeA ? FPA0 : FPB0);
+-
+-	REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
+-	REG_READ(pipeA ? FPA1 : FPB1);
+-
+-	REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
+-	REG_READ(pipeA ? DPLL_A : DPLL_B);
+-	udelay(150);
+-
+-	REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
+-	REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
+-	REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
+-	REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
+-	REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
+-	REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
+-	REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+-
+-	REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
+-	REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+-
+-	REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
+-	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+-	REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+-
+-	cdv_intel_wait_for_vblank(dev);
+-
+-	REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
+-	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+-
+-	cdv_intel_wait_for_vblank(dev);
+-
+-	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+-	for (i = 0; i < 256; ++i)
+-		REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
+-}
+-
+-static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
+-				 struct drm_file *file_priv,
+-				 uint32_t handle,
+-				 uint32_t width, uint32_t height)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+-	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+-	uint32_t temp;
+-	size_t addr = 0;
+-	struct gtt_range *gt;
+-	struct drm_gem_object *obj;
+-	int ret;
+-
+-	/* if we want to turn of the cursor ignore width and height */
+-	if (!handle) {
+-		/* turn off the cursor */
+-		temp = CURSOR_MODE_DISABLE;
+-
+-		if (gma_power_begin(dev, false)) {
+-			REG_WRITE(control, temp);
+-			REG_WRITE(base, 0);
+-			gma_power_end(dev);
+-		}
+-
+-		/* unpin the old GEM object */
+-		if (psb_intel_crtc->cursor_obj) {
+-			gt = container_of(psb_intel_crtc->cursor_obj,
+-							struct gtt_range, gem);
+-			psb_gtt_unpin(gt);
+-			drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+-			psb_intel_crtc->cursor_obj = NULL;
+-		}
+-
+-		return 0;
+-	}
+-
+-	/* Currently we only support 64x64 cursors */
+-	if (width != 64 || height != 64) {
+-		dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
+-		return -EINVAL;
+-	}
+-
+-	obj = drm_gem_object_lookup(dev, file_priv, handle);
+-	if (!obj)
+-		return -ENOENT;
+-
+-	if (obj->size < width * height * 4) {
+-		dev_dbg(dev->dev, "buffer is to small\n");
+-		return -ENOMEM;
+-	}
+-
+-	gt = container_of(obj, struct gtt_range, gem);
+-
+-	/* Pin the memory into the GTT */
+-	ret = psb_gtt_pin(gt);
+-	if (ret) {
+-		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+-		return ret;
+-	}
+-
+-	addr = gt->offset;	/* Or resource.start ??? */
+-
+-	psb_intel_crtc->cursor_addr = addr;
+-
+-	temp = 0;
+-	/* set the pipe for the cursor */
+-	temp |= (pipe << 28);
+-	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+-
+-	if (gma_power_begin(dev, false)) {
+-		REG_WRITE(control, temp);
+-		REG_WRITE(base, addr);
+-		gma_power_end(dev);
+-	}
+-
+-	/* unpin the old GEM object */
+-	if (psb_intel_crtc->cursor_obj) {
+-		gt = container_of(psb_intel_crtc->cursor_obj,
+-							struct gtt_range, gem);
+-		psb_gtt_unpin(gt);
+-		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+-		psb_intel_crtc->cursor_obj = obj;
+-	}
+-	return 0;
+-}
+-
+-static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	uint32_t temp = 0;
+-	uint32_t adder;
+-
+-
+-	if (x < 0) {
+-		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+-		x = -x;
+-	}
+-	if (y < 0) {
+-		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+-		y = -y;
+-	}
+-
+-	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+-	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+-
+-	adder = psb_intel_crtc->cursor_addr;
+-
+-	if (gma_power_begin(dev, false)) {
+-		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+-		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
+-		gma_power_end(dev);
+-	}
+-	return 0;
+-}
+-
+-static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+-			 u16 *green, u16 *blue, uint32_t start, uint32_t size)
+-{
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int i;
+-	int end = (start + size > 256) ? 256 : start + size;
+-
+-	for (i = start; i < end; i++) {
+-		psb_intel_crtc->lut_r[i] = red[i] >> 8;
+-		psb_intel_crtc->lut_g[i] = green[i] >> 8;
+-		psb_intel_crtc->lut_b[i] = blue[i] >> 8;
+-	}
+-
+-	cdv_intel_crtc_load_lut(crtc);
+-}
+-
+-static int cdv_crtc_set_config(struct drm_mode_set *set)
+-{
+-	int ret = 0;
+-	struct drm_device *dev = set->crtc->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	if (!dev_priv->rpm_enabled)
+-		return drm_crtc_helper_set_config(set);
+-
+-	pm_runtime_forbid(&dev->pdev->dev);
+-
+-	ret = drm_crtc_helper_set_config(set);
+-
+-	pm_runtime_allow(&dev->pdev->dev);
+-
+-	return ret;
+-}
+-
+-/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+-
+-/* FIXME: why are we using this, should it be cdv_ in this tree ? */
+-
+-static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
+-{
+-	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+-	clock->p = clock->p1 * clock->p2;
+-	clock->vco = refclk * clock->m / (clock->n + 2);
+-	clock->dot = clock->vco / clock->p;
+-}
+-
+-/* Returns the clock of the currently programmed mode of the given pipe. */
+-static int cdv_intel_crtc_clock_get(struct drm_device *dev,
+-				struct drm_crtc *crtc)
+-{
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	u32 dpll;
+-	u32 fp;
+-	struct cdv_intel_clock_t clock;
+-	bool is_lvds;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	if (gma_power_begin(dev, false)) {
+-		dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+-		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+-			fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+-		else
+-			fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+-		is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+-		gma_power_end(dev);
+-	} else {
+-		dpll = (pipe == 0) ?
+-			dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
+-
+-		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+-			fp = (pipe == 0) ?
+-				dev_priv->saveFPA0 :
+-				dev_priv->saveFPB0;
+-		else
+-			fp = (pipe == 0) ?
+-				dev_priv->saveFPA1 :
+-				dev_priv->saveFPB1;
+-
+-		is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
+-	}
+-
+-	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+-	clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+-	clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+-
+-	if (is_lvds) {
+-		clock.p1 =
+-		    ffs((dpll &
+-			 DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+-			DPLL_FPA01_P1_POST_DIV_SHIFT);
+-		if (clock.p1 == 0) {
+-			clock.p1 = 4;
+-			dev_err(dev->dev, "PLL %d\n", dpll);
+-		}
+-		clock.p2 = 14;
+-
+-		if ((dpll & PLL_REF_INPUT_MASK) ==
+-		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+-			/* XXX: might not be 66MHz */
+-			i8xx_clock(66000, &clock);
+-		} else
+-			i8xx_clock(48000, &clock);
+-	} else {
+-		if (dpll & PLL_P1_DIVIDE_BY_TWO)
+-			clock.p1 = 2;
+-		else {
+-			clock.p1 =
+-			    ((dpll &
+-			      DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+-			     DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+-		}
+-		if (dpll & PLL_P2_DIVIDE_BY_4)
+-			clock.p2 = 4;
+-		else
+-			clock.p2 = 2;
+-
+-		i8xx_clock(48000, &clock);
+-	}
+-
+-	/* XXX: It would be nice to validate the clocks, but we can't reuse
+-	 * i830PllIsValid() because it relies on the xf86_config connector
+-	 * configuration being accurate, which it isn't necessarily.
+-	 */
+-
+-	return clock.dot;
+-}
+-
+-/** Returns the currently programmed mode of the given pipe. */
+-struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
+-					     struct drm_crtc *crtc)
+-{
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	struct drm_display_mode *mode;
+-	int htot;
+-	int hsync;
+-	int vtot;
+-	int vsync;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	if (gma_power_begin(dev, false)) {
+-		htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
+-		hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
+-		vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
+-		vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+-		gma_power_end(dev);
+-	} else {
+-		htot = (pipe == 0) ?
+-			dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
+-		hsync = (pipe == 0) ?
+-			dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
+-		vtot = (pipe == 0) ?
+-			dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
+-		vsync = (pipe == 0) ?
+-			dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
+-	}
+-
+-	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+-	if (!mode)
+-		return NULL;
+-
+-	mode->clock = cdv_intel_crtc_clock_get(dev, crtc);
+-	mode->hdisplay = (htot & 0xffff) + 1;
+-	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+-	mode->hsync_start = (hsync & 0xffff) + 1;
+-	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+-	mode->vdisplay = (vtot & 0xffff) + 1;
+-	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+-	mode->vsync_start = (vsync & 0xffff) + 1;
+-	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+-
+-	drm_mode_set_name(mode);
+-	drm_mode_set_crtcinfo(mode, 0);
+-
+-	return mode;
+-}
+-
+-static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
+-{
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-
+-	kfree(psb_intel_crtc->crtc_state);
+-	drm_crtc_cleanup(crtc);
+-	kfree(psb_intel_crtc);
+-}
+-
+-const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
+-	.dpms = cdv_intel_crtc_dpms,
+-	.mode_fixup = cdv_intel_crtc_mode_fixup,
+-	.mode_set = cdv_intel_crtc_mode_set,
+-	.mode_set_base = cdv_intel_pipe_set_base,
+-	.prepare = cdv_intel_crtc_prepare,
+-	.commit = cdv_intel_crtc_commit,
+-};
+-
+-const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
+-	.save = cdv_intel_crtc_save,
+-	.restore = cdv_intel_crtc_restore,
+-	.cursor_set = cdv_intel_crtc_cursor_set,
+-	.cursor_move = cdv_intel_crtc_cursor_move,
+-	.gamma_set = cdv_intel_crtc_gamma_set,
+-	.set_config = cdv_crtc_set_config,
+-	.destroy = cdv_intel_crtc_destroy,
+-};
+-
+-/*
+- * Set the default value of cursor control and base register
+- * to zero. This is a workaround for h/w defect on oaktrail
+- */
+-void cdv_intel_cursor_init(struct drm_device *dev, int pipe)
+-{
+-	uint32_t control;
+-	uint32_t base;
+-
+-	switch (pipe) {
+-	case 0:
+-		control = CURACNTR;
+-		base = CURABASE;
+-		break;
+-	case 1:
+-		control = CURBCNTR;
+-		base = CURBBASE;
+-		break;
+-	case 2:
+-		control = CURCCNTR;
+-		base = CURCBASE;
+-		break;
+-	default:
+-		return;
+-	}
+-
+-	REG_WRITE(control, 0);
+-	REG_WRITE(base, 0);
+-}
+-
+diff --git a/drivers/staging/gma500/cdv_intel_hdmi.c b/drivers/staging/gma500/cdv_intel_hdmi.c
+deleted file mode 100644
+index cbca2b0..0000000
+--- a/drivers/staging/gma500/cdv_intel_hdmi.c
++++ /dev/null
+@@ -1,376 +0,0 @@
+-/*
+- * Copyright © 2006-2011 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- *	jim liu <jim.liu at intel.com>
+- *
+- * FIXME:
+- *	We should probably make this generic and share it with Medfield
+- */
+-
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-#include <drm/drm_crtc.h>
+-#include <drm/drm_edid.h>
+-#include "psb_intel_drv.h"
+-#include "psb_drv.h"
+-#include "psb_intel_reg.h"
+-#include <linux/pm_runtime.h>
+-
+-/* hdmi control bits */
+-#define HDMI_NULL_PACKETS_DURING_VSYNC	(1 << 9)
+-#define HDMI_BORDER_ENABLE		(1 << 7)
+-#define HDMI_AUDIO_ENABLE		(1 << 6)
+-#define HDMI_VSYNC_ACTIVE_HIGH		(1 << 4)
+-#define HDMI_HSYNC_ACTIVE_HIGH		(1 << 3)
+-/* hdmi-b control bits */
+-#define	HDMIB_PIPE_B_SELECT		(1 << 30)
+-
+-
+-struct mid_intel_hdmi_priv {
+-	u32 hdmi_reg;
+-	u32 save_HDMIB;
+-	bool has_hdmi_sink;
+-	bool has_hdmi_audio;
+-	/* Should set this when detect hotplug */
+-	bool hdmi_device_connected;
+-	struct mdfld_hdmi_i2c *i2c_bus;
+-	struct i2c_adapter *hdmi_i2c_adapter;	/* for control functions */
+-	struct drm_device *dev;
+-};
+-
+-static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
+-			struct drm_display_mode *mode,
+-			struct drm_display_mode *adjusted_mode)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+-	struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
+-	u32 hdmib;
+-	struct drm_crtc *crtc = encoder->crtc;
+-	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+-
+-	hdmib = (2 << 10);
+-
+-	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+-		hdmib |= HDMI_VSYNC_ACTIVE_HIGH;
+-	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+-		hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
+-
+-	if (intel_crtc->pipe == 1)
+-		hdmib |= HDMIB_PIPE_B_SELECT;
+-
+-	if (hdmi_priv->has_hdmi_audio) {
+-		hdmib |= HDMI_AUDIO_ENABLE;
+-		hdmib |= HDMI_NULL_PACKETS_DURING_VSYNC;
+-	}
+-
+-	REG_WRITE(hdmi_priv->hdmi_reg, hdmib);
+-	REG_READ(hdmi_priv->hdmi_reg);
+-}
+-
+-static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
+-				  struct drm_display_mode *mode,
+-				  struct drm_display_mode *adjusted_mode)
+-{
+-	return true;
+-}
+-
+-static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+-	struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
+-	u32 hdmib;
+-
+-	hdmib = REG_READ(hdmi_priv->hdmi_reg);
+-
+-	if (mode != DRM_MODE_DPMS_ON)
+-		REG_WRITE(hdmi_priv->hdmi_reg, hdmib & ~HDMIB_PORT_EN);
+-	else
+-		REG_WRITE(hdmi_priv->hdmi_reg, hdmib | HDMIB_PORT_EN);
+-	REG_READ(hdmi_priv->hdmi_reg);
+-}
+-
+-static void cdv_hdmi_save(struct drm_connector *connector)
+-{
+-	struct drm_device *dev = connector->dev;
+-	struct psb_intel_output *output = to_psb_intel_output(connector);
+-	struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
+-
+-	hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
+-}
+-
+-static void cdv_hdmi_restore(struct drm_connector *connector)
+-{
+-	struct drm_device *dev = connector->dev;
+-	struct psb_intel_output *output = to_psb_intel_output(connector);
+-	struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
+-
+-	REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
+-	REG_READ(hdmi_priv->hdmi_reg);
+-}
+-
+-static enum drm_connector_status cdv_hdmi_detect(
+-				struct drm_connector *connector, bool force)
+-{
+-	struct psb_intel_output *psb_intel_output =
+-						to_psb_intel_output(connector);
+-	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_output->dev_priv;
+-	struct edid *edid = NULL;
+-	enum drm_connector_status status = connector_status_disconnected;
+-
+-	edid = drm_get_edid(&psb_intel_output->base,
+-			 psb_intel_output->hdmi_i2c_adapter);
+-
+-	hdmi_priv->has_hdmi_sink = false;
+-	hdmi_priv->has_hdmi_audio = false;
+-	if (edid) {
+-		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+-			status = connector_status_connected;
+-			hdmi_priv->has_hdmi_sink =
+-						drm_detect_hdmi_monitor(edid);
+-			hdmi_priv->has_hdmi_audio =
+-						drm_detect_monitor_audio(edid);
+-		}
+-
+-		psb_intel_output->base.display_info.raw_edid = NULL;
+-		kfree(edid);
+-	}
+-	return status;
+-}
+-
+-static int cdv_hdmi_set_property(struct drm_connector *connector,
+-				       struct drm_property *property,
+-				       uint64_t value)
+-{
+-	struct drm_encoder *encoder = connector->encoder;
+-
+-	if (!strcmp(property->name, "scaling mode") && encoder) {
+-		struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc);
+-		bool centre;
+-		uint64_t curValue;
+-
+-		if (!crtc)
+-			return -1;
+-
+-		switch (value) {
+-		case DRM_MODE_SCALE_FULLSCREEN:
+-			break;
+-		case DRM_MODE_SCALE_NO_SCALE:
+-			break;
+-		case DRM_MODE_SCALE_ASPECT:
+-			break;
+-		default:
+-			return -1;
+-		}
+-
+-		if (drm_connector_property_get_value(connector,
+-							property, &curValue))
+-			return -1;
+-
+-		if (curValue == value)
+-			return 0;
+-
+-		if (drm_connector_property_set_value(connector,
+-							property, value))
+-			return -1;
+-
+-		centre = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
+-			(value == DRM_MODE_SCALE_NO_SCALE);
+-
+-		if (crtc->saved_mode.hdisplay != 0 &&
+-		    crtc->saved_mode.vdisplay != 0) {
+-			if (centre) {
+-				if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode,
+-					    encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
+-					return -1;
+-			} else {
+-				struct drm_encoder_helper_funcs *helpers
+-						    = encoder->helper_private;
+-				helpers->mode_set(encoder, &crtc->saved_mode,
+-					     &crtc->saved_adjusted_mode);
+-			}
+-		}
+-	}
+-	return 0;
+-}
+-
+-/*
+- * Return the list of HDMI DDC modes if available.
+- */
+-static int cdv_hdmi_get_modes(struct drm_connector *connector)
+-{
+-	struct psb_intel_output *psb_intel_output =
+-					to_psb_intel_output(connector);
+-	struct edid *edid = NULL;
+-	int ret = 0;
+-
+-	edid = drm_get_edid(&psb_intel_output->base,
+-			 psb_intel_output->hdmi_i2c_adapter);
+-	if (edid) {
+-		drm_mode_connector_update_edid_property(&psb_intel_output->
+-							base, edid);
+-		ret = drm_add_edid_modes(&psb_intel_output->base, edid);
+-		kfree(edid);
+-	}
+-	return ret;
+-}
+-
+-static int cdv_hdmi_mode_valid(struct drm_connector *connector,
+-				 struct drm_display_mode *mode)
+-{
+-
+-	if (mode->clock > 165000)
+-		return MODE_CLOCK_HIGH;
+-	if (mode->clock < 20000)
+-		return MODE_CLOCK_HIGH;
+-
+-	/* just in case */
+-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+-		return MODE_NO_DBLESCAN;
+-
+-	/* just in case */
+-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+-		return MODE_NO_INTERLACE;
+-
+-	/*
+-	 * FIXME: for now we limit the size to 1680x1050 on CDV, otherwise it
+-	 * will go beyond the stolen memory size allocated to the framebuffer
+-	 */
+-	if (mode->hdisplay > 1680)
+-		return MODE_PANEL;
+-	if (mode->vdisplay > 1050)
+-		return MODE_PANEL;
+-	return MODE_OK;
+-}
+-
+-static void cdv_hdmi_destroy(struct drm_connector *connector)
+-{
+-	struct psb_intel_output *psb_intel_output =
+-					to_psb_intel_output(connector);
+-
+-	if (psb_intel_output->ddc_bus)
+-		psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+-	drm_sysfs_connector_remove(connector);
+-	drm_connector_cleanup(connector);
+-	kfree(connector);
+-}
+-
+-static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
+-	.dpms = cdv_hdmi_dpms,
+-	.mode_fixup = cdv_hdmi_mode_fixup,
+-	.prepare = psb_intel_encoder_prepare,
+-	.mode_set = cdv_hdmi_mode_set,
+-	.commit = psb_intel_encoder_commit,
+-};
+-
+-static const struct drm_connector_helper_funcs
+-					cdv_hdmi_connector_helper_funcs = {
+-	.get_modes = cdv_hdmi_get_modes,
+-	.mode_valid = cdv_hdmi_mode_valid,
+-	.best_encoder = psb_intel_best_encoder,
+-};
+-
+-static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
+-	.dpms = drm_helper_connector_dpms,
+-	.save = cdv_hdmi_save,
+-	.restore = cdv_hdmi_restore,
+-	.detect = cdv_hdmi_detect,
+-	.fill_modes = drm_helper_probe_single_connector_modes,
+-	.set_property = cdv_hdmi_set_property,
+-	.destroy = cdv_hdmi_destroy,
+-};
+-
+-void cdv_hdmi_init(struct drm_device *dev,
+-			struct psb_intel_mode_device *mode_dev, int reg)
+-{
+-	struct psb_intel_output *psb_intel_output;
+-	struct drm_connector *connector;
+-	struct drm_encoder *encoder;
+-	struct mid_intel_hdmi_priv *hdmi_priv;
+-	int ddc_bus;
+-
+-	psb_intel_output = kzalloc(sizeof(struct psb_intel_output) +
+-			       sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
+-	if (!psb_intel_output)
+-		return;
+-
+-	hdmi_priv = (struct mid_intel_hdmi_priv *)(psb_intel_output + 1);
+-	psb_intel_output->mode_dev = mode_dev;
+-	connector = &psb_intel_output->base;
+-	encoder = &psb_intel_output->enc;
+-	drm_connector_init(dev, &psb_intel_output->base,
+-			   &cdv_hdmi_connector_funcs,
+-			   DRM_MODE_CONNECTOR_DVID);
+-
+-	drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
+-			 DRM_MODE_ENCODER_TMDS);
+-
+-	drm_mode_connector_attach_encoder(&psb_intel_output->base,
+-					  &psb_intel_output->enc);
+-	psb_intel_output->type = INTEL_OUTPUT_HDMI;
+-	hdmi_priv->hdmi_reg = reg;
+-	hdmi_priv->has_hdmi_sink = false;
+-	psb_intel_output->dev_priv = hdmi_priv;
+-
+-	drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
+-	drm_connector_helper_add(connector,
+-				 &cdv_hdmi_connector_helper_funcs);
+-	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+-	connector->interlace_allowed = false;
+-	connector->doublescan_allowed = false;
+-
+-	drm_connector_attach_property(connector,
+-	    dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
+-
+-	switch (reg) {
+-	case SDVOB:
+-		ddc_bus = GPIOE;
+-		break;
+-	case SDVOC:
+-		ddc_bus = GPIOD;
+-		break;
+-	default:
+-		DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
+-		goto failed_ddc;
+-		break;
+-	}
+-
+-	psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
+-				ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
+-
+-	if (!psb_intel_output->ddc_bus) {
+-		dev_err(dev->dev, "No ddc adapter available!\n");
+-		goto failed_ddc;
+-	}
+-	psb_intel_output->hdmi_i2c_adapter =
+-				&(psb_intel_output->ddc_bus->adapter);
+-	hdmi_priv->dev = dev;
+-	drm_sysfs_connector_add(connector);
+-	return;
+-
+-failed_ddc:
+-	drm_encoder_cleanup(&psb_intel_output->enc);
+-	drm_connector_cleanup(&psb_intel_output->base);
+-	kfree(psb_intel_output);
+-}
+diff --git a/drivers/staging/gma500/cdv_intel_lvds.c b/drivers/staging/gma500/cdv_intel_lvds.c
+deleted file mode 100644
+index 988b2d0..0000000
+--- a/drivers/staging/gma500/cdv_intel_lvds.c
++++ /dev/null
+@@ -1,721 +0,0 @@
+-/*
+- * Copyright © 2006-2011 Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors:
+- *	Eric Anholt <eric at anholt.net>
+- *	Dave Airlie <airlied at linux.ie>
+- *	Jesse Barnes <jesse.barnes at intel.com>
+- */
+-
+-#include <linux/i2c.h>
+-#include <linux/dmi.h>
+-#include <drm/drmP.h>
+-
+-#include "intel_bios.h"
+-#include "psb_drv.h"
+-#include "psb_intel_drv.h"
+-#include "psb_intel_reg.h"
+-#include "power.h"
+-#include <linux/pm_runtime.h>
+-#include "cdv_device.h"
+-
+-/**
+- * LVDS I2C backlight control macros
+- */
+-#define BRIGHTNESS_MAX_LEVEL 100
+-#define BRIGHTNESS_MASK 0xFF
+-#define BLC_I2C_TYPE	0x01
+-#define BLC_PWM_TYPT	0x02
+-
+-#define BLC_POLARITY_NORMAL 0
+-#define BLC_POLARITY_INVERSE 1
+-
+-#define PSB_BLC_MAX_PWM_REG_FREQ       (0xFFFE)
+-#define PSB_BLC_MIN_PWM_REG_FREQ	(0x2)
+-#define PSB_BLC_PWM_PRECISION_FACTOR	(10)
+-#define PSB_BACKLIGHT_PWM_CTL_SHIFT	(16)
+-#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+-
+-struct cdv_intel_lvds_priv {
+-	/**
+-	 * Saved LVDO output states
+-	 */
+-	uint32_t savePP_ON;
+-	uint32_t savePP_OFF;
+-	uint32_t saveLVDS;
+-	uint32_t savePP_CONTROL;
+-	uint32_t savePP_CYCLE;
+-	uint32_t savePFIT_CONTROL;
+-	uint32_t savePFIT_PGM_RATIOS;
+-	uint32_t saveBLC_PWM_CTL;
+-};
+-
+-/*
+- * Returns the maximum level of the backlight duty cycle field.
+- */
+-static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 retval;
+-
+-	if (gma_power_begin(dev, false)) {
+-		retval = ((REG_READ(BLC_PWM_CTL) &
+-			  BACKLIGHT_MODULATION_FREQ_MASK) >>
+-			  BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+-
+-		gma_power_end(dev);
+-	} else
+-		retval = ((dev_priv->saveBLC_PWM_CTL &
+-			  BACKLIGHT_MODULATION_FREQ_MASK) >>
+-			  BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+-
+-	return retval;
+-}
+-
+-/*
+- * Set LVDS backlight level by I2C command
+- */
+-static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
+-					unsigned int level)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+-	u8 out_buf[2];
+-	unsigned int blc_i2c_brightness;
+-
+-	struct i2c_msg msgs[] = {
+-		{
+-			.addr = lvds_i2c_bus->slave_addr,
+-			.flags = 0,
+-			.len = 2,
+-			.buf = out_buf,
+-		}
+-	};
+-
+-	blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+-			     BRIGHTNESS_MASK /
+-			     BRIGHTNESS_MAX_LEVEL);
+-
+-	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+-		blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+-
+-	out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+-	out_buf[1] = (u8)blc_i2c_brightness;
+-
+-	if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
+-		return 0;
+-
+-	DRM_ERROR("I2C transfer error\n");
+-	return -1;
+-}
+-
+-
+-static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	u32 max_pwm_blc;
+-	u32 blc_pwm_duty_cycle;
+-
+-	max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
+-
+-	/*BLC_PWM_CTL Should be initiated while backlight device init*/
+-	BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
+-
+-	blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+-
+-	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+-		blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+-
+-	blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+-	REG_WRITE(BLC_PWM_CTL,
+-		  (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+-		  (blc_pwm_duty_cycle));
+-
+-	return 0;
+-}
+-
+-/*
+- * Set LVDS backlight level either by I2C or PWM
+- */
+-void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	if (!dev_priv->lvds_bl) {
+-		DRM_ERROR("NO LVDS Backlight Info\n");
+-		return;
+-	}
+-
+-	if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
+-		cdv_lvds_i2c_set_brightness(dev, level);
+-	else
+-		cdv_lvds_pwm_set_brightness(dev, level);
+-}
+-
+-/**
+- * Sets the backlight level.
+- *
+- * level backlight level, from 0 to cdv_intel_lvds_get_max_backlight().
+- */
+-static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 blc_pwm_ctl;
+-
+-	if (gma_power_begin(dev, false)) {
+-		blc_pwm_ctl =
+-			REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+-		REG_WRITE(BLC_PWM_CTL,
+-				(blc_pwm_ctl |
+-				(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+-		gma_power_end(dev);
+-	} else {
+-		blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
+-				~BACKLIGHT_DUTY_CYCLE_MASK;
+-		dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+-					(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+-	}
+-}
+-
+-/**
+- * Sets the power state for the panel.
+- */
+-static void cdv_intel_lvds_set_power(struct drm_device *dev,
+-				 struct psb_intel_output *output, bool on)
+-{
+-	u32 pp_status;
+-
+-	if (!gma_power_begin(dev, true))
+-		return;
+-
+-	if (on) {
+-		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+-			  POWER_TARGET_ON);
+-		do {
+-			pp_status = REG_READ(PP_STATUS);
+-		} while ((pp_status & PP_ON) == 0);
+-
+-		cdv_intel_lvds_set_backlight(dev,
+-					 output->
+-					 mode_dev->backlight_duty_cycle);
+-	} else {
+-		cdv_intel_lvds_set_backlight(dev, 0);
+-
+-		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+-			  ~POWER_TARGET_ON);
+-		do {
+-			pp_status = REG_READ(PP_STATUS);
+-		} while (pp_status & PP_ON);
+-	}
+-	gma_power_end(dev);
+-}
+-
+-static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+-	if (mode == DRM_MODE_DPMS_ON)
+-		cdv_intel_lvds_set_power(dev, output, true);
+-	else
+-		cdv_intel_lvds_set_power(dev, output, false);
+-	/* XXX: We never power down the LVDS pairs. */
+-}
+-
+-static void cdv_intel_lvds_save(struct drm_connector *connector)
+-{
+-}
+-
+-static void cdv_intel_lvds_restore(struct drm_connector *connector)
+-{
+-}
+-
+-int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
+-				 struct drm_display_mode *mode)
+-{
+-	struct psb_intel_output *psb_intel_output =
+-				to_psb_intel_output(connector);
+-	struct drm_display_mode *fixed_mode =
+-	    psb_intel_output->mode_dev->panel_fixed_mode;
+-
+-	/* just in case */
+-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+-		return MODE_NO_DBLESCAN;
+-
+-	/* just in case */
+-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+-		return MODE_NO_INTERLACE;
+-
+-	if (fixed_mode) {
+-		if (mode->hdisplay > fixed_mode->hdisplay)
+-			return MODE_PANEL;
+-		if (mode->vdisplay > fixed_mode->vdisplay)
+-			return MODE_PANEL;
+-	}
+-	return MODE_OK;
+-}
+-
+-bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+-				  struct drm_display_mode *mode,
+-				  struct drm_display_mode *adjusted_mode)
+-{
+-	struct psb_intel_mode_device *mode_dev =
+-	    enc_to_psb_intel_output(encoder)->mode_dev;
+-	struct drm_device *dev = encoder->dev;
+-	struct drm_encoder *tmp_encoder;
+-	struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+-
+-	/* Should never happen!! */
+-	list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+-			    head) {
+-		if (tmp_encoder != encoder
+-		    && tmp_encoder->crtc == encoder->crtc) {
+-			printk(KERN_ERR "Can't enable LVDS and another "
+-			       "encoder on the same pipe\n");
+-			return false;
+-		}
+-	}
+-
+-	/*
+-	 * If we have timings from the BIOS for the panel, put them in
+-	 * to the adjusted mode.  The CRTC will be set up for this mode,
+-	 * with the panel scaling set up to source from the H/VDisplay
+-	 * of the original mode.
+-	 */
+-	if (panel_fixed_mode != NULL) {
+-		adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+-		adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+-		adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+-		adjusted_mode->htotal = panel_fixed_mode->htotal;
+-		adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+-		adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+-		adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+-		adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+-		adjusted_mode->clock = panel_fixed_mode->clock;
+-		drm_mode_set_crtcinfo(adjusted_mode,
+-				      CRTC_INTERLACE_HALVE_V);
+-	}
+-
+-	/*
+-	 * XXX: It would be nice to support lower refresh rates on the
+-	 * panels to reduce power consumption, and perhaps match the
+-	 * user's requested refresh rate.
+-	 */
+-
+-	return true;
+-}
+-
+-static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+-	struct psb_intel_mode_device *mode_dev = output->mode_dev;
+-
+-	if (!gma_power_begin(dev, true))
+-		return;
+-
+-	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+-	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+-					  BACKLIGHT_DUTY_CYCLE_MASK);
+-
+-	cdv_intel_lvds_set_power(dev, output, false);
+-
+-	gma_power_end(dev);
+-}
+-
+-static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+-	struct psb_intel_mode_device *mode_dev = output->mode_dev;
+-
+-	if (mode_dev->backlight_duty_cycle == 0)
+-		mode_dev->backlight_duty_cycle =
+-		    cdv_intel_lvds_get_max_backlight(dev);
+-
+-	cdv_intel_lvds_set_power(dev, output, true);
+-}
+-
+-static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
+-				struct drm_display_mode *mode,
+-				struct drm_display_mode *adjusted_mode)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 pfit_control;
+-
+-	/*
+-	 * The LVDS pin pair will already have been turned on in the
+-	 * cdv_intel_crtc_mode_set since it has a large impact on the DPLL
+-	 * settings.
+-	 */
+-
+-	/*
+-	 * Enable automatic panel scaling so that non-native modes fill the
+-	 * screen.  Should be enabled before the pipe is enabled, according to
+-	 * register description and PRM.
+-	 */
+-	if (mode->hdisplay != adjusted_mode->hdisplay ||
+-	    mode->vdisplay != adjusted_mode->vdisplay)
+-		pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+-				HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+-				HORIZ_INTERP_BILINEAR);
+-	else
+-		pfit_control = 0;
+-
+-	if (dev_priv->lvds_dither)
+-		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+-
+-	REG_WRITE(PFIT_CONTROL, pfit_control);
+-}
+-
+-/**
+- * Detect the LVDS connection.
+- *
+- * This always returns CONNECTOR_STATUS_CONNECTED.
+- * This connector should only have
+- * been set up if the LVDS was actually connected anyway.
+- */
+-static enum drm_connector_status cdv_intel_lvds_detect(
+-				struct drm_connector *connector, bool force)
+-{
+-	return connector_status_connected;
+-}
+-
+-/**
+- * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+- */
+-static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
+-{
+-	struct drm_device *dev = connector->dev;
+-	struct psb_intel_output *psb_intel_output =
+-					to_psb_intel_output(connector);
+-	struct psb_intel_mode_device *mode_dev =
+-					psb_intel_output->mode_dev;
+-	int ret;
+-
+-	ret = psb_intel_ddc_get_modes(psb_intel_output);
+-
+-	if (ret)
+-		return ret;
+-
+-	/* Didn't get an EDID, so
+-	 * Set wide sync ranges so we get all modes
+-	 * handed to valid_mode for checking
+-	 */
+-	connector->display_info.min_vfreq = 0;
+-	connector->display_info.max_vfreq = 200;
+-	connector->display_info.min_hfreq = 0;
+-	connector->display_info.max_hfreq = 200;
+-	if (mode_dev->panel_fixed_mode != NULL) {
+-		struct drm_display_mode *mode =
+-		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+-		drm_mode_probed_add(connector, mode);
+-		return 1;
+-	}
+-
+-	return 0;
+-}
+-
+-/**
+- * cdv_intel_lvds_destroy - unregister and free LVDS structures
+- * @connector: connector to free
+- *
+- * Unregister the DDC bus for this connector then free the driver private
+- * structure.
+- */
+-void cdv_intel_lvds_destroy(struct drm_connector *connector)
+-{
+-	struct psb_intel_output *psb_intel_output =
+-					to_psb_intel_output(connector);
+-
+-	if (psb_intel_output->ddc_bus)
+-		psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+-	drm_sysfs_connector_remove(connector);
+-	drm_connector_cleanup(connector);
+-	kfree(connector);
+-}
+-
+-int cdv_intel_lvds_set_property(struct drm_connector *connector,
+-				       struct drm_property *property,
+-				       uint64_t value)
+-{
+-	struct drm_encoder *encoder = connector->encoder;
+-
+-	if (!strcmp(property->name, "scaling mode") && encoder) {
+-		struct psb_intel_crtc *crtc =
+-					to_psb_intel_crtc(encoder->crtc);
+-		uint64_t curValue;
+-
+-		if (!crtc)
+-			return -1;
+-
+-		switch (value) {
+-		case DRM_MODE_SCALE_FULLSCREEN:
+-			break;
+-		case DRM_MODE_SCALE_NO_SCALE:
+-			break;
+-		case DRM_MODE_SCALE_ASPECT:
+-			break;
+-		default:
+-			return -1;
+-		}
+-
+-		if (drm_connector_property_get_value(connector,
+-						     property,
+-						     &curValue))
+-			return -1;
+-
+-		if (curValue == value)
+-			return 0;
+-
+-		if (drm_connector_property_set_value(connector,
+-							property,
+-							value))
+-			return -1;
+-
+-		if (crtc->saved_mode.hdisplay != 0 &&
+-		    crtc->saved_mode.vdisplay != 0) {
+-			if (!drm_crtc_helper_set_mode(encoder->crtc,
+-						      &crtc->saved_mode,
+-						      encoder->crtc->x,
+-						      encoder->crtc->y,
+-						      encoder->crtc->fb))
+-				return -1;
+-		}
+-	} else if (!strcmp(property->name, "backlight") && encoder) {
+-		if (drm_connector_property_set_value(connector,
+-							property,
+-							value))
+-			return -1;
+-		else {
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-			struct drm_psb_private *dev_priv =
+-						encoder->dev->dev_private;
+-			struct backlight_device *bd =
+-						dev_priv->backlight_device;
+-			bd->props.brightness = value;
+-			backlight_update_status(bd);
+-#endif
+-		}
+-	} else if (!strcmp(property->name, "DPMS") && encoder) {
+-		struct drm_encoder_helper_funcs *helpers =
+-					encoder->helper_private;
+-		helpers->dpms(encoder, value);
+-	}
+-	return 0;
+-}
+-
+-static const struct drm_encoder_helper_funcs
+-					cdv_intel_lvds_helper_funcs = {
+-	.dpms = cdv_intel_lvds_encoder_dpms,
+-	.mode_fixup = cdv_intel_lvds_mode_fixup,
+-	.prepare = cdv_intel_lvds_prepare,
+-	.mode_set = cdv_intel_lvds_mode_set,
+-	.commit = cdv_intel_lvds_commit,
+-};
+-
+-static const struct drm_connector_helper_funcs
+-				cdv_intel_lvds_connector_helper_funcs = {
+-	.get_modes = cdv_intel_lvds_get_modes,
+-	.mode_valid = cdv_intel_lvds_mode_valid,
+-	.best_encoder = psb_intel_best_encoder,
+-};
+-
+-static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
+-	.dpms = drm_helper_connector_dpms,
+-	.save = cdv_intel_lvds_save,
+-	.restore = cdv_intel_lvds_restore,
+-	.detect = cdv_intel_lvds_detect,
+-	.fill_modes = drm_helper_probe_single_connector_modes,
+-	.set_property = cdv_intel_lvds_set_property,
+-	.destroy = cdv_intel_lvds_destroy,
+-};
+-
+-
+-static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
+-{
+-	drm_encoder_cleanup(encoder);
+-}
+-
+-const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
+-	.destroy = cdv_intel_lvds_enc_destroy,
+-};
+-
+-/**
+- * cdv_intel_lvds_init - setup LVDS connectors on this device
+- * @dev: drm device
+- *
+- * Create the connector, register the LVDS DDC bus, and try to figure out what
+- * modes we can display on the LVDS panel (if present).
+- */
+-void cdv_intel_lvds_init(struct drm_device *dev,
+-		     struct psb_intel_mode_device *mode_dev)
+-{
+-	struct psb_intel_output *psb_intel_output;
+-	struct cdv_intel_lvds_priv *lvds_priv;
+-	struct drm_connector *connector;
+-	struct drm_encoder *encoder;
+-	struct drm_display_mode *scan;
+-	struct drm_crtc *crtc;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 lvds;
+-	int pipe;
+-
+-	psb_intel_output = kzalloc(sizeof(struct psb_intel_output) +
+-			sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
+-	if (!psb_intel_output)
+-		return;
+-
+-	lvds_priv = (struct cdv_intel_lvds_priv *)(psb_intel_output + 1);
+-
+-	psb_intel_output->dev_priv = lvds_priv;
+-
+-	psb_intel_output->mode_dev = mode_dev;
+-	connector = &psb_intel_output->base;
+-	encoder = &psb_intel_output->enc;
+-
+-
+-	drm_connector_init(dev, &psb_intel_output->base,
+-			   &cdv_intel_lvds_connector_funcs,
+-			   DRM_MODE_CONNECTOR_LVDS);
+-
+-	drm_encoder_init(dev, &psb_intel_output->enc,
+-			 &cdv_intel_lvds_enc_funcs,
+-			 DRM_MODE_ENCODER_LVDS);
+-
+-
+-	drm_mode_connector_attach_encoder(&psb_intel_output->base,
+-					  &psb_intel_output->enc);
+-	psb_intel_output->type = INTEL_OUTPUT_LVDS;
+-
+-	drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
+-	drm_connector_helper_add(connector,
+-				 &cdv_intel_lvds_connector_helper_funcs);
+-	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+-	connector->interlace_allowed = false;
+-	connector->doublescan_allowed = false;
+-
+-	/*Attach connector properties*/
+-	drm_connector_attach_property(connector,
+-				      dev->mode_config.scaling_mode_property,
+-				      DRM_MODE_SCALE_FULLSCREEN);
+-	drm_connector_attach_property(connector,
+-				      dev_priv->backlight_property,
+-				      BRIGHTNESS_MAX_LEVEL);
+-
+-	/**
+-	 * Set up I2C bus
+-	 * FIXME: distroy i2c_bus when exit
+-	 */
+-	psb_intel_output->i2c_bus = psb_intel_i2c_create(dev,
+-							 GPIOB,
+-							 "LVDSBLC_B");
+-	if (!psb_intel_output->i2c_bus) {
+-		dev_printk(KERN_ERR,
+-			&dev->pdev->dev, "I2C bus registration failed.\n");
+-		goto failed_blc_i2c;
+-	}
+-	psb_intel_output->i2c_bus->slave_addr = 0x2C;
+-	dev_priv->lvds_i2c_bus =  psb_intel_output->i2c_bus;
+-
+-	/*
+-	 * LVDS discovery:
+-	 * 1) check for EDID on DDC
+-	 * 2) check for VBT data
+-	 * 3) check to see if LVDS is already on
+-	 *    if none of the above, no panel
+-	 * 4) make sure lid is open
+-	 *    if closed, act like it's not there for now
+-	 */
+-
+-	/* Set up the DDC bus. */
+-	psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
+-							 GPIOC,
+-							 "LVDSDDC_C");
+-	if (!psb_intel_output->ddc_bus) {
+-		dev_printk(KERN_ERR, &dev->pdev->dev,
+-			   "DDC bus registration " "failed.\n");
+-		goto failed_ddc;
+-	}
+-
+-	/*
+-	 * Attempt to get the fixed panel mode from DDC.  Assume that the
+-	 * preferred mode is the right one.
+-	 */
+-	psb_intel_ddc_get_modes(psb_intel_output);
+-	list_for_each_entry(scan, &connector->probed_modes, head) {
+-		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+-			mode_dev->panel_fixed_mode =
+-			    drm_mode_duplicate(dev, scan);
+-			goto out;	/* FIXME: check for quirks */
+-		}
+-	}
+-
+-	/* Failed to get EDID, what about VBT? do we need this?*/
+-	if (dev_priv->lfp_lvds_vbt_mode) {
+-		mode_dev->panel_fixed_mode =
+-			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
+-		if (mode_dev->panel_fixed_mode) {
+-			mode_dev->panel_fixed_mode->type |=
+-				DRM_MODE_TYPE_PREFERRED;
+-			goto out;	/* FIXME: check for quirks */
+-		}
+-	}
+-	/*
+-	 * If we didn't get EDID, try checking if the panel is already turned
+-	 * on.	If so, assume that whatever is currently programmed is the
+-	 * correct mode.
+-	 */
+-	lvds = REG_READ(LVDS);
+-	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+-	crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+-
+-	if (crtc && (lvds & LVDS_PORT_EN)) {
+-		mode_dev->panel_fixed_mode =
+-		    cdv_intel_crtc_mode_get(dev, crtc);
+-		if (mode_dev->panel_fixed_mode) {
+-			mode_dev->panel_fixed_mode->type |=
+-			    DRM_MODE_TYPE_PREFERRED;
+-			goto out;	/* FIXME: check for quirks */
+-		}
+-	}
+-
+-	/* If we still don't have a mode after all that, give up. */
+-	if (!mode_dev->panel_fixed_mode) {
+-		DRM_DEBUG
+-			("Found no modes on the lvds, ignoring the LVDS\n");
+-		goto failed_find;
+-	}
+-
+-out:
+-	drm_sysfs_connector_add(connector);
+-	return;
+-
+-failed_find:
+-	printk(KERN_ERR "Failed find\n");
+-	if (psb_intel_output->ddc_bus)
+-		psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+-failed_ddc:
+-	printk(KERN_ERR "Failed DDC\n");
+-	if (psb_intel_output->i2c_bus)
+-		psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
+-failed_blc_i2c:
+-	printk(KERN_ERR "Failed BLC\n");
+-	drm_encoder_cleanup(encoder);
+-	drm_connector_cleanup(connector);
+-	kfree(connector);
+-}
+diff --git a/drivers/staging/gma500/displays/hdmi.h b/drivers/staging/gma500/displays/hdmi.h
+deleted file mode 100644
+index d58ba9b..0000000
+--- a/drivers/staging/gma500/displays/hdmi.h
++++ /dev/null
+@@ -1,33 +0,0 @@
+-/*
+- * Copyright (c)  2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Thomas Eaton <thomas.g.eaton at intel.com>
+- * Scott Rowe <scott.m.rowe at intel.com>
+- */
+-
+-#ifndef HDMI_H
+-#define HDMI_H
+-
+-extern void hdmi_init(struct drm_device *dev);
+-
+-#endif
+diff --git a/drivers/staging/gma500/displays/pyr_cmd.h b/drivers/staging/gma500/displays/pyr_cmd.h
+deleted file mode 100644
+index 84bae5c..0000000
+--- a/drivers/staging/gma500/displays/pyr_cmd.h
++++ /dev/null
+@@ -1,34 +0,0 @@
+-/*
+- * Copyright (c)  2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Thomas Eaton <thomas.g.eaton at intel.com>
+- * Scott Rowe <scott.m.rowe at intel.com>
+- */
+-
+-#ifndef PYR_CMD_H
+-#define PYR_CMD_H
+-
+-extern void pyr_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+-
+-#endif
+-
+diff --git a/drivers/staging/gma500/displays/pyr_vid.h b/drivers/staging/gma500/displays/pyr_vid.h
+deleted file mode 100644
+index ce98860..0000000
+--- a/drivers/staging/gma500/displays/pyr_vid.h
++++ /dev/null
+@@ -1,34 +0,0 @@
+-/*
+- * Copyright (c)  2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Thomas Eaton <thomas.g.eaton at intel.com>
+- * Scott Rowe <scott.m.rowe at intel.com>
+-*/
+-
+-#ifndef PYR_VID_H
+-#define PYR_VID_H
+-
+-extern void pyr_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+-extern struct drm_display_mode *pyr_vid_get_config_mode(struct drm_device* dev);
+-
+-#endif
+diff --git a/drivers/staging/gma500/displays/tmd_cmd.h b/drivers/staging/gma500/displays/tmd_cmd.h
+deleted file mode 100644
+index 641e85e..0000000
+--- a/drivers/staging/gma500/displays/tmd_cmd.h
++++ /dev/null
+@@ -1,34 +0,0 @@
+-/*
+- * Copyright (c)  2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Thomas Eaton <thomas.g.eaton at intel.com>
+- * Scott Rowe <scott.m.rowe at intel.com>
+- */
+-
+-#ifndef TMD_CMD_H
+-#define TMD_CMD_H
+-
+-extern void tmd_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+-extern struct drm_display_mode *tmd_cmd_get_config_mode(struct drm_device *dev);
+-
+-#endif
+diff --git a/drivers/staging/gma500/displays/tmd_vid.h b/drivers/staging/gma500/displays/tmd_vid.h
+deleted file mode 100644
+index 7a5fa3b..0000000
+--- a/drivers/staging/gma500/displays/tmd_vid.h
++++ /dev/null
+@@ -1,34 +0,0 @@
+-/*
+- * Copyright (c)  2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Thomas Eaton <thomas.g.eaton at intel.com>
+- * Scott Rowe <scott.m.rowe at intel.com>
+-*/
+-
+-#ifndef TMD_VID_H
+-#define TMD_VID_H
+-
+-extern void tmd_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+-extern struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev);
+-
+-#endif
+diff --git a/drivers/staging/gma500/displays/tpo_cmd.h b/drivers/staging/gma500/displays/tpo_cmd.h
+deleted file mode 100644
+index 6105527..0000000
+--- a/drivers/staging/gma500/displays/tpo_cmd.h
++++ /dev/null
+@@ -1,35 +0,0 @@
+-/*
+- * Copyright (c)  2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Thomas Eaton <thomas.g.eaton at intel.com>
+- * Scott Rowe <scott.m.rowe at intel.com>
+-*/
+-
+-#ifndef TPO_CMD_H
+-#define TPO_CMD_H
+-
+-extern void tpo_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+-/* extern struct drm_display_mode * */
+-/* tpo_cmd_get_config_mode(struct drm_device *dev); */
+-
+-#endif
+diff --git a/drivers/staging/gma500/displays/tpo_vid.h b/drivers/staging/gma500/displays/tpo_vid.h
+deleted file mode 100644
+index c24f057..0000000
+--- a/drivers/staging/gma500/displays/tpo_vid.h
++++ /dev/null
+@@ -1,33 +0,0 @@
+-/*
+- * Copyright (c)  2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Thomas Eaton <thomas.g.eaton at intel.com>
+- * Scott Rowe <scott.m.rowe at intel.com>
+- */
+-
+-#ifndef TPO_VID_H
+-#define TPO_VID_H
+-
+-extern void tpo_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
+-
+-#endif
+diff --git a/drivers/staging/gma500/framebuffer.c b/drivers/staging/gma500/framebuffer.c
+deleted file mode 100644
+index 3f39a37..0000000
+--- a/drivers/staging/gma500/framebuffer.c
++++ /dev/null
+@@ -1,849 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2007-2011, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- **************************************************************************/
+-
+-#include <linux/module.h>
+-#include <linux/kernel.h>
+-#include <linux/errno.h>
+-#include <linux/string.h>
+-#include <linux/mm.h>
+-#include <linux/tty.h>
+-#include <linux/slab.h>
+-#include <linux/delay.h>
+-#include <linux/fb.h>
+-#include <linux/init.h>
+-#include <linux/console.h>
+-
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-#include <drm/drm_crtc.h>
+-
+-#include "psb_drv.h"
+-#include "psb_intel_reg.h"
+-#include "psb_intel_drv.h"
+-#include "framebuffer.h"
+-#include "gtt.h"
+-
+-#include "mdfld_output.h"
+-
+-static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
+-static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+-					      struct drm_file *file_priv,
+-					      unsigned int *handle);
+-
+-static const struct drm_framebuffer_funcs psb_fb_funcs = {
+-	.destroy = psb_user_framebuffer_destroy,
+-	.create_handle = psb_user_framebuffer_create_handle,
+-};
+-
+-#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
+-
+-static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
+-			   unsigned blue, unsigned transp,
+-			   struct fb_info *info)
+-{
+-	struct psb_fbdev *fbdev = info->par;
+-	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
+-	uint32_t v;
+-
+-	if (!fb)
+-		return -ENOMEM;
+-
+-	if (regno > 255)
+-		return 1;
+-
+-	red = CMAP_TOHW(red, info->var.red.length);
+-	blue = CMAP_TOHW(blue, info->var.blue.length);
+-	green = CMAP_TOHW(green, info->var.green.length);
+-	transp = CMAP_TOHW(transp, info->var.transp.length);
+-
+-	v = (red << info->var.red.offset) |
+-	    (green << info->var.green.offset) |
+-	    (blue << info->var.blue.offset) |
+-	    (transp << info->var.transp.offset);
+-
+-	if (regno < 16) {
+-		switch (fb->bits_per_pixel) {
+-		case 16:
+-			((uint32_t *) info->pseudo_palette)[regno] = v;
+-			break;
+-		case 24:
+-		case 32:
+-			((uint32_t *) info->pseudo_palette)[regno] = v;
+-			break;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
+-{
+-	struct psb_fbdev *fbdev = info->par;
+-	struct psb_framebuffer *psbfb = &fbdev->pfb;
+-	struct drm_device *dev = psbfb->base.dev;
+-
+-	/*
+-	 *	We have to poke our nose in here. The core fb code assumes
+-	 *	panning is part of the hardware that can be invoked before
+-	 *	the actual fb is mapped. In our case that isn't quite true.
+-	 */
+-	if (psbfb->gtt->npage)
+-        	psb_gtt_roll(dev, psbfb->gtt, var->yoffset);
+-	return 0;
+-}
+-
+-void psbfb_suspend(struct drm_device *dev)
+-{
+-	struct drm_framebuffer *fb = 0;
+-	struct psb_framebuffer *psbfb = to_psb_fb(fb);
+-
+-	console_lock();
+-	mutex_lock(&dev->mode_config.mutex);
+-	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+-		struct fb_info *info = psbfb->fbdev;
+-		fb_set_suspend(info, 1);
+-		drm_fb_helper_blank(FB_BLANK_POWERDOWN, info);
+-	}
+-	mutex_unlock(&dev->mode_config.mutex);
+-	console_unlock();
+-}
+-
+-void psbfb_resume(struct drm_device *dev)
+-{
+-	struct drm_framebuffer *fb = 0;
+-	struct psb_framebuffer *psbfb = to_psb_fb(fb);
+-
+-	console_lock();
+-	mutex_lock(&dev->mode_config.mutex);
+-	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+-		struct fb_info *info = psbfb->fbdev;
+-		fb_set_suspend(info, 0);
+-		drm_fb_helper_blank(FB_BLANK_UNBLANK, info);
+-	}
+-	mutex_unlock(&dev->mode_config.mutex);
+-	console_unlock();
+-	drm_helper_disable_unused_functions(dev);
+-}
+-
+-static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+-{
+-	struct psb_framebuffer *psbfb = vma->vm_private_data;
+-	struct drm_device *dev = psbfb->base.dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	int page_num;
+-	int i;
+-	unsigned long address;
+-	int ret;
+-	unsigned long pfn;
+-	/* FIXME: assumes fb at stolen base which may not be true */
+-	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
+-
+-	page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+-	address = (unsigned long)vmf->virtual_address;
+-
+-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-
+-	for (i = 0; i < page_num; i++) {
+-		pfn = (phys_addr >> PAGE_SHIFT);
+-
+-		ret = vm_insert_mixed(vma, address, pfn);
+-		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+-			break;
+-		else if (unlikely(ret != 0)) {
+-			ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+-			return ret;
+-		}
+-		address += PAGE_SIZE;
+-		phys_addr += PAGE_SIZE;
+-	}
+-	return VM_FAULT_NOPAGE;
+-}
+-
+-static void psbfb_vm_open(struct vm_area_struct *vma)
+-{
+-}
+-
+-static void psbfb_vm_close(struct vm_area_struct *vma)
+-{
+-}
+-
+-static struct vm_operations_struct psbfb_vm_ops = {
+-	.fault	= psbfb_vm_fault,
+-	.open	= psbfb_vm_open,
+-	.close	= psbfb_vm_close
+-};
+-
+-static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+-{
+-	struct psb_fbdev *fbdev = info->par;
+-	struct psb_framebuffer *psbfb = &fbdev->pfb;
+-
+-	if (vma->vm_pgoff != 0)
+-		return -EINVAL;
+-	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+-		return -EINVAL;
+-
+-	if (!psbfb->addr_space)
+-		psbfb->addr_space = vma->vm_file->f_mapping;
+-	/*
+-	 * If this is a GEM object then info->screen_base is the virtual
+-	 * kernel remapping of the object. FIXME: Review if this is
+-	 * suitable for our mmap work
+-	 */
+-	vma->vm_ops = &psbfb_vm_ops;
+-	vma->vm_private_data = (void *)psbfb;
+-	vma->vm_flags |= VM_RESERVED | VM_IO |
+-					VM_MIXEDMAP | VM_DONTEXPAND;
+-	return 0;
+-}
+-
+-static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
+-						unsigned long arg)
+-{
+-	return -ENOTTY;
+-}
+-
+-static struct fb_ops psbfb_ops = {
+-	.owner = THIS_MODULE,
+-	.fb_check_var = drm_fb_helper_check_var,
+-	.fb_set_par = drm_fb_helper_set_par,
+-	.fb_blank = drm_fb_helper_blank,
+-	.fb_setcolreg = psbfb_setcolreg,
+-	.fb_fillrect = cfb_fillrect,
+-	.fb_copyarea = psbfb_copyarea,
+-	.fb_imageblit = cfb_imageblit,
+-	.fb_mmap = psbfb_mmap,
+-	.fb_sync = psbfb_sync,
+-	.fb_ioctl = psbfb_ioctl,
+-};
+-
+-static struct fb_ops psbfb_roll_ops = {
+-	.owner = THIS_MODULE,
+-	.fb_check_var = drm_fb_helper_check_var,
+-	.fb_set_par = drm_fb_helper_set_par,
+-	.fb_blank = drm_fb_helper_blank,
+-	.fb_setcolreg = psbfb_setcolreg,
+-	.fb_fillrect = cfb_fillrect,
+-	.fb_copyarea = cfb_copyarea,
+-	.fb_imageblit = cfb_imageblit,
+-	.fb_pan_display = psbfb_pan,
+-	.fb_mmap = psbfb_mmap,
+-	.fb_sync = psbfb_sync,
+-	.fb_ioctl = psbfb_ioctl,
+-};
+-
+-static struct fb_ops psbfb_unaccel_ops = {
+-	.owner = THIS_MODULE,
+-	.fb_check_var = drm_fb_helper_check_var,
+-	.fb_set_par = drm_fb_helper_set_par,
+-	.fb_blank = drm_fb_helper_blank,
+-	.fb_setcolreg = psbfb_setcolreg,
+-	.fb_fillrect = cfb_fillrect,
+-	.fb_copyarea = cfb_copyarea,
+-	.fb_imageblit = cfb_imageblit,
+-	.fb_mmap = psbfb_mmap,
+-	.fb_ioctl = psbfb_ioctl,
+-};
+-
+-/**
+- *	psb_framebuffer_init	-	initialize a framebuffer
+- *	@dev: our DRM device
+- *	@fb: framebuffer to set up
+- *	@mode_cmd: mode description
+- *	@gt: backing object
+- *
+- *	Configure and fill in the boilerplate for our frame buffer. Return
+- *	0 on success or an error code if we fail.
+- */
+-static int psb_framebuffer_init(struct drm_device *dev,
+-					struct psb_framebuffer *fb,
+-					struct drm_mode_fb_cmd *mode_cmd,
+-					struct gtt_range *gt)
+-{
+-	int ret;
+-
+-	if (mode_cmd->pitch & 63)
+-		return -EINVAL;
+-	switch (mode_cmd->bpp) {
+-	case 8:
+-	case 16:
+-	case 24:
+-	case 32:
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-	ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
+-	if (ret) {
+-		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+-		return ret;
+-	}
+-	drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
+-	fb->gtt = gt;
+-	return 0;
+-}
+-
+-/**
+- *	psb_framebuffer_create	-	create a framebuffer backed by gt
+- *	@dev: our DRM device
+- *	@mode_cmd: the description of the requested mode
+- *	@gt: the backing object
+- *
+- *	Create a framebuffer object backed by the gt, and fill in the
+- *	boilerplate required
+- *
+- *	TODO: review object references
+- */
+-
+-static struct drm_framebuffer *psb_framebuffer_create
+-			(struct drm_device *dev,
+-			 struct drm_mode_fb_cmd *mode_cmd,
+-			 struct gtt_range *gt)
+-{
+-	struct psb_framebuffer *fb;
+-	int ret;
+-
+-	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+-	if (!fb)
+-		return ERR_PTR(-ENOMEM);
+-
+-	ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
+-	if (ret) {
+-		kfree(fb);
+-		return ERR_PTR(ret);
+-	}
+-	return &fb->base;
+-}
+-
+-/**
+- *	psbfb_alloc		-	allocate frame buffer memory
+- *	@dev: the DRM device
+- *	@aligned_size: space needed
+- *	@force: fall back to GEM buffers if need be
+- *
+- *	Allocate the frame buffer. In the usual case we get a GTT range that
+- *	is stolen memory backed and life is simple. If there isn't sufficient
+- *	stolen memory or the system has no stolen memory we allocate a range
+- *	and back it with a GEM object.
+- *
+- *	In this case the GEM object has no handle.
+- */
+-static struct gtt_range *psbfb_alloc(struct drm_device *dev,
+-						int aligned_size, int force)
+-{
+-	struct gtt_range *backing;
+-	/* Begin by trying to use stolen memory backing */
+-	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
+-	if (backing) {
+-		if (drm_gem_private_object_init(dev,
+-					&backing->gem, aligned_size) == 0)
+-			return backing;
+-		psb_gtt_free_range(dev, backing);
+-	}
+-	if (!force)
+-		return NULL;
+-
+-	/* Next try using GEM host memory */
+-	backing = psb_gtt_alloc_range(dev, aligned_size, "fb(gem)", 0);
+-	if (backing == NULL)
+-		return NULL;
+-
+-	/* Now back it with an object */
+-	if (drm_gem_object_init(dev, &backing->gem, aligned_size) != 0) {
+-		psb_gtt_free_range(dev, backing);
+-		return NULL;
+-	}
+-	return backing;
+-}
+-
+-/**
+- *	psbfb_create		-	create a framebuffer
+- *	@fbdev: the framebuffer device
+- *	@sizes: specification of the layout
+- *
+- *	Create a framebuffer to the specifications provided
+- */
+-static int psbfb_create(struct psb_fbdev *fbdev,
+-				struct drm_fb_helper_surface_size *sizes)
+-{
+-	struct drm_device *dev = fbdev->psb_fb_helper.dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct fb_info *info;
+-	struct drm_framebuffer *fb;
+-	struct psb_framebuffer *psbfb = &fbdev->pfb;
+-	struct drm_mode_fb_cmd mode_cmd;
+-	struct device *device = &dev->pdev->dev;
+-	int size;
+-	int ret;
+-	struct gtt_range *backing;
+-	int gtt_roll = 1;
+-
+-	mode_cmd.width = sizes->surface_width;
+-	mode_cmd.height = sizes->surface_height;
+-	mode_cmd.bpp = sizes->surface_bpp;
+-
+-	/* No 24bit packed */
+-	if (mode_cmd.bpp == 24)
+-		mode_cmd.bpp = 32;
+-
+-	/* Acceleration via the GTT requires pitch to be 4096 byte aligned 
+-	   (ie 1024 or 2048 pixels in normal use) */
+-	mode_cmd.pitch =  ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 4096);
+-	mode_cmd.depth = sizes->surface_depth;
+-
+-	size = mode_cmd.pitch * mode_cmd.height;
+-	size = ALIGN(size, PAGE_SIZE);
+-
+-	/* Allocate the framebuffer in the GTT with stolen page backing */
+-	backing = psbfb_alloc(dev, size, 0);
+-	if (backing == NULL) {
+-		/*
+-		 *	We couldn't get the space we wanted, fall back to the
+-		 *	display engine requirement instead.  The HW requires
+-		 *	the pitch to be 64 byte aligned
+-		 */
+-
+-		gtt_roll = 0;	/* Don't use GTT accelerated scrolling */
+-
+-		mode_cmd.pitch =  ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
+-		mode_cmd.depth = sizes->surface_depth;
+-
+-		size = mode_cmd.pitch * mode_cmd.height;
+-		size = ALIGN(size, PAGE_SIZE);
+-
+-		/* Allocate the framebuffer in the GTT with stolen page
+-		   backing when there is room */
+-		backing = psbfb_alloc(dev, size, 1);
+-		if (backing == NULL)
+-			return -ENOMEM;
+-	}
+-
+-	mutex_lock(&dev->struct_mutex);
+-
+-	info = framebuffer_alloc(0, device);
+-	if (!info) {
+-		ret = -ENOMEM;
+-		goto out_err1;
+-	}
+-	info->par = fbdev;
+-
+-	ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
+-	if (ret)
+-		goto out_unref;
+-
+-	fb = &psbfb->base;
+-	psbfb->fbdev = info;
+-
+-	fbdev->psb_fb_helper.fb = fb;
+-	fbdev->psb_fb_helper.fbdev = info;
+-
+-	strcpy(info->fix.id, "psbfb");
+-
+-	info->flags = FBINFO_DEFAULT;
+-	if (gtt_roll) {	/* GTT rolling seems best */
+-		info->fbops = &psbfb_roll_ops;
+-		info->flags |= FBINFO_HWACCEL_YPAN;
+-        }
+-	else if (dev_priv->ops->accel_2d)	/* 2D engine */
+-		info->fbops = &psbfb_ops;
+-	else	/* Software */
+-		info->fbops = &psbfb_unaccel_ops;
+-
+-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+-	if (ret) {
+-		ret = -ENOMEM;
+-		goto out_unref;
+-	}
+-
+-	info->fix.smem_start = dev->mode_config.fb_base;
+-	info->fix.smem_len = size;
+-	info->fix.ywrapstep = gtt_roll;
+-	info->fix.ypanstep = gtt_roll;
+-
+-	if (backing->stolen) {
+-		/* Accessed stolen memory directly */
+-		info->screen_base = (char *)dev_priv->vram_addr +
+-							backing->offset;
+-	} else {
+-		/* Pin the pages into the GTT and create a mapping to them */
+-		psb_gtt_pin(backing);
+-		info->screen_base = vm_map_ram(backing->pages, backing->npage,
+-				-1, PAGE_KERNEL);
+-		if (info->screen_base == NULL) {
+-			psb_gtt_unpin(backing);
+-			ret = -ENOMEM;
+-			goto out_unref;
+-		}
+-		psbfb->vm_map = 1;
+-	}
+-	info->screen_size = size;
+-
+-	if (dev_priv->gtt.stolen_size) {
+-		info->apertures = alloc_apertures(1);
+-		if (!info->apertures) {
+-			ret = -ENOMEM;
+-			goto out_unref;
+-		}
+-		info->apertures->ranges[0].base = dev->mode_config.fb_base;
+-		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
+-	}
+-
+-	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+-	drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
+-				sizes->fb_width, sizes->fb_height);
+-
+-	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
+-	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
+-
+-	info->pixmap.size = 64 * 1024;
+-	info->pixmap.buf_align = 8;
+-	info->pixmap.access_align = 32;
+-	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+-	info->pixmap.scan_align = 1;
+-
+-	dev_info(dev->dev, "allocated %dx%d fb\n",
+-					psbfb->base.width, psbfb->base.height);
+-
+-	mutex_unlock(&dev->struct_mutex);
+-	return 0;
+-out_unref:
+-	if (backing->stolen)
+-		psb_gtt_free_range(dev, backing);
+-	else {
+-		if (psbfb->vm_map)
+-			vm_unmap_ram(info->screen_base, backing->npage);
+-		drm_gem_object_unreference(&backing->gem);
+-	}
+-out_err1:
+-	mutex_unlock(&dev->struct_mutex);
+-	psb_gtt_free_range(dev, backing);
+-	return ret;
+-}
+-
+-/**
+- *	psb_user_framebuffer_create	-	create framebuffer
+- *	@dev: our DRM device
+- *	@filp: client file
+- *	@cmd: mode request
+- *
+- *	Create a new framebuffer backed by a userspace GEM object
+- */
+-static struct drm_framebuffer *psb_user_framebuffer_create
+-			(struct drm_device *dev, struct drm_file *filp,
+-			 struct drm_mode_fb_cmd *cmd)
+-{
+-	struct gtt_range *r;
+-	struct drm_gem_object *obj;
+-
+-	/*
+-	 *	Find the GEM object and thus the gtt range object that is
+-	 *	to back this space
+-	 */
+-	obj = drm_gem_object_lookup(dev, filp, cmd->handle);
+-	if (obj == NULL)
+-		return ERR_PTR(-ENOENT);
+-
+-	/* Let the core code do all the work */
+-	r = container_of(obj, struct gtt_range, gem);
+-	return psb_framebuffer_create(dev, cmd, r);
+-}
+-
+-static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+-							u16 blue, int regno)
+-{
+-}
+-
+-static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
+-					u16 *green, u16 *blue, int regno)
+-{
+-}
+-
+-static int psbfb_probe(struct drm_fb_helper *helper,
+-				struct drm_fb_helper_surface_size *sizes)
+-{
+-	struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
+-	int new_fb = 0;
+-	int ret;
+-
+-	if (!helper->fb) {
+-		ret = psbfb_create(psb_fbdev, sizes);
+-		if (ret)
+-			return ret;
+-		new_fb = 1;
+-	}
+-	return new_fb;
+-}
+-
+-struct drm_fb_helper_funcs psb_fb_helper_funcs = {
+-	.gamma_set = psbfb_gamma_set,
+-	.gamma_get = psbfb_gamma_get,
+-	.fb_probe = psbfb_probe,
+-};
+-
+-int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
+-{
+-	struct fb_info *info;
+-	struct psb_framebuffer *psbfb = &fbdev->pfb;
+-
+-	if (fbdev->psb_fb_helper.fbdev) {
+-		info = fbdev->psb_fb_helper.fbdev;
+-
+-		/* If this is our base framebuffer then kill any virtual map
+-		   for the framebuffer layer and unpin it */
+-		if (psbfb->vm_map) {
+-			vm_unmap_ram(info->screen_base, psbfb->gtt->npage);
+-			psb_gtt_unpin(psbfb->gtt);
+-		}
+-		unregister_framebuffer(info);
+-		if (info->cmap.len)
+-			fb_dealloc_cmap(&info->cmap);
+-		framebuffer_release(info);
+-	}
+-	drm_fb_helper_fini(&fbdev->psb_fb_helper);
+-	drm_framebuffer_cleanup(&psbfb->base);
+-
+-	if (psbfb->gtt)
+-		drm_gem_object_unreference(&psbfb->gtt->gem);
+-	return 0;
+-}
+-
+-int psb_fbdev_init(struct drm_device *dev)
+-{
+-	struct psb_fbdev *fbdev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
+-	if (!fbdev) {
+-		dev_err(dev->dev, "no memory\n");
+-		return -ENOMEM;
+-	}
+-
+-	dev_priv->fbdev = fbdev;
+-	fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
+-
+-	drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
+-							INTELFB_CONN_LIMIT);
+-
+-	drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
+-	drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
+-	return 0;
+-}
+-
+-void psb_fbdev_fini(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	if (!dev_priv->fbdev)
+-		return;
+-
+-	psb_fbdev_destroy(dev, dev_priv->fbdev);
+-	kfree(dev_priv->fbdev);
+-	dev_priv->fbdev = NULL;
+-}
+-
+-static void psbfb_output_poll_changed(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
+-	drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
+-}
+-
+-/**
+- *	psb_user_framebuffer_create_handle - add hamdle to a framebuffer
+- *	@fb: framebuffer
+- *	@file_priv: our DRM file
+- *	@handle: returned handle
+- *
+- *	Our framebuffer object is a GTT range which also contains a GEM
+- *	object. We need to turn it into a handle for userspace. GEM will do
+- *	the work for us
+- */
+-static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+-					      struct drm_file *file_priv,
+-					      unsigned int *handle)
+-{
+-	struct psb_framebuffer *psbfb = to_psb_fb(fb);
+-	struct gtt_range *r = psbfb->gtt;
+-	return drm_gem_handle_create(file_priv, &r->gem, handle);
+-}
+-
+-/**
+- *	psb_user_framebuffer_destroy	-	destruct user created fb
+- *	@fb: framebuffer
+- *
+- *	User framebuffers are backed by GEM objects so all we have to do is
+- *	clean up a bit and drop the reference, GEM will handle the fallout
+- */
+-static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
+-{
+-	struct psb_framebuffer *psbfb = to_psb_fb(fb);
+-	struct gtt_range *r = psbfb->gtt;
+-	struct drm_device *dev = fb->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct psb_fbdev *fbdev = dev_priv->fbdev;
+-	struct drm_crtc *crtc;
+-	int reset = 0;
+-
+-	/* Should never get stolen memory for a user fb */
+-	WARN_ON(r->stolen);
+-
+-	/* Check if we are erroneously live */
+-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+-		if (crtc->fb == fb)
+-			reset = 1;
+-
+-	if (reset)
+-		/*
+-		 * Now force a sane response before we permit the DRM CRTC
+-		 * layer to do stupid things like blank the display. Instead
+-		 * we reset this framebuffer as if the user had forced a reset.
+-		 * We must do this before the cleanup so that the DRM layer
+-		 * doesn't get a chance to stick its oar in where it isn't
+-		 * wanted.
+-		 */
+-		drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
+-
+-	/* Let DRM do its clean up */
+-	drm_framebuffer_cleanup(fb);
+-	/*  We are no longer using the resource in GEM */
+-	drm_gem_object_unreference_unlocked(&r->gem);
+-	kfree(fb);
+-}
+-
+-static const struct drm_mode_config_funcs psb_mode_funcs = {
+-	.fb_create = psb_user_framebuffer_create,
+-	.output_poll_changed = psbfb_output_poll_changed,
+-};
+-
+-static int psb_create_backlight_property(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct drm_property *backlight;
+-
+-	if (dev_priv->backlight_property)
+-		return 0;
+-
+-	backlight = drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-							"backlight", 2);
+-	backlight->values[0] = 0;
+-	backlight->values[1] = 100;
+-
+-	dev_priv->backlight_property = backlight;
+-
+-	return 0;
+-}
+-
+-static void psb_setup_outputs(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct drm_connector *connector;
+-
+-	drm_mode_create_scaling_mode_property(dev);
+-	psb_create_backlight_property(dev);
+-
+-	dev_priv->ops->output_init(dev);
+-
+-	list_for_each_entry(connector, &dev->mode_config.connector_list,
+-			    head) {
+-		struct psb_intel_output *psb_intel_output =
+-		    to_psb_intel_output(connector);
+-		struct drm_encoder *encoder = &psb_intel_output->enc;
+-		int crtc_mask = 0, clone_mask = 0;
+-
+-		/* valid crtcs */
+-		switch (psb_intel_output->type) {
+-		case INTEL_OUTPUT_ANALOG:
+-			crtc_mask = (1 << 0);
+-			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
+-			break;
+-		case INTEL_OUTPUT_SDVO:
+-			crtc_mask = ((1 << 0) | (1 << 1));
+-			clone_mask = (1 << INTEL_OUTPUT_SDVO);
+-			break;
+-		case INTEL_OUTPUT_LVDS:
+-			if (IS_MRST(dev))
+-				crtc_mask = (1 << 0);
+-			else
+-				crtc_mask = (1 << 1);
+-			clone_mask = (1 << INTEL_OUTPUT_LVDS);
+-			break;
+-		case INTEL_OUTPUT_MIPI:
+-			crtc_mask = (1 << 0);
+-			clone_mask = (1 << INTEL_OUTPUT_MIPI);
+-			break;
+-		case INTEL_OUTPUT_MIPI2:
+-			crtc_mask = (1 << 2);
+-			clone_mask = (1 << INTEL_OUTPUT_MIPI2);
+-			break;
+-		case INTEL_OUTPUT_HDMI:
+-		        /* HDMI on crtc 1 for SoC devices and crtc 0 for
+-                           Cedarview. HDMI on Poulsbo is only via external
+-			   logic */
+-			if (IS_MFLD(dev) || IS_MRST(dev))
+-				crtc_mask = (1 << 1);
+-			else
+-				crtc_mask = (1 << 0);	/* Cedarview */
+-			clone_mask = (1 << INTEL_OUTPUT_HDMI);
+-			break;
+-		}
+-		encoder->possible_crtcs = crtc_mask;
+-		encoder->possible_clones =
+-		    psb_intel_connector_clones(dev, clone_mask);
+-	}
+-}
+-
+-void psb_modeset_init(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv =
+-	    (struct drm_psb_private *) dev->dev_private;
+-	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+-	int i;
+-
+-	drm_mode_config_init(dev);
+-
+-	dev->mode_config.min_width = 0;
+-	dev->mode_config.min_height = 0;
+-
+-	dev->mode_config.funcs = (void *) &psb_mode_funcs;
+-
+-	/* set memory base */
+-	/* MRST and PSB should use BAR 2*/
+-	pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
+-					&(dev->mode_config.fb_base));
+-
+-	/* num pipes is 2 for PSB but 1 for Mrst */
+-	for (i = 0; i < dev_priv->num_pipe; i++)
+-		psb_intel_crtc_init(dev, i, mode_dev);
+-
+-	dev->mode_config.max_width = 2048;
+-	dev->mode_config.max_height = 2048;
+-
+-	psb_setup_outputs(dev);
+-}
+-
+-void psb_modeset_cleanup(struct drm_device *dev)
+-{
+-	mutex_lock(&dev->struct_mutex);
+-
+-	drm_kms_helper_poll_fini(dev);
+-	psb_fbdev_fini(dev);
+-	drm_mode_config_cleanup(dev);
+-
+-	mutex_unlock(&dev->struct_mutex);
+-}
+diff --git a/drivers/staging/gma500/framebuffer.h b/drivers/staging/gma500/framebuffer.h
+deleted file mode 100644
+index d1b2289..0000000
+--- a/drivers/staging/gma500/framebuffer.h
++++ /dev/null
+@@ -1,48 +0,0 @@
+-/*
+- * Copyright (c) 2008-2011, Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors:
+- *      Eric Anholt <eric at anholt.net>
+- *
+- */
+-
+-#ifndef _FRAMEBUFFER_H_
+-#define _FRAMEBUFFER_H_
+-
+-#include <drm/drmP.h>
+-#include <drm/drm_fb_helper.h>
+-
+-#include "psb_drv.h"
+-
+-struct psb_framebuffer {
+-	struct drm_framebuffer base;
+-	struct address_space *addr_space;
+-	struct fb_info *fbdev;
+-	struct gtt_range *gtt;
+-	bool vm_map;		/* True if we must undo a vm_map_ram */
+-};
+-
+-struct psb_fbdev {
+-	struct drm_fb_helper psb_fb_helper;
+-	struct psb_framebuffer pfb;
+-};
+-
+-#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
+-
+-extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
+-
+-#endif
+-
+diff --git a/drivers/staging/gma500/gem.c b/drivers/staging/gma500/gem.c
+deleted file mode 100644
+index f6433c0..0000000
+--- a/drivers/staging/gma500/gem.c
++++ /dev/null
+@@ -1,292 +0,0 @@
+-/*
+- *  psb GEM interface
+- *
+- * Copyright (c) 2011, Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors: Alan Cox
+- *
+- * TODO:
+- *	-	we need to work out if the MMU is relevant (eg for
+- *		accelerated operations on a GEM object)
+- */
+-
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-#include "psb_drm.h"
+-#include "psb_drv.h"
+-
+-int psb_gem_init_object(struct drm_gem_object *obj)
+-{
+-	return -EINVAL;
+-}
+-
+-void psb_gem_free_object(struct drm_gem_object *obj)
+-{
+-	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
+-	drm_gem_object_release_wrap(obj);
+-	/* This must occur last as it frees up the memory of the GEM object */
+-	psb_gtt_free_range(obj->dev, gtt);
+-}
+-
+-int psb_gem_get_aperture(struct drm_device *dev, void *data,
+-				struct drm_file *file)
+-{
+-	return -EINVAL;
+-}
+-
+-/**
+- *	psb_gem_dumb_map_gtt	-	buffer mapping for dumb interface
+- *	@file: our drm client file
+- *	@dev: drm device
+- *	@handle: GEM handle to the object (from dumb_create)
+- *
+- *	Do the necessary setup to allow the mapping of the frame buffer
+- *	into user memory. We don't have to do much here at the moment.
+- */
+-int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+-			 uint32_t handle, uint64_t *offset)
+-{
+-	int ret = 0;
+-	struct drm_gem_object *obj;
+-
+-	if (!(dev->driver->driver_features & DRIVER_GEM))
+-		return -ENODEV;
+-
+-	mutex_lock(&dev->struct_mutex);
+-
+-	/* GEM does all our handle to object mapping */
+-	obj = drm_gem_object_lookup(dev, file, handle);
+-	if (obj == NULL) {
+-		ret = -ENOENT;
+-		goto unlock;
+-	}
+-	/* What validation is needed here ? */
+-
+-	/* Make it mmapable */
+-	if (!obj->map_list.map) {
+-		ret = gem_create_mmap_offset(obj);
+-		if (ret)
+-			goto out;
+-	}
+-	/* GEM should really work out the hash offsets for us */
+-	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+-out:
+-	drm_gem_object_unreference(obj);
+-unlock:
+-	mutex_unlock(&dev->struct_mutex);
+-	return ret;
+-}
+-
+-/**
+- *	psb_gem_create		-	create a mappable object
+- *	@file: the DRM file of the client
+- *	@dev: our device
+- *	@size: the size requested
+- *	@handlep: returned handle (opaque number)
+- *
+- *	Create a GEM object, fill in the boilerplate and attach a handle to
+- *	it so that userspace can speak about it. This does the core work
+- *	for the various methods that do/will create GEM objects for things
+- */
+-static int psb_gem_create(struct drm_file *file,
+-	struct drm_device *dev, uint64_t size, uint32_t *handlep)
+-{
+-	struct gtt_range *r;
+-	int ret;
+-	u32 handle;
+-
+-	size = roundup(size, PAGE_SIZE);
+-
+-	/* Allocate our object - for now a direct gtt range which is not
+-	   stolen memory backed */
+-	r = psb_gtt_alloc_range(dev, size, "gem", 0);
+-	if (r == NULL) {
+-		dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
+-		return -ENOSPC;
+-	}
+-	/* Initialize the extra goodies GEM needs to do all the hard work */
+-	if (drm_gem_object_init(dev, &r->gem, size) != 0) {
+-		psb_gtt_free_range(dev, r);
+-		/* GEM doesn't give an error code so use -ENOMEM */
+-		dev_err(dev->dev, "GEM init failed for %lld\n", size);
+-		return -ENOMEM;
+-	}
+-	/* Give the object a handle so we can carry it more easily */
+-	ret = drm_gem_handle_create(file, &r->gem, &handle);
+-	if (ret) {
+-		dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
+-							&r->gem, size);
+-		drm_gem_object_release(&r->gem);
+-		psb_gtt_free_range(dev, r);
+-		return ret;
+-	}
+-	/* We have the initial and handle reference but need only one now */
+-	drm_gem_object_unreference(&r->gem);
+-	*handlep = handle;
+-	return 0;
+-}
+-
+-/**
+- *	psb_gem_dumb_create	-	create a dumb buffer
+- *	@drm_file: our client file
+- *	@dev: our device
+- *	@args: the requested arguments copied from userspace
+- *
+- *	Allocate a buffer suitable for use for a frame buffer of the
+- *	form described by user space. Give userspace a handle by which
+- *	to reference it.
+- */
+-int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+-			struct drm_mode_create_dumb *args)
+-{
+-	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+-	args->size = args->pitch * args->height;
+-	return psb_gem_create(file, dev, args->size, &args->handle);
+-}
+-
+-/**
+- *	psb_gem_dumb_destroy	-	destroy a dumb buffer
+- *	@file: client file
+- *	@dev: our DRM device
+- *	@handle: the object handle
+- *
+- *	Destroy a handle that was created via psb_gem_dumb_create, at least
+- *	we hope it was created that way. i915 seems to assume the caller
+- *	does the checking but that might be worth review ! FIXME
+- */
+-int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+-			uint32_t handle)
+-{
+-	/* No special work needed, drop the reference and see what falls out */
+-	return drm_gem_handle_delete(file, handle);
+-}
+-
+-/**
+- *	psb_gem_fault		-	pagefault handler for GEM objects
+- *	@vma: the VMA of the GEM object
+- *	@vmf: fault detail
+- *
+- *	Invoked when a fault occurs on an mmap of a GEM managed area. GEM
+- *	does most of the work for us including the actual map/unmap calls
+- *	but we need to do the actual page work.
+- *
+- *	This code eventually needs to handle faulting objects in and out
+- *	of the GTT and repacking it when we run out of space. We can put
+- *	that off for now and for our simple uses
+- *
+- *	The VMA was set up by GEM. In doing so it also ensured that the
+- *	vma->vm_private_data points to the GEM object that is backing this
+- *	mapping.
+- */
+-int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+-{
+-	struct drm_gem_object *obj;
+-	struct gtt_range *r;
+-	int ret;
+-	unsigned long pfn;
+-	pgoff_t page_offset;
+-	struct drm_device *dev;
+-	struct drm_psb_private *dev_priv;
+-
+-	obj = vma->vm_private_data;	/* GEM object */
+-	dev = obj->dev;
+-	dev_priv = dev->dev_private;
+-
+-	r = container_of(obj, struct gtt_range, gem);	/* Get the gtt range */
+-
+-	/* Make sure we don't parallel update on a fault, nor move or remove
+-	   something from beneath our feet */
+-	mutex_lock(&dev->struct_mutex);
+-
+-	/* For now the mmap pins the object and it stays pinned. As things
+-	   stand that will do us no harm */
+-	if (r->mmapping == 0) {
+-		ret = psb_gtt_pin(r);
+-		if (ret < 0) {
+-			dev_err(dev->dev, "gma500: pin failed: %d\n", ret);
+-			goto fail;
+-		}
+-		r->mmapping = 1;
+-	}
+-
+-	/* Page relative to the VMA start - we must calculate this ourselves
+-	   because vmf->pgoff is the fake GEM offset */
+-	page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
+-				>> PAGE_SHIFT;
+-
+-	/* CPU view of the page, don't go via the GART for CPU writes */
+-	if (r->stolen)
+-		pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
+-	else
+-		pfn = page_to_pfn(r->pages[page_offset]);
+-	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+-
+-fail:
+-	mutex_unlock(&dev->struct_mutex);
+-	switch (ret) {
+-	case 0:
+-	case -ERESTARTSYS:
+-	case -EINTR:
+-		return VM_FAULT_NOPAGE;
+-	case -ENOMEM:
+-		return VM_FAULT_OOM;
+-	default:
+-		return VM_FAULT_SIGBUS;
+-	}
+-}
+-
+-static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
+-						int size, u32 *handle)
+-{
+-	struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
+-	if (gtt == NULL)
+-		return -ENOMEM;
+-	if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
+-		goto free_gtt;
+-	if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
+-		return 0;
+-free_gtt:
+-	psb_gtt_free_range(dev, gtt);
+-	return -ENOMEM;
+-}
+-
+-/*
+- *	GEM interfaces for our specific client
+- */
+-int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+-					struct drm_file *file)
+-{
+-	struct drm_psb_gem_create *args = data;
+-	int ret;
+-	if (args->flags & PSB_GEM_CREATE_STOLEN) {
+-		ret = psb_gem_create_stolen(file, dev, args->size,
+-							&args->handle);
+-		if (ret == 0)
+-			return 0;
+-		/* Fall throguh */
+-		args->flags &= ~PSB_GEM_CREATE_STOLEN;
+-	}
+-	return psb_gem_create(file, dev, args->size, &args->handle);
+-}
+-
+-int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+-					struct drm_file *file)
+-{
+-	struct drm_psb_gem_mmap *args = data;
+-	return dev->driver->dumb_map_offset(file, dev,
+-						args->handle, &args->offset);
+-}
+-
+diff --git a/drivers/staging/gma500/gem_glue.c b/drivers/staging/gma500/gem_glue.c
+deleted file mode 100644
+index daac121..0000000
+--- a/drivers/staging/gma500/gem_glue.c
++++ /dev/null
+@@ -1,89 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2011, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- **************************************************************************/
+-
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-
+-void drm_gem_object_release_wrap(struct drm_gem_object *obj)
+-{
+-	/* Remove the list map if one is present */
+-	if (obj->map_list.map) {
+-		struct drm_gem_mm *mm = obj->dev->mm_private;
+-		struct drm_map_list *list = &obj->map_list;
+-		drm_ht_remove_item(&mm->offset_hash, &list->hash);
+-		drm_mm_put_block(list->file_offset_node);
+-		kfree(list->map);
+-		list->map = NULL;
+-	}
+-	drm_gem_object_release(obj);
+-}
+-
+-/**
+- *	gem_create_mmap_offset		-	invent an mmap offset
+- *	@obj: our object
+- *
+- *	Standard implementation of offset generation for mmap as is
+- *	duplicated in several drivers. This belongs in GEM.
+- */
+-int gem_create_mmap_offset(struct drm_gem_object *obj)
+-{
+-	struct drm_device *dev = obj->dev;
+-	struct drm_gem_mm *mm = dev->mm_private;
+-	struct drm_map_list *list;
+-	struct drm_local_map *map;
+-	int ret;
+-
+-	list = &obj->map_list;
+-	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+-	if (list->map == NULL)
+-		return -ENOMEM;
+-	map = list->map;
+-	map->type = _DRM_GEM;
+-	map->size = obj->size;
+-	map->handle = obj;
+-
+-	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+-					obj->size / PAGE_SIZE, 0, 0);
+-	if (!list->file_offset_node) {
+-		dev_err(dev->dev, "failed to allocate offset for bo %d\n",
+-								obj->name);
+-		ret = -ENOSPC;
+-		goto free_it;
+-	}
+-	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+-					obj->size / PAGE_SIZE, 0);
+-	if (!list->file_offset_node) {
+-		ret = -ENOMEM;
+-		goto free_it;
+-	}
+-	list->hash.key = list->file_offset_node->start;
+-	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+-	if (ret) {
+-		dev_err(dev->dev, "failed to add to map hash\n");
+-		goto free_mm;
+-	}
+-	return 0;
+-
+-free_mm:
+-	drm_mm_put_block(list->file_offset_node);
+-free_it:
+-	kfree(list->map);
+-	list->map = NULL;
+-	return ret;
+-}
+diff --git a/drivers/staging/gma500/gem_glue.h b/drivers/staging/gma500/gem_glue.h
+deleted file mode 100644
+index ce5ce30..0000000
+--- a/drivers/staging/gma500/gem_glue.h
++++ /dev/null
+@@ -1,2 +0,0 @@
+-extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
+-extern int gem_create_mmap_offset(struct drm_gem_object *obj);
+diff --git a/drivers/staging/gma500/gtt.c b/drivers/staging/gma500/gtt.c
+deleted file mode 100644
+index e770bd1..0000000
+--- a/drivers/staging/gma500/gtt.c
++++ /dev/null
+@@ -1,553 +0,0 @@
+-/*
+- * Copyright (c) 2007, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
+- *	    Alan Cox <alan at linux.intel.com>
+- */
+-
+-#include <drm/drmP.h>
+-#include "psb_drv.h"
+-
+-
+-/*
+- *	GTT resource allocator - manage page mappings in GTT space
+- */
+-
+-/**
+- *	psb_gtt_mask_pte	-	generate GTT pte entry
+- *	@pfn: page number to encode
+- *	@type: type of memory in the GTT
+- *
+- *	Set the GTT entry for the appropriate memory type.
+- */
+-static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
+-{
+-	uint32_t mask = PSB_PTE_VALID;
+-
+-	if (type & PSB_MMU_CACHED_MEMORY)
+-		mask |= PSB_PTE_CACHED;
+-	if (type & PSB_MMU_RO_MEMORY)
+-		mask |= PSB_PTE_RO;
+-	if (type & PSB_MMU_WO_MEMORY)
+-		mask |= PSB_PTE_WO;
+-
+-	return (pfn << PAGE_SHIFT) | mask;
+-}
+-
+-/**
+- *	psb_gtt_entry		-	find the GTT entries for a gtt_range
+- *	@dev: our DRM device
+- *	@r: our GTT range
+- *
+- *	Given a gtt_range object return the GTT offset of the page table
+- *	entries for this gtt_range
+- */
+-u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	unsigned long offset;
+-
+-	offset = r->resource.start - dev_priv->gtt_mem->start;
+-
+-	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
+-}
+-
+-/**
+- *	psb_gtt_insert	-	put an object into the GTT
+- *	@dev: our DRM device
+- *	@r: our GTT range
+- *
+- *	Take our preallocated GTT range and insert the GEM object into
+- *	the GTT. This is protected via the gtt mutex which the caller
+- *	must hold.
+- */
+-static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
+-{
+-	u32 *gtt_slot, pte;
+-	struct page **pages;
+-	int i;
+-
+-	if (r->pages == NULL) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-
+-	WARN_ON(r->stolen);	/* refcount these maybe ? */
+-
+-	gtt_slot = psb_gtt_entry(dev, r);
+-	pages = r->pages;
+-
+-	/* Make sure changes are visible to the GPU */
+-	set_pages_array_uc(pages, r->npage);
+-
+-	/* Write our page entries into the GTT itself */
+-	for (i = r->roll; i < r->npage; i++) {
+-		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+-		iowrite32(pte, gtt_slot++);
+-	}
+-	for (i = 0; i < r->roll; i++) {
+-		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+-		iowrite32(pte, gtt_slot++);
+-	}
+-	/* Make sure all the entries are set before we return */
+-	ioread32(gtt_slot - 1);
+-
+-	return 0;
+-}
+-
+-/**
+- *	psb_gtt_remove	-	remove an object from the GTT
+- *	@dev: our DRM device
+- *	@r: our GTT range
+- *
+- *	Remove a preallocated GTT range from the GTT. Overwrite all the
+- *	page table entries with the dummy page. This is protected via the gtt
+- *	mutex which the caller must hold.
+- */
+-static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 *gtt_slot, pte;
+-	int i;
+-
+-	WARN_ON(r->stolen);
+-
+-	gtt_slot = psb_gtt_entry(dev, r);
+-	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
+-
+-	for (i = 0; i < r->npage; i++)
+-		iowrite32(pte, gtt_slot++);
+-	ioread32(gtt_slot - 1);
+-	set_pages_array_wb(r->pages, r->npage);
+-}
+-
+-/**
+- *	psb_gtt_roll	-	set scrolling position
+- *	@dev: our DRM device
+- *	@r: the gtt mapping we are using
+- *	@roll: roll offset
+- *
+- *	Roll an existing pinned mapping by moving the pages through the GTT.
+- *	This allows us to implement hardware scrolling on the consoles without
+- *	a 2D engine
+- */
+-void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
+-{
+-	u32 *gtt_slot, pte;
+-	int i;
+-
+-	if (roll >= r->npage) {
+-		WARN_ON(1);
+-		return;
+-	}
+-
+-	r->roll = roll;
+-
+-	/* Not currently in the GTT - no worry we will write the mapping at
+-	   the right position when it gets pinned */
+-	if (!r->stolen && !r->in_gart)
+-		return;
+-
+-	gtt_slot = psb_gtt_entry(dev, r);
+-
+-	for (i = r->roll; i < r->npage; i++) {
+-		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+-		iowrite32(pte, gtt_slot++);
+-	}
+-	for (i = 0; i < r->roll; i++) {
+-		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+-		iowrite32(pte, gtt_slot++);
+-	}
+-	ioread32(gtt_slot - 1);
+-}
+-
+-/**
+- *	psb_gtt_attach_pages	-	attach and pin GEM pages
+- *	@gt: the gtt range
+- *
+- *	Pin and build an in kernel list of the pages that back our GEM object.
+- *	While we hold this the pages cannot be swapped out. This is protected
+- *	via the gtt mutex which the caller must hold.
+- */
+-static int psb_gtt_attach_pages(struct gtt_range *gt)
+-{
+-	struct inode *inode;
+-	struct address_space *mapping;
+-	int i;
+-	struct page *p;
+-	int pages = gt->gem.size / PAGE_SIZE;
+-
+-	WARN_ON(gt->pages);
+-
+-	/* This is the shared memory object that backs the GEM resource */
+-	inode = gt->gem.filp->f_path.dentry->d_inode;
+-	mapping = inode->i_mapping;
+-
+-	gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
+-	if (gt->pages == NULL)
+-		return -ENOMEM;
+-	gt->npage = pages;
+-
+-	for (i = 0; i < pages; i++) {
+-		/* FIXME: needs updating as per mail from Hugh Dickins */
+-		p = read_cache_page_gfp(mapping, i,
+-					__GFP_COLD | GFP_KERNEL);
+-		if (IS_ERR(p))
+-			goto err;
+-		gt->pages[i] = p;
+-	}
+-	return 0;
+-
+-err:
+-	while (i--)
+-		page_cache_release(gt->pages[i]);
+-	kfree(gt->pages);
+-	gt->pages = NULL;
+-	return PTR_ERR(p);
+-}
+-
+-/**
+- *	psb_gtt_detach_pages	-	attach and pin GEM pages
+- *	@gt: the gtt range
+- *
+- *	Undo the effect of psb_gtt_attach_pages. At this point the pages
+- *	must have been removed from the GTT as they could now be paged out
+- *	and move bus address. This is protected via the gtt mutex which the
+- *	caller must hold.
+- */
+-static void psb_gtt_detach_pages(struct gtt_range *gt)
+-{
+-	int i;
+-	for (i = 0; i < gt->npage; i++) {
+-		/* FIXME: do we need to force dirty */
+-		set_page_dirty(gt->pages[i]);
+-		page_cache_release(gt->pages[i]);
+-	}
+-	kfree(gt->pages);
+-	gt->pages = NULL;
+-}
+-
+-/**
+- *	psb_gtt_pin		-	pin pages into the GTT
+- *	@gt: range to pin
+- *
+- *	Pin a set of pages into the GTT. The pins are refcounted so that
+- *	multiple pins need multiple unpins to undo.
+- *
+- *	Non GEM backed objects treat this as a no-op as they are always GTT
+- *	backed objects.
+- */
+-int psb_gtt_pin(struct gtt_range *gt)
+-{
+-	int ret = 0;
+-	struct drm_device *dev = gt->gem.dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	mutex_lock(&dev_priv->gtt_mutex);
+-
+-	if (gt->in_gart == 0 && gt->stolen == 0) {
+-		ret = psb_gtt_attach_pages(gt);
+-		if (ret < 0)
+-			goto out;
+-		ret = psb_gtt_insert(dev, gt);
+-		if (ret < 0) {
+-			psb_gtt_detach_pages(gt);
+-			goto out;
+-		}
+-	}
+-	gt->in_gart++;
+-out:
+-	mutex_unlock(&dev_priv->gtt_mutex);
+-	return ret;
+-}
+-
+-/**
+- *	psb_gtt_unpin		-	Drop a GTT pin requirement
+- *	@gt: range to pin
+- *
+- *	Undoes the effect of psb_gtt_pin. On the last drop the GEM object
+- *	will be removed from the GTT which will also drop the page references
+- *	and allow the VM to clean up or page stuff.
+- *
+- *	Non GEM backed objects treat this as a no-op as they are always GTT
+- *	backed objects.
+- */
+-void psb_gtt_unpin(struct gtt_range *gt)
+-{
+-	struct drm_device *dev = gt->gem.dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	mutex_lock(&dev_priv->gtt_mutex);
+-
+-	WARN_ON(!gt->in_gart);
+-
+-	gt->in_gart--;
+-	if (gt->in_gart == 0 && gt->stolen == 0) {
+-		psb_gtt_remove(dev, gt);
+-		psb_gtt_detach_pages(gt);
+-	}
+-	mutex_unlock(&dev_priv->gtt_mutex);
+-}
+-
+-/*
+- *	GTT resource allocator - allocate and manage GTT address space
+- */
+-
+-/**
+- *	psb_gtt_alloc_range	-	allocate GTT address space
+- *	@dev: Our DRM device
+- *	@len: length (bytes) of address space required
+- *	@name: resource name
+- *	@backed: resource should be backed by stolen pages
+- *
+- *	Ask the kernel core to find us a suitable range of addresses
+- *	to use for a GTT mapping.
+- *
+- *	Returns a gtt_range structure describing the object, or NULL on
+- *	error. On successful return the resource is both allocated and marked
+- *	as in use.
+- */
+-struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
+-						const char *name, int backed)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct gtt_range *gt;
+-	struct resource *r = dev_priv->gtt_mem;
+-	int ret;
+-	unsigned long start, end;
+-
+-	if (backed) {
+-		/* The start of the GTT is the stolen pages */
+-		start = r->start;
+-		end = r->start + dev_priv->gtt.stolen_size - 1;
+-	} else {
+-		/* The rest we will use for GEM backed objects */
+-		start = r->start + dev_priv->gtt.stolen_size;
+-		end = r->end;
+-	}
+-
+-	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
+-	if (gt == NULL)
+-		return NULL;
+-	gt->resource.name = name;
+-	gt->stolen = backed;
+-	gt->in_gart = backed;
+-	gt->roll = 0;
+-	/* Ensure this is set for non GEM objects */
+-	gt->gem.dev = dev;
+-	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
+-				len, start, end, PAGE_SIZE, NULL, NULL);
+-	if (ret == 0) {
+-		gt->offset = gt->resource.start - r->start;
+-		return gt;
+-	}
+-	kfree(gt);
+-	return NULL;
+-}
+-
+-/**
+- *	psb_gtt_free_range	-	release GTT address space
+- *	@dev: our DRM device
+- *	@gt: a mapping created with psb_gtt_alloc_range
+- *
+- *	Release a resource that was allocated with psb_gtt_alloc_range. If the
+- *	object has been pinned by mmap users we clean this up here currently.
+- */
+-void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
+-{
+-	/* Undo the mmap pin if we are destroying the object */
+-	if (gt->mmapping) {
+-		psb_gtt_unpin(gt);
+-		gt->mmapping = 0;
+-	}
+-	WARN_ON(gt->in_gart && !gt->stolen);
+-	release_resource(&gt->resource);
+-	kfree(gt);
+-}
+-
+-void psb_gtt_alloc(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	init_rwsem(&dev_priv->gtt.sem);
+-}
+-
+-void psb_gtt_takedown(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	if (dev_priv->gtt_map) {
+-		iounmap(dev_priv->gtt_map);
+-		dev_priv->gtt_map = NULL;
+-	}
+-	if (dev_priv->gtt_initialized) {
+-		pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+-				      dev_priv->gmch_ctrl);
+-		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
+-		(void) PSB_RVDC32(PSB_PGETBL_CTL);
+-	}
+-	if (dev_priv->vram_addr)
+-		iounmap(dev_priv->gtt_map);
+-}
+-
+-int psb_gtt_init(struct drm_device *dev, int resume)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	unsigned gtt_pages;
+-	unsigned long stolen_size, vram_stolen_size;
+-	unsigned i, num_pages;
+-	unsigned pfn_base;
+-	uint32_t vram_pages;
+-	uint32_t dvmt_mode = 0;
+-	struct psb_gtt *pg;
+-
+-	int ret = 0;
+-	uint32_t pte;
+-
+-	mutex_init(&dev_priv->gtt_mutex);
+-
+-	psb_gtt_alloc(dev);
+-	pg = &dev_priv->gtt;
+-
+-	/* Enable the GTT */
+-	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
+-	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
+-			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+-
+-	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
+-	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+-	(void) PSB_RVDC32(PSB_PGETBL_CTL);
+-
+-	/* The root resource we allocate address space from */
+-	dev_priv->gtt_initialized = 1;
+-
+-	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
+-
+-	/*
+-	 *	The video mmu has a hw bug when accessing 0x0D0000000.
+-	 *	Make gatt start at 0x0e000,0000. This doesn't actually
+-	 *	matter for us but may do if the video acceleration ever
+-	 *	gets opened up.
+-	 */
+-	pg->mmu_gatt_start = 0xE0000000;
+-
+-	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
+-	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
+-								>> PAGE_SHIFT;
+-	/* Some CDV firmware doesn't report this currently. In which case the
+-	   system has 64 gtt pages */
+-	if (pg->gtt_start == 0 || gtt_pages == 0) {
+-		dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
+-		gtt_pages = 64;
+-		pg->gtt_start = dev_priv->pge_ctl;
+-	}
+-
+-	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
+-	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
+-								>> PAGE_SHIFT;
+-	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
+-
+-	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
+-		static struct resource fudge;	/* Preferably peppermint */
+-		/* This can occur on CDV SDV systems. Fudge it in this case.
+-		   We really don't care what imaginary space is being allocated
+-		   at this point */
+-		dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
+-		pg->gatt_start = 0x40000000;
+-		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
+-		/* This is a little confusing but in fact the GTT is providing
+-		   a view from the GPU into memory and not vice versa. As such
+-		   this is really allocating space that is not the same as the
+-		   CPU address space on CDV */
+-		fudge.start = 0x40000000;
+-		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
+-		fudge.name = "fudge";
+-		fudge.flags = IORESOURCE_MEM;
+-		dev_priv->gtt_mem = &fudge;
+-	}
+-
+-	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
+-	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
+-								- PAGE_SIZE;
+-
+-	stolen_size = vram_stolen_size;
+-
+-	printk(KERN_INFO "Stolen memory information\n");
+-	printk(KERN_INFO "       base in RAM: 0x%x\n", dev_priv->stolen_base);
+-	printk(KERN_INFO "       size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
+-		vram_stolen_size/1024);
+-	dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
+-	printk(KERN_INFO "      the correct size should be: %dM(dvmt mode=%d)\n",
+-		(dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
+-
+-	if (resume && (gtt_pages != pg->gtt_pages) &&
+-	    (stolen_size != pg->stolen_size)) {
+-		dev_err(dev->dev, "GTT resume error.\n");
+-		ret = -EINVAL;
+-		goto out_err;
+-	}
+-
+-	pg->gtt_pages = gtt_pages;
+-	pg->stolen_size = stolen_size;
+-	dev_priv->vram_stolen_size = vram_stolen_size;
+-
+-	/*
+-	 *	Map the GTT and the stolen memory area
+-	 */
+-	dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+-						gtt_pages << PAGE_SHIFT);
+-	if (!dev_priv->gtt_map) {
+-		dev_err(dev->dev, "Failure to map gtt.\n");
+-		ret = -ENOMEM;
+-		goto out_err;
+-	}
+-
+-	dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
+-	if (!dev_priv->vram_addr) {
+-		dev_err(dev->dev, "Failure to map stolen base.\n");
+-		ret = -ENOMEM;
+-		goto out_err;
+-	}
+-
+-	/*
+-	 * Insert vram stolen pages into the GTT
+-	 */
+-
+-	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
+-	vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
+-	printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
+-		num_pages, pfn_base << PAGE_SHIFT, 0);
+-	for (i = 0; i < num_pages; ++i) {
+-		pte = psb_gtt_mask_pte(pfn_base + i, 0);
+-		iowrite32(pte, dev_priv->gtt_map + i);
+-	}
+-
+-	/*
+-	 * Init rest of GTT to the scratch page to avoid accidents or scribbles
+-	 */
+-
+-	pfn_base = page_to_pfn(dev_priv->scratch_page);
+-	pte = psb_gtt_mask_pte(pfn_base, 0);
+-	for (; i < gtt_pages; ++i)
+-		iowrite32(pte, dev_priv->gtt_map + i);
+-
+-	(void) ioread32(dev_priv->gtt_map + i - 1);
+-	return 0;
+-
+-out_err:
+-	psb_gtt_takedown(dev);
+-	return ret;
+-}
+diff --git a/drivers/staging/gma500/gtt.h b/drivers/staging/gma500/gtt.h
+deleted file mode 100644
+index aa17423..0000000
+--- a/drivers/staging/gma500/gtt.h
++++ /dev/null
+@@ -1,64 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2007-2008, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- **************************************************************************/
+-
+-#ifndef _PSB_GTT_H_
+-#define _PSB_GTT_H_
+-
+-#include <drm/drmP.h>
+-
+-/* This wants cleaning up with respect to the psb_dev and un-needed stuff */
+-struct psb_gtt {
+-	uint32_t gatt_start;
+-	uint32_t mmu_gatt_start;
+-	uint32_t gtt_start;
+-	uint32_t gtt_phys_start;
+-	unsigned gtt_pages;
+-	unsigned gatt_pages;
+-	unsigned long stolen_size;
+-	unsigned long vram_stolen_size;
+-	struct rw_semaphore sem;
+-};
+-
+-/* Exported functions */
+-extern int psb_gtt_init(struct drm_device *dev, int resume);
+-extern void psb_gtt_takedown(struct drm_device *dev);
+-
+-/* Each gtt_range describes an allocation in the GTT area */
+-struct gtt_range {
+-	struct resource resource;	/* Resource for our allocation */
+-	u32 offset;			/* GTT offset of our object */
+-	struct drm_gem_object gem;	/* GEM high level stuff */
+-	int in_gart;			/* Currently in the GART (ref ct) */
+-	bool stolen;			/* Backed from stolen RAM */
+-	bool mmapping;			/* Is mmappable */
+-	struct page **pages;		/* Backing pages if present */
+-	int npage;			/* Number of backing pages */
+-	int roll;			/* Roll applied to the GTT entries */
+-};
+-
+-extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
+-						const char *name, int backed);
+-extern void psb_gtt_kref_put(struct gtt_range *gt);
+-extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
+-extern int psb_gtt_pin(struct gtt_range *gt);
+-extern void psb_gtt_unpin(struct gtt_range *gt);
+-extern void psb_gtt_roll(struct drm_device *dev,
+-					struct gtt_range *gt, int roll);
+-
+-#endif
+diff --git a/drivers/staging/gma500/intel_bios.c b/drivers/staging/gma500/intel_bios.c
+deleted file mode 100644
+index 096757f..0000000
+--- a/drivers/staging/gma500/intel_bios.c
++++ /dev/null
+@@ -1,303 +0,0 @@
+-/*
+- * Copyright (c) 2006 Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors:
+- *    Eric Anholt <eric at anholt.net>
+- *
+- */
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-#include "psb_drm.h"
+-#include "psb_drv.h"
+-#include "psb_intel_drv.h"
+-#include "psb_intel_reg.h"
+-#include "intel_bios.h"
+-
+-
+-static void *find_section(struct bdb_header *bdb, int section_id)
+-{
+-	u8 *base = (u8 *)bdb;
+-	int index = 0;
+-	u16 total, current_size;
+-	u8 current_id;
+-
+-	/* skip to first section */
+-	index += bdb->header_size;
+-	total = bdb->bdb_size;
+-
+-	/* walk the sections looking for section_id */
+-	while (index < total) {
+-		current_id = *(base + index);
+-		index++;
+-		current_size = *((u16 *)(base + index));
+-		index += 2;
+-		if (current_id == section_id)
+-			return base + index;
+-		index += current_size;
+-	}
+-
+-	return NULL;
+-}
+-
+-static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
+-			struct lvds_dvo_timing *dvo_timing)
+-{
+-	panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
+-		dvo_timing->hactive_lo;
+-	panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
+-		((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
+-	panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
+-		dvo_timing->hsync_pulse_width;
+-	panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
+-		((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
+-
+-	panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
+-		dvo_timing->vactive_lo;
+-	panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
+-		dvo_timing->vsync_off;
+-	panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
+-		dvo_timing->vsync_pulse_width;
+-	panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
+-		((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
+-	panel_fixed_mode->clock = dvo_timing->clock * 10;
+-	panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
+-
+-	/* Some VBTs have bogus h/vtotal values */
+-	if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
+-		panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
+-	if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
+-		panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
+-
+-	drm_mode_set_name(panel_fixed_mode);
+-}
+-
+-static void parse_backlight_data(struct drm_psb_private *dev_priv,
+-				struct bdb_header *bdb)
+-{
+-	struct bdb_lvds_backlight *vbt_lvds_bl = NULL;
+-	struct bdb_lvds_backlight *lvds_bl;
+-	u8 p_type = 0;
+-	void *bl_start = NULL;
+-	struct bdb_lvds_options *lvds_opts
+-				= find_section(bdb, BDB_LVDS_OPTIONS);
+-
+-	dev_priv->lvds_bl = NULL;
+-
+-	if (lvds_opts)
+-		p_type = lvds_opts->panel_type;
+-	else
+-		return;
+-
+-	bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
+-	vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
+-
+-	lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
+-	if (!lvds_bl) {
+-		dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
+-		return;
+-	}
+-	memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
+-	dev_priv->lvds_bl = lvds_bl;
+-}
+-
+-/* Try to find integrated panel data */
+-static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
+-			    struct bdb_header *bdb)
+-{
+-	struct bdb_lvds_options *lvds_options;
+-	struct bdb_lvds_lfp_data *lvds_lfp_data;
+-	struct bdb_lvds_lfp_data_entry *entry;
+-	struct lvds_dvo_timing *dvo_timing;
+-	struct drm_display_mode *panel_fixed_mode;
+-
+-	/* Defaults if we can't find VBT info */
+-	dev_priv->lvds_dither = 0;
+-	dev_priv->lvds_vbt = 0;
+-
+-	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
+-	if (!lvds_options)
+-		return;
+-
+-	dev_priv->lvds_dither = lvds_options->pixel_dither;
+-	if (lvds_options->panel_type == 0xff)
+-		return;
+-
+-	lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+-	if (!lvds_lfp_data)
+-		return;
+-
+-
+-	entry = &lvds_lfp_data->data[lvds_options->panel_type];
+-	dvo_timing = &entry->dvo_timing;
+-
+-	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
+-				      GFP_KERNEL);
+-	if (panel_fixed_mode == NULL) {
+-		dev_err(dev_priv->dev->dev, "out of memory for fixed panel mode\n");
+-		return;
+-	}
+-
+-	dev_priv->lvds_vbt = 1;
+-	fill_detail_timing_data(panel_fixed_mode, dvo_timing);
+-
+-	if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
+-		dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+-		drm_mode_debug_printmodeline(panel_fixed_mode);
+-	} else {
+-		dev_dbg(dev_priv->dev->dev, "ignoring invalid LVDS VBT\n");
+-		dev_priv->lvds_vbt = 0;
+-		kfree(panel_fixed_mode);
+-	}
+-	return;
+-}
+-
+-/* Try to find sdvo panel data */
+-static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
+-		      struct bdb_header *bdb)
+-{
+-	struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+-	struct lvds_dvo_timing *dvo_timing;
+-	struct drm_display_mode *panel_fixed_mode;
+-
+-	dev_priv->sdvo_lvds_vbt_mode = NULL;
+-
+-	sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+-	if (!sdvo_lvds_options)
+-		return;
+-
+-	dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
+-	if (!dvo_timing)
+-		return;
+-
+-	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+-
+-	if (!panel_fixed_mode)
+-		return;
+-
+-	fill_detail_timing_data(panel_fixed_mode,
+-			dvo_timing + sdvo_lvds_options->panel_type);
+-
+-	dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
+-
+-	return;
+-}
+-
+-static void parse_general_features(struct drm_psb_private *dev_priv,
+-		       struct bdb_header *bdb)
+-{
+-	struct bdb_general_features *general;
+-
+-	/* Set sensible defaults in case we can't find the general block */
+-	dev_priv->int_tv_support = 1;
+-	dev_priv->int_crt_support = 1;
+-
+-	general = find_section(bdb, BDB_GENERAL_FEATURES);
+-	if (general) {
+-		dev_priv->int_tv_support = general->int_tv_support;
+-		dev_priv->int_crt_support = general->int_crt_support;
+-		dev_priv->lvds_use_ssc = general->enable_ssc;
+-
+-		if (dev_priv->lvds_use_ssc) {
+-			dev_priv->lvds_ssc_freq
+-				= general->ssc_freq ? 100 : 96;
+-		}
+-	}
+-}
+-
+-/**
+- * psb_intel_init_bios - initialize VBIOS settings & find VBT
+- * @dev: DRM device
+- *
+- * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
+- * to appropriate values.
+- *
+- * VBT existence is a sanity check that is relied on by other i830_bios.c code.
+- * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
+- * feed an updated VBT back through that, compared to what we'll fetch using
+- * this method of groping around in the BIOS data.
+- *
+- * Returns 0 on success, nonzero on failure.
+- */
+-bool psb_intel_init_bios(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct pci_dev *pdev = dev->pdev;
+-	struct vbt_header *vbt = NULL;
+-	struct bdb_header *bdb;
+-	u8 __iomem *bios;
+-	size_t size;
+-	int i;
+-
+-	bios = pci_map_rom(pdev, &size);
+-	if (!bios)
+-		return -1;
+-
+-	/* Scour memory looking for the VBT signature */
+-	for (i = 0; i + 4 < size; i++) {
+-		if (!memcmp(bios + i, "$VBT", 4)) {
+-			vbt = (struct vbt_header *)(bios + i);
+-			break;
+-		}
+-	}
+-
+-	if (!vbt) {
+-		dev_err(dev->dev, "VBT signature missing\n");
+-		pci_unmap_rom(pdev, bios);
+-		return -1;
+-	}
+-
+-	bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+-
+-	/* Grab useful general definitions */
+-	parse_general_features(dev_priv, bdb);
+-	parse_lfp_panel_data(dev_priv, bdb);
+-	parse_sdvo_panel_data(dev_priv, bdb);
+-	parse_backlight_data(dev_priv, bdb);
+-
+-	pci_unmap_rom(pdev, bios);
+-
+-	return 0;
+-}
+-
+-/**
+- * Destroy and free VBT data
+- */
+-void psb_intel_destroy_bios(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct drm_display_mode *sdvo_lvds_vbt_mode =
+-				dev_priv->sdvo_lvds_vbt_mode;
+-	struct drm_display_mode *lfp_lvds_vbt_mode =
+-				dev_priv->lfp_lvds_vbt_mode;
+-	struct bdb_lvds_backlight *lvds_bl =
+-				dev_priv->lvds_bl;
+-
+-	/*free sdvo panel mode*/
+-	if (sdvo_lvds_vbt_mode) {
+-		dev_priv->sdvo_lvds_vbt_mode = NULL;
+-		kfree(sdvo_lvds_vbt_mode);
+-	}
+-
+-	if (lfp_lvds_vbt_mode) {
+-		dev_priv->lfp_lvds_vbt_mode = NULL;
+-		kfree(lfp_lvds_vbt_mode);
+-	}
+-
+-	if (lvds_bl) {
+-		dev_priv->lvds_bl = NULL;
+-		kfree(lvds_bl);
+-	}
+-}
+diff --git a/drivers/staging/gma500/intel_bios.h b/drivers/staging/gma500/intel_bios.h
+deleted file mode 100644
+index 70f1bf0..0000000
+--- a/drivers/staging/gma500/intel_bios.h
++++ /dev/null
+@@ -1,430 +0,0 @@
+-/*
+- * Copyright (c) 2006 Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors:
+- *    Eric Anholt <eric at anholt.net>
+- *
+- */
+-
+-#ifndef _I830_BIOS_H_
+-#define _I830_BIOS_H_
+-
+-#include <drm/drmP.h>
+-
+-struct vbt_header {
+-	u8 signature[20];		/**< Always starts with 'VBT$' */
+-	u16 version;			/**< decimal */
+-	u16 header_size;		/**< in bytes */
+-	u16 vbt_size;			/**< in bytes */
+-	u8 vbt_checksum;
+-	u8 reserved0;
+-	u32 bdb_offset;			/**< from beginning of VBT */
+-	u32 aim_offset[4];		/**< from beginning of VBT */
+-} __attribute__((packed));
+-
+-
+-struct bdb_header {
+-	u8 signature[16];		/**< Always 'BIOS_DATA_BLOCK' */
+-	u16 version;			/**< decimal */
+-	u16 header_size;		/**< in bytes */
+-	u16 bdb_size;			/**< in bytes */
+-};
+-
+-/* strictly speaking, this is a "skip" block, but it has interesting info */
+-struct vbios_data {
+-	u8 type; /* 0 == desktop, 1 == mobile */
+-	u8 relstage;
+-	u8 chipset;
+-	u8 lvds_present:1;
+-	u8 tv_present:1;
+-	u8 rsvd2:6; /* finish byte */
+-	u8 rsvd3[4];
+-	u8 signon[155];
+-	u8 copyright[61];
+-	u16 code_segment;
+-	u8 dos_boot_mode;
+-	u8 bandwidth_percent;
+-	u8 rsvd4; /* popup memory size */
+-	u8 resize_pci_bios;
+-	u8 rsvd5; /* is crt already on ddc2 */
+-} __attribute__((packed));
+-
+-/*
+- * There are several types of BIOS data blocks (BDBs), each block has
+- * an ID and size in the first 3 bytes (ID in first, size in next 2).
+- * Known types are listed below.
+- */
+-#define BDB_GENERAL_FEATURES	  1
+-#define BDB_GENERAL_DEFINITIONS	  2
+-#define BDB_OLD_TOGGLE_LIST	  3
+-#define BDB_MODE_SUPPORT_LIST	  4
+-#define BDB_GENERIC_MODE_TABLE	  5
+-#define BDB_EXT_MMIO_REGS	  6
+-#define BDB_SWF_IO		  7
+-#define BDB_SWF_MMIO		  8
+-#define BDB_DOT_CLOCK_TABLE	  9
+-#define BDB_MODE_REMOVAL_TABLE	 10
+-#define BDB_CHILD_DEVICE_TABLE	 11
+-#define BDB_DRIVER_FEATURES	 12
+-#define BDB_DRIVER_PERSISTENCE	 13
+-#define BDB_EXT_TABLE_PTRS	 14
+-#define BDB_DOT_CLOCK_OVERRIDE	 15
+-#define BDB_DISPLAY_SELECT	 16
+-/* 17 rsvd */
+-#define BDB_DRIVER_ROTATION	 18
+-#define BDB_DISPLAY_REMOVE	 19
+-#define BDB_OEM_CUSTOM		 20
+-#define BDB_EFP_LIST		 21 /* workarounds for VGA hsync/vsync */
+-#define BDB_SDVO_LVDS_OPTIONS	 22
+-#define BDB_SDVO_PANEL_DTDS	 23
+-#define BDB_SDVO_LVDS_PNP_IDS	 24
+-#define BDB_SDVO_LVDS_POWER_SEQ	 25
+-#define BDB_TV_OPTIONS		 26
+-#define BDB_LVDS_OPTIONS	 40
+-#define BDB_LVDS_LFP_DATA_PTRS	 41
+-#define BDB_LVDS_LFP_DATA	 42
+-#define BDB_LVDS_BACKLIGHT	 43
+-#define BDB_LVDS_POWER		 44
+-#define BDB_SKIP		254 /* VBIOS private block, ignore */
+-
+-struct bdb_general_features {
+-	/* bits 1 */
+-	u8 panel_fitting:2;
+-	u8 flexaim:1;
+-	u8 msg_enable:1;
+-	u8 clear_screen:3;
+-	u8 color_flip:1;
+-
+-	/* bits 2 */
+-	u8 download_ext_vbt:1;
+-	u8 enable_ssc:1;
+-	u8 ssc_freq:1;
+-	u8 enable_lfp_on_override:1;
+-	u8 disable_ssc_ddt:1;
+-	u8 rsvd8:3; /* finish byte */
+-
+-	/* bits 3 */
+-	u8 disable_smooth_vision:1;
+-	u8 single_dvi:1;
+-	u8 rsvd9:6; /* finish byte */
+-
+-	/* bits 4 */
+-	u8 legacy_monitor_detect;
+-
+-	/* bits 5 */
+-	u8 int_crt_support:1;
+-	u8 int_tv_support:1;
+-	u8 rsvd11:6; /* finish byte */
+-} __attribute__((packed));
+-
+-struct bdb_general_definitions {
+-	/* DDC GPIO */
+-	u8 crt_ddc_gmbus_pin;
+-
+-	/* DPMS bits */
+-	u8 dpms_acpi:1;
+-	u8 skip_boot_crt_detect:1;
+-	u8 dpms_aim:1;
+-	u8 rsvd1:5; /* finish byte */
+-
+-	/* boot device bits */
+-	u8 boot_display[2];
+-	u8 child_dev_size;
+-
+-	/* device info */
+-	u8 tv_or_lvds_info[33];
+-	u8 dev1[33];
+-	u8 dev2[33];
+-	u8 dev3[33];
+-	u8 dev4[33];
+-	/* may be another device block here on some platforms */
+-};
+-
+-struct bdb_lvds_options {
+-	u8 panel_type;
+-	u8 rsvd1;
+-	/* LVDS capabilities, stored in a dword */
+-	u8 pfit_mode:2;
+-	u8 pfit_text_mode_enhanced:1;
+-	u8 pfit_gfx_mode_enhanced:1;
+-	u8 pfit_ratio_auto:1;
+-	u8 pixel_dither:1;
+-	u8 lvds_edid:1;
+-	u8 rsvd2:1;
+-	u8 rsvd4;
+-} __attribute__((packed));
+-
+-struct bdb_lvds_backlight {
+-	u8 type:2;
+-	u8 pol:1;
+-	u8 gpio:3;
+-	u8 gmbus:2;
+-	u16 freq;
+-	u8 minbrightness;
+-	u8 i2caddr;
+-	u8 brightnesscmd;
+-	/*FIXME: more...*/
+-} __attribute__((packed));
+-
+-/* LFP pointer table contains entries to the struct below */
+-struct bdb_lvds_lfp_data_ptr {
+-	u16 fp_timing_offset; /* offsets are from start of bdb */
+-	u8 fp_table_size;
+-	u16 dvo_timing_offset;
+-	u8 dvo_table_size;
+-	u16 panel_pnp_id_offset;
+-	u8 pnp_table_size;
+-} __attribute__((packed));
+-
+-struct bdb_lvds_lfp_data_ptrs {
+-	u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+-	struct bdb_lvds_lfp_data_ptr ptr[16];
+-} __attribute__((packed));
+-
+-/* LFP data has 3 blocks per entry */
+-struct lvds_fp_timing {
+-	u16 x_res;
+-	u16 y_res;
+-	u32 lvds_reg;
+-	u32 lvds_reg_val;
+-	u32 pp_on_reg;
+-	u32 pp_on_reg_val;
+-	u32 pp_off_reg;
+-	u32 pp_off_reg_val;
+-	u32 pp_cycle_reg;
+-	u32 pp_cycle_reg_val;
+-	u32 pfit_reg;
+-	u32 pfit_reg_val;
+-	u16 terminator;
+-} __attribute__((packed));
+-
+-struct lvds_dvo_timing {
+-	u16 clock;		/**< In 10khz */
+-	u8 hactive_lo;
+-	u8 hblank_lo;
+-	u8 hblank_hi:4;
+-	u8 hactive_hi:4;
+-	u8 vactive_lo;
+-	u8 vblank_lo;
+-	u8 vblank_hi:4;
+-	u8 vactive_hi:4;
+-	u8 hsync_off_lo;
+-	u8 hsync_pulse_width;
+-	u8 vsync_pulse_width:4;
+-	u8 vsync_off:4;
+-	u8 rsvd0:6;
+-	u8 hsync_off_hi:2;
+-	u8 h_image;
+-	u8 v_image;
+-	u8 max_hv;
+-	u8 h_border;
+-	u8 v_border;
+-	u8 rsvd1:3;
+-	u8 digital:2;
+-	u8 vsync_positive:1;
+-	u8 hsync_positive:1;
+-	u8 rsvd2:1;
+-} __attribute__((packed));
+-
+-struct lvds_pnp_id {
+-	u16 mfg_name;
+-	u16 product_code;
+-	u32 serial;
+-	u8 mfg_week;
+-	u8 mfg_year;
+-} __attribute__((packed));
+-
+-struct bdb_lvds_lfp_data_entry {
+-	struct lvds_fp_timing fp_timing;
+-	struct lvds_dvo_timing dvo_timing;
+-	struct lvds_pnp_id pnp_id;
+-} __attribute__((packed));
+-
+-struct bdb_lvds_lfp_data {
+-	struct bdb_lvds_lfp_data_entry data[16];
+-} __attribute__((packed));
+-
+-struct aimdb_header {
+-	char signature[16];
+-	char oem_device[20];
+-	u16 aimdb_version;
+-	u16 aimdb_header_size;
+-	u16 aimdb_size;
+-} __attribute__((packed));
+-
+-struct aimdb_block {
+-	u8 aimdb_id;
+-	u16 aimdb_size;
+-} __attribute__((packed));
+-
+-struct vch_panel_data {
+-	u16 fp_timing_offset;
+-	u8 fp_timing_size;
+-	u16 dvo_timing_offset;
+-	u8 dvo_timing_size;
+-	u16 text_fitting_offset;
+-	u8 text_fitting_size;
+-	u16 graphics_fitting_offset;
+-	u8 graphics_fitting_size;
+-} __attribute__((packed));
+-
+-struct vch_bdb_22 {
+-	struct aimdb_block aimdb_block;
+-	struct vch_panel_data panels[16];
+-} __attribute__((packed));
+-
+-struct bdb_sdvo_lvds_options {
+-	u8 panel_backlight;
+-	u8 h40_set_panel_type;
+-	u8 panel_type;
+-	u8 ssc_clk_freq;
+-	u16 als_low_trip;
+-	u16 als_high_trip;
+-	u8 sclalarcoeff_tab_row_num;
+-	u8 sclalarcoeff_tab_row_size;
+-	u8 coefficient[8];
+-	u8 panel_misc_bits_1;
+-	u8 panel_misc_bits_2;
+-	u8 panel_misc_bits_3;
+-	u8 panel_misc_bits_4;
+-} __attribute__((packed));
+-
+-
+-extern bool psb_intel_init_bios(struct drm_device *dev);
+-extern void psb_intel_destroy_bios(struct drm_device *dev);
+-
+-/*
+- * Driver<->VBIOS interaction occurs through scratch bits in
+- * GR18 & SWF*.
+- */
+-
+-/* GR18 bits are set on display switch and hotkey events */
+-#define GR18_DRIVER_SWITCH_EN	(1<<7) /* 0: VBIOS control, 1: driver control */
+-#define GR18_HOTKEY_MASK	0x78 /* See also SWF4 15:0 */
+-#define   GR18_HK_NONE		(0x0<<3)
+-#define   GR18_HK_LFP_STRETCH	(0x1<<3)
+-#define   GR18_HK_TOGGLE_DISP	(0x2<<3)
+-#define   GR18_HK_DISP_SWITCH	(0x4<<3) /* see SWF14 15:0 for what to enable */
+-#define   GR18_HK_POPUP_DISABLED (0x6<<3)
+-#define   GR18_HK_POPUP_ENABLED	(0x7<<3)
+-#define   GR18_HK_PFIT		(0x8<<3)
+-#define   GR18_HK_APM_CHANGE	(0xa<<3)
+-#define   GR18_HK_MULTIPLE	(0xc<<3)
+-#define GR18_USER_INT_EN	(1<<2)
+-#define GR18_A0000_FLUSH_EN	(1<<1)
+-#define GR18_SMM_EN		(1<<0)
+-
+-/* Set by driver, cleared by VBIOS */
+-#define SWF00_YRES_SHIFT	16
+-#define SWF00_XRES_SHIFT	0
+-#define SWF00_RES_MASK		0xffff
+-
+-/* Set by VBIOS at boot time and driver at runtime */
+-#define SWF01_TV2_FORMAT_SHIFT	8
+-#define SWF01_TV1_FORMAT_SHIFT	0
+-#define SWF01_TV_FORMAT_MASK	0xffff
+-
+-#define SWF10_VBIOS_BLC_I2C_EN	(1<<29)
+-#define SWF10_GTT_OVERRIDE_EN	(1<<28)
+-#define SWF10_LFP_DPMS_OVR	(1<<27) /* override DPMS on display switch */
+-#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+-#define   SWF10_OLD_TOGGLE	0x0
+-#define   SWF10_TOGGLE_LIST_1	0x1
+-#define   SWF10_TOGGLE_LIST_2	0x2
+-#define   SWF10_TOGGLE_LIST_3	0x3
+-#define   SWF10_TOGGLE_LIST_4	0x4
+-#define SWF10_PANNING_EN	(1<<23)
+-#define SWF10_DRIVER_LOADED	(1<<22)
+-#define SWF10_EXTENDED_DESKTOP	(1<<21)
+-#define SWF10_EXCLUSIVE_MODE	(1<<20)
+-#define SWF10_OVERLAY_EN	(1<<19)
+-#define SWF10_PLANEB_HOLDOFF	(1<<18)
+-#define SWF10_PLANEA_HOLDOFF	(1<<17)
+-#define SWF10_VGA_HOLDOFF	(1<<16)
+-#define SWF10_ACTIVE_DISP_MASK	0xffff
+-#define   SWF10_PIPEB_LFP2	(1<<15)
+-#define   SWF10_PIPEB_EFP2	(1<<14)
+-#define   SWF10_PIPEB_TV2	(1<<13)
+-#define   SWF10_PIPEB_CRT2	(1<<12)
+-#define   SWF10_PIPEB_LFP	(1<<11)
+-#define   SWF10_PIPEB_EFP	(1<<10)
+-#define   SWF10_PIPEB_TV	(1<<9)
+-#define   SWF10_PIPEB_CRT	(1<<8)
+-#define   SWF10_PIPEA_LFP2	(1<<7)
+-#define   SWF10_PIPEA_EFP2	(1<<6)
+-#define   SWF10_PIPEA_TV2	(1<<5)
+-#define   SWF10_PIPEA_CRT2	(1<<4)
+-#define   SWF10_PIPEA_LFP	(1<<3)
+-#define   SWF10_PIPEA_EFP	(1<<2)
+-#define   SWF10_PIPEA_TV	(1<<1)
+-#define   SWF10_PIPEA_CRT	(1<<0)
+-
+-#define SWF11_MEMORY_SIZE_SHIFT	16
+-#define SWF11_SV_TEST_EN	(1<<15)
+-#define SWF11_IS_AGP		(1<<14)
+-#define SWF11_DISPLAY_HOLDOFF	(1<<13)
+-#define SWF11_DPMS_REDUCED	(1<<12)
+-#define SWF11_IS_VBE_MODE	(1<<11)
+-#define SWF11_PIPEB_ACCESS	(1<<10) /* 0 here means pipe a */
+-#define SWF11_DPMS_MASK		0x07
+-#define   SWF11_DPMS_OFF	(1<<2)
+-#define   SWF11_DPMS_SUSPEND	(1<<1)
+-#define   SWF11_DPMS_STANDBY	(1<<0)
+-#define   SWF11_DPMS_ON		0
+-
+-#define SWF14_GFX_PFIT_EN	(1<<31)
+-#define SWF14_TEXT_PFIT_EN	(1<<30)
+-#define SWF14_LID_STATUS_CLOSED	(1<<29) /* 0 here means open */
+-#define SWF14_POPUP_EN		(1<<28)
+-#define SWF14_DISPLAY_HOLDOFF	(1<<27)
+-#define SWF14_DISP_DETECT_EN	(1<<26)
+-#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+-#define SWF14_DRIVER_STATUS	(1<<24)
+-#define SWF14_OS_TYPE_WIN9X	(1<<23)
+-#define SWF14_OS_TYPE_WINNT	(1<<22)
+-/* 21:19 rsvd */
+-#define SWF14_PM_TYPE_MASK	0x00070000
+-#define   SWF14_PM_ACPI_VIDEO	(0x4 << 16)
+-#define   SWF14_PM_ACPI		(0x3 << 16)
+-#define   SWF14_PM_APM_12	(0x2 << 16)
+-#define   SWF14_PM_APM_11	(0x1 << 16)
+-#define SWF14_HK_REQUEST_MASK	0x0000ffff /* see GR18 6:3 for event type */
+-	  /* if GR18 indicates a display switch */
+-#define   SWF14_DS_PIPEB_LFP2_EN (1<<15)
+-#define   SWF14_DS_PIPEB_EFP2_EN (1<<14)
+-#define   SWF14_DS_PIPEB_TV2_EN  (1<<13)
+-#define   SWF14_DS_PIPEB_CRT2_EN (1<<12)
+-#define   SWF14_DS_PIPEB_LFP_EN  (1<<11)
+-#define   SWF14_DS_PIPEB_EFP_EN  (1<<10)
+-#define   SWF14_DS_PIPEB_TV_EN	 (1<<9)
+-#define   SWF14_DS_PIPEB_CRT_EN  (1<<8)
+-#define   SWF14_DS_PIPEA_LFP2_EN (1<<7)
+-#define   SWF14_DS_PIPEA_EFP2_EN (1<<6)
+-#define   SWF14_DS_PIPEA_TV2_EN  (1<<5)
+-#define   SWF14_DS_PIPEA_CRT2_EN (1<<4)
+-#define   SWF14_DS_PIPEA_LFP_EN  (1<<3)
+-#define   SWF14_DS_PIPEA_EFP_EN  (1<<2)
+-#define   SWF14_DS_PIPEA_TV_EN	 (1<<1)
+-#define   SWF14_DS_PIPEA_CRT_EN  (1<<0)
+-	  /* if GR18 indicates a panel fitting request */
+-#define   SWF14_PFIT_EN		(1<<0) /* 0 means disable */
+-	  /* if GR18 indicates an APM change request */
+-#define   SWF14_APM_HIBERNATE	0x4
+-#define   SWF14_APM_SUSPEND	0x3
+-#define   SWF14_APM_STANDBY	0x1
+-#define   SWF14_APM_RESTORE	0x0
+-
+-#endif /* _I830_BIOS_H_ */
+diff --git a/drivers/staging/gma500/intel_i2c.c b/drivers/staging/gma500/intel_i2c.c
+deleted file mode 100644
+index 51cbf65..0000000
+--- a/drivers/staging/gma500/intel_i2c.c
++++ /dev/null
+@@ -1,170 +0,0 @@
+-/*
+- * Copyright © 2006-2007 Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors:
+- *	Eric Anholt <eric at anholt.net>
+- */
+-
+-#include <linux/i2c.h>
+-#include <linux/i2c-algo-bit.h>
+-#include <linux/export.h>
+-
+-#include "psb_drv.h"
+-#include "psb_intel_reg.h"
+-
+-/*
+- * Intel GPIO access functions
+- */
+-
+-#define I2C_RISEFALL_TIME 20
+-
+-static int get_clock(void *data)
+-{
+-	struct psb_intel_i2c_chan *chan = data;
+-	struct drm_device *dev = chan->drm_dev;
+-	u32 val;
+-
+-	val = REG_READ(chan->reg);
+-	return (val & GPIO_CLOCK_VAL_IN) != 0;
+-}
+-
+-static int get_data(void *data)
+-{
+-	struct psb_intel_i2c_chan *chan = data;
+-	struct drm_device *dev = chan->drm_dev;
+-	u32 val;
+-
+-	val = REG_READ(chan->reg);
+-	return (val & GPIO_DATA_VAL_IN) != 0;
+-}
+-
+-static void set_clock(void *data, int state_high)
+-{
+-	struct psb_intel_i2c_chan *chan = data;
+-	struct drm_device *dev = chan->drm_dev;
+-	u32 reserved = 0, clock_bits;
+-
+-	/* On most chips, these bits must be preserved in software. */
+-	reserved =
+-		    REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+-					   GPIO_CLOCK_PULLUP_DISABLE);
+-
+-	if (state_high)
+-		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+-	else
+-		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+-		    GPIO_CLOCK_VAL_MASK;
+-	REG_WRITE(chan->reg, reserved | clock_bits);
+-	udelay(I2C_RISEFALL_TIME);	/* wait for the line to change state */
+-}
+-
+-static void set_data(void *data, int state_high)
+-{
+-	struct psb_intel_i2c_chan *chan = data;
+-	struct drm_device *dev = chan->drm_dev;
+-	u32 reserved = 0, data_bits;
+-
+-	/* On most chips, these bits must be preserved in software. */
+-	reserved =
+-		    REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
+-					   GPIO_CLOCK_PULLUP_DISABLE);
+-
+-	if (state_high)
+-		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+-	else
+-		data_bits =
+-		    GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+-		    GPIO_DATA_VAL_MASK;
+-
+-	REG_WRITE(chan->reg, reserved | data_bits);
+-	udelay(I2C_RISEFALL_TIME);	/* wait for the line to change state */
+-}
+-
+-/**
+- * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+- * @dev: DRM device
+- * @output: driver specific output device
+- * @reg: GPIO reg to use
+- * @name: name for this bus
+- *
+- * Creates and registers a new i2c bus with the Linux i2c layer, for use
+- * in output probing and control (e.g. DDC or SDVO control functions).
+- *
+- * Possible values for @reg include:
+- *   %GPIOA
+- *   %GPIOB
+- *   %GPIOC
+- *   %GPIOD
+- *   %GPIOE
+- *   %GPIOF
+- *   %GPIOG
+- *   %GPIOH
+- * see PRM for details on how these different busses are used.
+- */
+-struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
+-					const u32 reg, const char *name)
+-{
+-	struct psb_intel_i2c_chan *chan;
+-
+-	chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
+-	if (!chan)
+-		goto out_free;
+-
+-	chan->drm_dev = dev;
+-	chan->reg = reg;
+-	snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
+-	chan->adapter.owner = THIS_MODULE;
+-	chan->adapter.algo_data = &chan->algo;
+-	chan->adapter.dev.parent = &dev->pdev->dev;
+-	chan->algo.setsda = set_data;
+-	chan->algo.setscl = set_clock;
+-	chan->algo.getsda = get_data;
+-	chan->algo.getscl = get_clock;
+-	chan->algo.udelay = 20;
+-	chan->algo.timeout = usecs_to_jiffies(2200);
+-	chan->algo.data = chan;
+-
+-	i2c_set_adapdata(&chan->adapter, chan);
+-
+-	if (i2c_bit_add_bus(&chan->adapter))
+-		goto out_free;
+-
+-	/* JJJ:  raise SCL and SDA? */
+-	set_data(chan, 1);
+-	set_clock(chan, 1);
+-	udelay(20);
+-
+-	return chan;
+-
+-out_free:
+-	kfree(chan);
+-	return NULL;
+-}
+-
+-/**
+- * psb_intel_i2c_destroy - unregister and free i2c bus resources
+- * @output: channel to free
+- *
+- * Unregister the adapter from the i2c layer, then free the structure.
+- */
+-void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
+-{
+-	if (!chan)
+-		return;
+-
+-	i2c_del_adapter(&chan->adapter);
+-	kfree(chan);
+-}
+diff --git a/drivers/staging/gma500/intel_opregion.c b/drivers/staging/gma500/intel_opregion.c
+deleted file mode 100644
+index d946bc1..0000000
+--- a/drivers/staging/gma500/intel_opregion.c
++++ /dev/null
+@@ -1,81 +0,0 @@
+-/*
+- * Copyright 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * FIXME: resolve with the i915 version
+- */
+-
+-#include "psb_drv.h"
+-
+-struct opregion_header {
+-	u8 signature[16];
+-	u32 size;
+-	u32 opregion_ver;
+-	u8 bios_ver[32];
+-	u8 vbios_ver[16];
+-	u8 driver_ver[16];
+-	u32 mboxes;
+-	u8 reserved[164];
+-} __packed;
+-
+-struct opregion_apci {
+-	/*FIXME: add it later*/
+-} __packed;
+-
+-struct opregion_swsci {
+-	/*FIXME: add it later*/
+-} __packed;
+-
+-struct opregion_acpi {
+-	/*FIXME: add it later*/
+-} __packed;
+-
+-int gma_intel_opregion_init(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 opregion_phy;
+-	void *base;
+-	u32 *lid_state;
+-
+-	dev_priv->lid_state = NULL;
+-
+-	pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
+-	if (opregion_phy == 0)
+-		return -ENOTSUPP;
+-
+-	base = ioremap(opregion_phy, 8*1024);
+-	if (!base)
+-		return -ENOMEM;
+-
+-	lid_state = base + 0x01ac;
+-
+-	dev_priv->lid_state = lid_state;
+-	dev_priv->lid_last_state = readl(lid_state);
+-	return 0;
+-}
+-
+-int gma_intel_opregion_exit(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	if (dev_priv->lid_state)
+-		iounmap(dev_priv->lid_state);
+-	return 0;
+-}
+diff --git a/drivers/staging/gma500/mdfld_device.c b/drivers/staging/gma500/mdfld_device.c
+deleted file mode 100644
+index f47aeb7..0000000
+--- a/drivers/staging/gma500/mdfld_device.c
++++ /dev/null
+@@ -1,714 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2011, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- **************************************************************************/
+-
+-#include <linux/backlight.h>
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-#include "psb_reg.h"
+-#include "psb_intel_reg.h"
+-#include "psb_drm.h"
+-#include "psb_drv.h"
+-#include "mdfld_output.h"
+-#include "mdfld_dsi_output.h"
+-#include "mid_bios.h"
+-
+-/*
+- *	Provide the Medfield specific backlight management
+- */
+-
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-
+-static int mdfld_brightness;
+-struct backlight_device *mdfld_backlight_device;
+-
+-static int mfld_set_brightness(struct backlight_device *bd)
+-{
+-	struct drm_device *dev = bl_get_data(mdfld_backlight_device);
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	int level = bd->props.brightness;
+-
+-	/* Percentage 1-100% being valid */
+-	if (level < 1)
+-		level = 1;
+-
+-	if (gma_power_begin(dev, 0)) {
+-		/* Calculate and set the brightness value */
+-		u32 adjusted_level;
+-
+-		/* Adjust the backlight level with the percent in
+-		 * dev_priv->blc_adj2;
+-		 */
+-		adjusted_level = level * dev_priv->blc_adj2;
+-		adjusted_level = adjusted_level / 100;
+-#if 0
+-#ifndef CONFIG_MDFLD_DSI_DPU
+-		if(!(dev_priv->dsr_fb_update & MDFLD_DSR_MIPI_CONTROL) && 
+-			(dev_priv->dbi_panel_on || dev_priv->dbi_panel_on2)){
+-			mdfld_dsi_dbi_exit_dsr(dev,MDFLD_DSR_MIPI_CONTROL, 0, 0);
+-			dev_dbg(dev->dev, "Out of DSR before set brightness to %d.\n",adjusted_level);
+-		}
+-#endif
+-		mdfld_dsi_brightness_control(dev, 0, adjusted_level);
+-
+-		if ((dev_priv->dbi_panel_on2) || (dev_priv->dpi_panel_on2))
+-			mdfld_dsi_brightness_control(dev, 2, adjusted_level);
+-#endif
+-		gma_power_end(dev);
+-	}
+-	mdfld_brightness = level;
+-	return 0;
+-}
+-
+-int psb_get_brightness(struct backlight_device *bd)
+-{
+-	/* return locally cached var instead of HW read (due to DPST etc.) */
+-	/* FIXME: ideally return actual value in case firmware fiddled with
+-	   it */
+-	return mdfld_brightness;
+-}
+-
+-static const struct backlight_ops mfld_ops = {
+-	.get_brightness = psb_get_brightness,
+-	.update_status  = mfld_set_brightness,
+-};
+-
+-static int mdfld_backlight_init(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct backlight_properties props;
+-	memset(&props, 0, sizeof(struct backlight_properties));
+-	props.max_brightness = 100;
+-	props.type = BACKLIGHT_PLATFORM;
+-
+-	mdfld_backlight_device = backlight_device_register("mfld-bl",
+-					NULL, (void *)dev, &mfld_ops, &props);
+-					
+-	if (IS_ERR(mdfld_backlight_device))
+-		return PTR_ERR(mdfld_backlight_device);
+-
+-	dev_priv->blc_adj1 = 100;
+-	dev_priv->blc_adj2 = 100;
+-	mdfld_backlight_device->props.brightness = 100;
+-	mdfld_backlight_device->props.max_brightness = 100;
+-	backlight_update_status(mdfld_backlight_device);
+-	dev_priv->backlight_device = mdfld_backlight_device;
+-	return 0;
+-}
+-
+-#endif
+-
+-/*
+- *	Provide the Medfield specific chip logic and low level methods for
+- *	power management.
+- */
+-
+-static void mdfld_init_pm(struct drm_device *dev)
+-{
+-	/* No work needed here yet */
+-}
+-
+-/**
+- * mdfld_save_display_registers	-	save registers for pipe
+- * @dev: our device
+- * @pipe: pipe to save
+- *
+- * Save the pipe state of the device before we power it off. Keep everything
+- * we need to put it back again
+- */
+-static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	int i;
+-
+-	/* register */
+-	u32 dpll_reg = MRST_DPLL_A;
+-	u32 fp_reg = MRST_FPA0;
+-	u32 pipeconf_reg = PIPEACONF;
+-	u32 htot_reg = HTOTAL_A;
+-	u32 hblank_reg = HBLANK_A;
+-	u32 hsync_reg = HSYNC_A;
+-	u32 vtot_reg = VTOTAL_A;
+-	u32 vblank_reg = VBLANK_A;
+-	u32 vsync_reg = VSYNC_A;
+-	u32 pipesrc_reg = PIPEASRC;
+-	u32 dspstride_reg = DSPASTRIDE;
+-	u32 dsplinoff_reg = DSPALINOFF;
+-	u32 dsptileoff_reg = DSPATILEOFF;
+-	u32 dspsize_reg = DSPASIZE;
+-	u32 dsppos_reg = DSPAPOS;
+-	u32 dspsurf_reg = DSPASURF;
+-	u32 mipi_reg = MIPI;
+-	u32 dspcntr_reg = DSPACNTR;
+-	u32 dspstatus_reg = PIPEASTAT;
+-	u32 palette_reg = PALETTE_A;
+-
+-	/* pointer to values */
+-	u32 *dpll_val = &dev_priv->saveDPLL_A;
+-	u32 *fp_val = &dev_priv->saveFPA0;
+-	u32 *pipeconf_val = &dev_priv->savePIPEACONF;
+-	u32 *htot_val = &dev_priv->saveHTOTAL_A;
+-	u32 *hblank_val = &dev_priv->saveHBLANK_A;
+-	u32 *hsync_val = &dev_priv->saveHSYNC_A;
+-	u32 *vtot_val = &dev_priv->saveVTOTAL_A;
+-	u32 *vblank_val = &dev_priv->saveVBLANK_A;
+-	u32 *vsync_val = &dev_priv->saveVSYNC_A;
+-	u32 *pipesrc_val = &dev_priv->savePIPEASRC;
+-	u32 *dspstride_val = &dev_priv->saveDSPASTRIDE;
+-	u32 *dsplinoff_val = &dev_priv->saveDSPALINOFF;
+-	u32 *dsptileoff_val = &dev_priv->saveDSPATILEOFF;
+-	u32 *dspsize_val = &dev_priv->saveDSPASIZE;
+-	u32 *dsppos_val = &dev_priv->saveDSPAPOS;
+-	u32 *dspsurf_val = &dev_priv->saveDSPASURF;
+-	u32 *mipi_val = &dev_priv->saveMIPI;
+-	u32 *dspcntr_val = &dev_priv->saveDSPACNTR;
+-	u32 *dspstatus_val = &dev_priv->saveDSPASTATUS;
+-	u32 *palette_val = dev_priv->save_palette_a;
+-
+-	switch (pipe) {
+-	case 0:
+-		break;
+-	case 1:
+-		/* register */
+-		dpll_reg = MDFLD_DPLL_B;
+-		fp_reg = MDFLD_DPLL_DIV0;
+-		pipeconf_reg = PIPEBCONF;
+-		htot_reg = HTOTAL_B;
+-		hblank_reg = HBLANK_B;
+-		hsync_reg = HSYNC_B;
+-		vtot_reg = VTOTAL_B;
+-		vblank_reg = VBLANK_B;
+-		vsync_reg = VSYNC_B;
+-		pipesrc_reg = PIPEBSRC;
+-		dspstride_reg = DSPBSTRIDE;
+-		dsplinoff_reg = DSPBLINOFF;
+-		dsptileoff_reg = DSPBTILEOFF;
+-		dspsize_reg = DSPBSIZE;
+-		dsppos_reg = DSPBPOS;
+-		dspsurf_reg = DSPBSURF;
+-		dspcntr_reg = DSPBCNTR;
+-		dspstatus_reg = PIPEBSTAT;
+-		palette_reg = PALETTE_B;
+-
+-		/* values */
+-		dpll_val = &dev_priv->saveDPLL_B;
+-		fp_val = &dev_priv->saveFPB0;
+-		pipeconf_val = &dev_priv->savePIPEBCONF;
+-		htot_val = &dev_priv->saveHTOTAL_B;
+-		hblank_val = &dev_priv->saveHBLANK_B;
+-		hsync_val = &dev_priv->saveHSYNC_B;
+-		vtot_val = &dev_priv->saveVTOTAL_B;
+-		vblank_val = &dev_priv->saveVBLANK_B;
+-		vsync_val = &dev_priv->saveVSYNC_B;
+-		pipesrc_val = &dev_priv->savePIPEBSRC;
+-		dspstride_val = &dev_priv->saveDSPBSTRIDE;
+-		dsplinoff_val = &dev_priv->saveDSPBLINOFF;
+-		dsptileoff_val = &dev_priv->saveDSPBTILEOFF;
+-		dspsize_val = &dev_priv->saveDSPBSIZE;
+-		dsppos_val = &dev_priv->saveDSPBPOS;
+-		dspsurf_val = &dev_priv->saveDSPBSURF;
+-		dspcntr_val = &dev_priv->saveDSPBCNTR;
+-		dspstatus_val = &dev_priv->saveDSPBSTATUS;
+-		palette_val = dev_priv->save_palette_b;
+-		break;
+-	case 2:
+-		/* register */
+-		pipeconf_reg = PIPECCONF;
+-		htot_reg = HTOTAL_C;
+-		hblank_reg = HBLANK_C;
+-		hsync_reg = HSYNC_C;
+-		vtot_reg = VTOTAL_C;
+-		vblank_reg = VBLANK_C;
+-		vsync_reg = VSYNC_C;
+-		pipesrc_reg = PIPECSRC;
+-		dspstride_reg = DSPCSTRIDE;
+-		dsplinoff_reg = DSPCLINOFF;
+-		dsptileoff_reg = DSPCTILEOFF;
+-		dspsize_reg = DSPCSIZE;
+-		dsppos_reg = DSPCPOS;
+-		dspsurf_reg = DSPCSURF;
+-		mipi_reg = MIPI_C;
+-		dspcntr_reg = DSPCCNTR;
+-		dspstatus_reg = PIPECSTAT;
+-		palette_reg = PALETTE_C;
+-
+-		/* pointer to values */
+-		pipeconf_val = &dev_priv->savePIPECCONF;
+-		htot_val = &dev_priv->saveHTOTAL_C;
+-		hblank_val = &dev_priv->saveHBLANK_C;
+-		hsync_val = &dev_priv->saveHSYNC_C;
+-		vtot_val = &dev_priv->saveVTOTAL_C;
+-		vblank_val = &dev_priv->saveVBLANK_C;
+-		vsync_val = &dev_priv->saveVSYNC_C;
+-		pipesrc_val = &dev_priv->savePIPECSRC;
+-		dspstride_val = &dev_priv->saveDSPCSTRIDE;
+-		dsplinoff_val = &dev_priv->saveDSPCLINOFF;
+-		dsptileoff_val = &dev_priv->saveDSPCTILEOFF;
+-		dspsize_val = &dev_priv->saveDSPCSIZE;
+-		dsppos_val = &dev_priv->saveDSPCPOS;
+-		dspsurf_val = &dev_priv->saveDSPCSURF;
+-		mipi_val = &dev_priv->saveMIPI_C;
+-		dspcntr_val = &dev_priv->saveDSPCCNTR;
+-		dspstatus_val = &dev_priv->saveDSPCSTATUS;
+-		palette_val = dev_priv->save_palette_c;
+-		break;
+-	default:
+-		DRM_ERROR("%s, invalid pipe number.\n", __func__);
+-		return -EINVAL;
+-	}
+-
+-	/* Pipe & plane A info */
+-	*dpll_val = PSB_RVDC32(dpll_reg);
+-	*fp_val = PSB_RVDC32(fp_reg);
+-	*pipeconf_val = PSB_RVDC32(pipeconf_reg);
+-	*htot_val = PSB_RVDC32(htot_reg);
+-	*hblank_val = PSB_RVDC32(hblank_reg);
+-	*hsync_val = PSB_RVDC32(hsync_reg);
+-	*vtot_val = PSB_RVDC32(vtot_reg);
+-	*vblank_val = PSB_RVDC32(vblank_reg);
+-	*vsync_val = PSB_RVDC32(vsync_reg);
+-	*pipesrc_val = PSB_RVDC32(pipesrc_reg);
+-	*dspstride_val = PSB_RVDC32(dspstride_reg);
+-	*dsplinoff_val = PSB_RVDC32(dsplinoff_reg);
+-	*dsptileoff_val = PSB_RVDC32(dsptileoff_reg);
+-	*dspsize_val = PSB_RVDC32(dspsize_reg);
+-	*dsppos_val = PSB_RVDC32(dsppos_reg);
+-	*dspsurf_val = PSB_RVDC32(dspsurf_reg);
+-	*dspcntr_val = PSB_RVDC32(dspcntr_reg);
+-	*dspstatus_val = PSB_RVDC32(dspstatus_reg);
+-
+-	/*save palette (gamma) */
+-	for (i = 0; i < 256; i++)
+-		palette_val[i] = PSB_RVDC32(palette_reg + (i<<2));
+-
+-	if (pipe == 1) {
+-		dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
+-		dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
+-		dev_priv->saveHDMIPHYMISCCTL = PSB_RVDC32(HDMIPHYMISCCTL);
+-		dev_priv->saveHDMIB_CONTROL = PSB_RVDC32(HDMIB_CONTROL);
+-		return 0;
+-	}
+-	*mipi_val = PSB_RVDC32(mipi_reg);
+-	return 0;
+-}
+-
+-/**
+- * mdfld_save_cursor_overlay_registers	-	save cursor overlay info
+- * @dev: our device
+- *
+- * Save the cursor and overlay register state
+- */
+-static int mdfld_save_cursor_overlay_registers(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	/* Save cursor regs */
+-	dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
+-	dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
+-	dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
+-
+-	dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
+-	dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
+-	dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
+-
+-	dev_priv->saveDSPCCURSOR_CTRL = PSB_RVDC32(CURCCNTR);
+-	dev_priv->saveDSPCCURSOR_BASE = PSB_RVDC32(CURCBASE);
+-	dev_priv->saveDSPCCURSOR_POS = PSB_RVDC32(CURCPOS);
+-
+-	/* HW overlay */
+-	dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
+-	dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
+-	dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
+-	dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
+-	dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
+-	dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
+-	dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
+-
+-	dev_priv->saveOV_OVADD_C = PSB_RVDC32(OV_OVADD + OV_C_OFFSET);
+-	dev_priv->saveOV_OGAMC0_C = PSB_RVDC32(OV_OGAMC0 + OV_C_OFFSET);
+-	dev_priv->saveOV_OGAMC1_C = PSB_RVDC32(OV_OGAMC1 + OV_C_OFFSET);
+-	dev_priv->saveOV_OGAMC2_C = PSB_RVDC32(OV_OGAMC2 + OV_C_OFFSET);
+-	dev_priv->saveOV_OGAMC3_C = PSB_RVDC32(OV_OGAMC3 + OV_C_OFFSET);
+-	dev_priv->saveOV_OGAMC4_C = PSB_RVDC32(OV_OGAMC4 + OV_C_OFFSET);
+-	dev_priv->saveOV_OGAMC5_C = PSB_RVDC32(OV_OGAMC5 + OV_C_OFFSET);
+-
+-	return 0;
+-}
+-/*
+- * mdfld_restore_display_registers	-	restore the state of a pipe
+- * @dev: our device
+- * @pipe: the pipe to restore
+- *
+- * Restore the state of a pipe to that which was saved by the register save
+- * functions.
+- */
+-static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
+-{
+-	/* To get  panel out of ULPS mode */
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dsi_config *dsi_config = NULL;
+-	u32 i = 0;
+-	u32 dpll = 0;
+-	u32 timeout = 0;
+-	u32 reg_offset = 0;
+-
+-	/* register */
+-	u32 dpll_reg = MRST_DPLL_A;
+-	u32 fp_reg = MRST_FPA0;
+-	u32 pipeconf_reg = PIPEACONF;
+-	u32 htot_reg = HTOTAL_A;
+-	u32 hblank_reg = HBLANK_A;
+-	u32 hsync_reg = HSYNC_A;
+-	u32 vtot_reg = VTOTAL_A;
+-	u32 vblank_reg = VBLANK_A;
+-	u32 vsync_reg = VSYNC_A;
+-	u32 pipesrc_reg = PIPEASRC;
+-	u32 dspstride_reg = DSPASTRIDE;
+-	u32 dsplinoff_reg = DSPALINOFF;
+-	u32 dsptileoff_reg = DSPATILEOFF;
+-	u32 dspsize_reg = DSPASIZE;
+-	u32 dsppos_reg = DSPAPOS;
+-	u32 dspsurf_reg = DSPASURF;
+-	u32 dspstatus_reg = PIPEASTAT;
+-	u32 mipi_reg = MIPI;
+-	u32 dspcntr_reg = DSPACNTR;
+-	u32 palette_reg = PALETTE_A;
+-
+-	/* values */
+-	u32 dpll_val = dev_priv->saveDPLL_A & ~DPLL_VCO_ENABLE;
+-	u32 fp_val = dev_priv->saveFPA0;
+-	u32 pipeconf_val = dev_priv->savePIPEACONF;
+-	u32 htot_val = dev_priv->saveHTOTAL_A;
+-	u32 hblank_val = dev_priv->saveHBLANK_A;
+-	u32 hsync_val = dev_priv->saveHSYNC_A;
+-	u32 vtot_val = dev_priv->saveVTOTAL_A;
+-	u32 vblank_val = dev_priv->saveVBLANK_A;
+-	u32 vsync_val = dev_priv->saveVSYNC_A;
+-	u32 pipesrc_val = dev_priv->savePIPEASRC;
+-	u32 dspstride_val = dev_priv->saveDSPASTRIDE;
+-	u32 dsplinoff_val = dev_priv->saveDSPALINOFF;
+-	u32 dsptileoff_val = dev_priv->saveDSPATILEOFF;
+-	u32 dspsize_val = dev_priv->saveDSPASIZE;
+-	u32 dsppos_val = dev_priv->saveDSPAPOS;
+-	u32 dspsurf_val = dev_priv->saveDSPASURF;
+-	u32 dspstatus_val = dev_priv->saveDSPASTATUS;
+-	u32 mipi_val = dev_priv->saveMIPI;
+-	u32 dspcntr_val = dev_priv->saveDSPACNTR;
+-	u32 *palette_val = dev_priv->save_palette_a;
+-
+-	switch (pipe) {
+-	case 0:
+-		dsi_config = dev_priv->dsi_configs[0];
+-		break;
+-	case 1:
+-		/* register */
+-		dpll_reg = MDFLD_DPLL_B;
+-		fp_reg = MDFLD_DPLL_DIV0;
+-		pipeconf_reg = PIPEBCONF;
+-		htot_reg = HTOTAL_B;
+-		hblank_reg = HBLANK_B;
+-		hsync_reg = HSYNC_B;
+-		vtot_reg = VTOTAL_B;
+-		vblank_reg = VBLANK_B;
+-		vsync_reg = VSYNC_B;
+-		pipesrc_reg = PIPEBSRC;
+-		dspstride_reg = DSPBSTRIDE;
+-		dsplinoff_reg = DSPBLINOFF;
+-		dsptileoff_reg = DSPBTILEOFF;
+-		dspsize_reg = DSPBSIZE;
+-		dsppos_reg = DSPBPOS;
+-		dspsurf_reg = DSPBSURF;
+-		dspcntr_reg = DSPBCNTR;
+-		palette_reg = PALETTE_B;
+-		dspstatus_reg = PIPEBSTAT;
+-
+-		/* values */
+-		dpll_val = dev_priv->saveDPLL_B & ~DPLL_VCO_ENABLE;
+-		fp_val = dev_priv->saveFPB0;
+-		pipeconf_val = dev_priv->savePIPEBCONF;
+-		htot_val = dev_priv->saveHTOTAL_B;
+-		hblank_val = dev_priv->saveHBLANK_B;
+-		hsync_val = dev_priv->saveHSYNC_B;
+-		vtot_val = dev_priv->saveVTOTAL_B;
+-		vblank_val = dev_priv->saveVBLANK_B;
+-		vsync_val = dev_priv->saveVSYNC_B;
+-		pipesrc_val = dev_priv->savePIPEBSRC;
+-		dspstride_val = dev_priv->saveDSPBSTRIDE;
+-		dsplinoff_val = dev_priv->saveDSPBLINOFF;
+-		dsptileoff_val = dev_priv->saveDSPBTILEOFF;
+-		dspsize_val = dev_priv->saveDSPBSIZE;
+-		dsppos_val = dev_priv->saveDSPBPOS;
+-		dspsurf_val = dev_priv->saveDSPBSURF;
+-		dspcntr_val = dev_priv->saveDSPBCNTR;
+-		dspstatus_val = dev_priv->saveDSPBSTATUS;
+-		palette_val = dev_priv->save_palette_b;
+-		break;
+-	case 2:
+-		reg_offset = MIPIC_REG_OFFSET;
+-
+-		/* register */
+-		pipeconf_reg = PIPECCONF;
+-		htot_reg = HTOTAL_C;
+-		hblank_reg = HBLANK_C;
+-		hsync_reg = HSYNC_C;
+-		vtot_reg = VTOTAL_C;
+-		vblank_reg = VBLANK_C;
+-		vsync_reg = VSYNC_C;
+-		pipesrc_reg = PIPECSRC;
+-		dspstride_reg = DSPCSTRIDE;
+-		dsplinoff_reg = DSPCLINOFF;
+-		dsptileoff_reg = DSPCTILEOFF;
+-		dspsize_reg = DSPCSIZE;
+-		dsppos_reg = DSPCPOS;
+-		dspsurf_reg = DSPCSURF;
+-		mipi_reg = MIPI_C;
+-		dspcntr_reg = DSPCCNTR;
+-		palette_reg = PALETTE_C;
+-		dspstatus_reg = PIPECSTAT;
+-
+-		/* values */
+-		pipeconf_val = dev_priv->savePIPECCONF;
+-		htot_val = dev_priv->saveHTOTAL_C;
+-		hblank_val = dev_priv->saveHBLANK_C;
+-		hsync_val = dev_priv->saveHSYNC_C;
+-		vtot_val = dev_priv->saveVTOTAL_C;
+-		vblank_val = dev_priv->saveVBLANK_C;
+-		vsync_val = dev_priv->saveVSYNC_C;
+-		pipesrc_val = dev_priv->savePIPECSRC;
+-		dspstride_val = dev_priv->saveDSPCSTRIDE;
+-		dsplinoff_val = dev_priv->saveDSPCLINOFF;
+-		dsptileoff_val = dev_priv->saveDSPCTILEOFF;
+-		dspsize_val = dev_priv->saveDSPCSIZE;
+-		dsppos_val = dev_priv->saveDSPCPOS;
+-		dspsurf_val = dev_priv->saveDSPCSURF;
+-		dspstatus_val = dev_priv->saveDSPCSTATUS;
+-		mipi_val = dev_priv->saveMIPI_C;
+-		dspcntr_val = dev_priv->saveDSPCCNTR;
+-		palette_val = dev_priv->save_palette_c;
+-
+-		dsi_config = dev_priv->dsi_configs[1];
+-		break;
+-	default:
+-		DRM_ERROR("%s, invalid pipe number.\n", __func__);
+-		return -EINVAL;
+-	}
+-
+-	/* Make sure VGA plane is off. it initializes to on after reset!*/
+-	PSB_WVDC32(0x80000000, VGACNTRL);
+-	if (pipe == 1) {
+-		PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, dpll_reg);
+-		PSB_RVDC32(dpll_reg);
+-
+-		PSB_WVDC32(fp_val, fp_reg);
+-	} else {
+-		dpll = PSB_RVDC32(dpll_reg);
+-
+-		if (!(dpll & DPLL_VCO_ENABLE)) {
+-
+-			/* When ungating power of DPLL, needs to wait 0.5us before enable the VCO */
+-			if (dpll & MDFLD_PWR_GATE_EN) {
+-				dpll &= ~MDFLD_PWR_GATE_EN;
+-				PSB_WVDC32(dpll, dpll_reg);
+-				udelay(500);	/* FIXME: 1 ? */
+-			}
+-
+-			PSB_WVDC32(fp_val, fp_reg);
+-			PSB_WVDC32(dpll_val, dpll_reg);
+-			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+-			udelay(500);
+-
+-			dpll_val |= DPLL_VCO_ENABLE;
+-			PSB_WVDC32(dpll_val, dpll_reg);
+-			PSB_RVDC32(dpll_reg);
+-
+-			/* wait for DSI PLL to lock */
+-			while ((timeout < 20000) && !(PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+-				udelay(150);
+-				timeout++;
+-			}
+-
+-			if (timeout == 20000) {
+-				DRM_ERROR("%s, can't lock DSIPLL.\n",
+-							__func__);
+-				return -EINVAL;
+-			}
+-		}
+-	}
+-	/* Restore mode */
+-	PSB_WVDC32(htot_val, htot_reg);
+-	PSB_WVDC32(hblank_val, hblank_reg);
+-	PSB_WVDC32(hsync_val, hsync_reg);
+-	PSB_WVDC32(vtot_val, vtot_reg);
+-	PSB_WVDC32(vblank_val, vblank_reg);
+-	PSB_WVDC32(vsync_val, vsync_reg);
+-	PSB_WVDC32(pipesrc_val, pipesrc_reg);
+-	PSB_WVDC32(dspstatus_val, dspstatus_reg);
+-
+-	/* Set up the plane */
+-	PSB_WVDC32(dspstride_val, dspstride_reg);
+-	PSB_WVDC32(dsplinoff_val, dsplinoff_reg);
+-	PSB_WVDC32(dsptileoff_val, dsptileoff_reg);
+-	PSB_WVDC32(dspsize_val, dspsize_reg);
+-	PSB_WVDC32(dsppos_val, dsppos_reg);
+-	PSB_WVDC32(dspsurf_val, dspsurf_reg);
+-
+-	if (pipe == 1) {
+-		PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
+-		PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
+-		PSB_WVDC32(dev_priv->saveHDMIPHYMISCCTL, HDMIPHYMISCCTL);
+-		PSB_WVDC32(dev_priv->saveHDMIB_CONTROL, HDMIB_CONTROL);
+-
+-	} else {
+-		/* Set up pipe related registers */
+-		PSB_WVDC32(mipi_val, mipi_reg);
+-		/* Setup MIPI adapter + MIPI IP registers */
+-		mdfld_dsi_controller_init(dsi_config, pipe);
+-		msleep(20);
+-	}
+-	/* Enable the plane */
+-	PSB_WVDC32(dspcntr_val, dspcntr_reg);
+-	msleep(20);
+-	/* Enable the pipe */
+-	PSB_WVDC32(pipeconf_val, pipeconf_reg);
+-
+-	for (i = 0; i < 256; i++)
+-		PSB_WVDC32(palette_val[i], palette_reg + (i<<2));
+-	if (pipe == 1)
+-		return 0;
+-	if (!mdfld_panel_dpi(dev))
+-		mdfld_enable_te(dev, pipe);
+-	return 0;
+-}
+-
+-/**
+- * mdfld_restore_cursor_overlay_registers	-	restore cursor
+- * @dev: our device
+- *
+- * Restore the cursor and overlay state that was saved earlier
+- */
+-static int mdfld_restore_cursor_overlay_registers(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	/* Enable Cursor A */
+-	PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
+-	PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
+-	PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
+-
+-	PSB_WVDC32(dev_priv->saveDSPBCURSOR_CTRL, CURBCNTR);
+-	PSB_WVDC32(dev_priv->saveDSPBCURSOR_POS, CURBPOS);
+-	PSB_WVDC32(dev_priv->saveDSPBCURSOR_BASE, CURBBASE);
+-
+-	PSB_WVDC32(dev_priv->saveDSPCCURSOR_CTRL, CURCCNTR);
+-	PSB_WVDC32(dev_priv->saveDSPCCURSOR_POS, CURCPOS);
+-	PSB_WVDC32(dev_priv->saveDSPCCURSOR_BASE, CURCBASE);
+-
+-	/* Restore HW overlay */
+-	PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
+-
+-	PSB_WVDC32(dev_priv->saveOV_OVADD_C, OV_OVADD + OV_C_OFFSET);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC0_C, OV_OGAMC0 + OV_C_OFFSET);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC1_C, OV_OGAMC1 + OV_C_OFFSET);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC2_C, OV_OGAMC2 + OV_C_OFFSET);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC3_C, OV_OGAMC3 + OV_C_OFFSET);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC4_C, OV_OGAMC4 + OV_C_OFFSET);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC5_C, OV_OGAMC5 + OV_C_OFFSET);
+-
+-	return 0;
+-}
+-
+-/**
+- *	mdfld_save_display_registers	-	save registers lost on suspend
+- *	@dev: our DRM device
+- *
+- *	Save the state we need in order to be able to restore the interface
+- *	upon resume from suspend
+- */
+-static int mdfld_save_registers(struct drm_device *dev)
+-{
+-	/* FIXME: We need to shut down panels here if using them
+-	   and once the right bits are merged */
+-	mdfld_save_cursor_overlay_registers(dev);
+-	mdfld_save_display_registers(dev, 0);
+-	mdfld_save_display_registers(dev, 0);
+-	mdfld_save_display_registers(dev, 2);
+-	mdfld_save_display_registers(dev, 1);
+-	mdfld_disable_crtc(dev, 0);
+-	mdfld_disable_crtc(dev, 2);
+-	mdfld_disable_crtc(dev, 1);
+-	return 0;
+-}
+-
+-/**
+- *	mdfld_restore_display_registers	-	restore lost register state
+- *	@dev: our DRM device
+- *
+- *	Restore register state that was lost during suspend and resume.
+- */
+-static int mdfld_restore_registers(struct drm_device *dev)
+-{
+-	mdfld_restore_display_registers(dev, 1);
+-	mdfld_restore_display_registers(dev, 0);
+-	mdfld_restore_display_registers(dev, 2);
+-	mdfld_restore_cursor_overlay_registers(dev);
+-	return 0;
+-}
+-
+-static int mdfld_power_down(struct drm_device *dev)
+-{
+-	/* FIXME */
+-	return 0;
+-}
+-
+-static int mdfld_power_up(struct drm_device *dev)
+-{
+-	/* FIXME */
+-	return 0;
+-}
+-
+-const struct psb_ops mdfld_chip_ops = {
+-	.name = "Medfield",
+-	.accel_2d = 0,
+-	.pipes = 3,
+-	.crtcs = 2,
+-	.sgx_offset = MRST_SGX_OFFSET,
+-
+-	.chip_setup = mid_chip_setup,
+-
+-	.crtc_helper = &mdfld_helper_funcs,
+-	.crtc_funcs = &mdfld_intel_crtc_funcs,
+-
+-	.output_init = mdfld_output_init,
+-
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-	.backlight_init = mdfld_backlight_init,
+-#endif
+-
+-	.init_pm = mdfld_init_pm,
+-	.save_regs = mdfld_save_registers,
+-	.restore_regs = mdfld_restore_registers,
+-	.power_down = mdfld_power_down,
+-	.power_up = mdfld_power_up,
+-};
+-
+diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.c b/drivers/staging/gma500/mdfld_dsi_dbi.c
+deleted file mode 100644
+index fd211f3..0000000
+--- a/drivers/staging/gma500/mdfld_dsi_dbi.c
++++ /dev/null
+@@ -1,761 +0,0 @@
+-/*
+- * Copyright © 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- *  jim liu <jim.liu at intel.com>
+- *  Jackie Li<yaodong.li at intel.com>
+- */
+-
+-#include "mdfld_dsi_dbi.h"
+-#include "mdfld_dsi_dbi_dpu.h"
+-#include "mdfld_dsi_pkg_sender.h"
+-
+-#include "power.h"
+-#include <linux/pm_runtime.h>
+-
+-int enable_gfx_rtpm;
+-
+-extern struct drm_device *gpDrmDevice;
+-extern int gfxrtdelay;
+-int enter_dsr;
+-struct mdfld_dsi_dbi_output *gdbi_output;
+-extern bool gbgfxsuspended;
+-extern int enable_gfx_rtpm;
+-extern int gfxrtdelay;
+-
+-#define MDFLD_DSR_MAX_IDLE_COUNT	2
+-
+-/*
+- * set refreshing area
+- */
+-int mdfld_dsi_dbi_update_area(struct mdfld_dsi_dbi_output *dbi_output,
+-				u16 x1, u16 y1, u16 x2, u16 y2)
+-{
+-	struct mdfld_dsi_pkg_sender *sender =
+-		mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
+-	u8 param[4];
+-	u8 cmd;
+-	int err;
+-
+-	if (!sender) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-
+-	/* Set column */
+-	cmd = DCS_SET_COLUMN_ADDRESS;
+-	param[0] = x1 >> 8;
+-	param[1] = x1;
+-	param[2] = x2 >> 8;
+-	param[3] = x2;
+-
+-	err = mdfld_dsi_send_dcs(sender,
+-				 cmd,
+-				 param,
+-				 4,
+-				 CMD_DATA_SRC_SYSTEM_MEM,
+-				 MDFLD_DSI_QUEUE_PACKAGE);
+-	if (err) {
+-		dev_err(sender->dev->dev, "DCS 0x%x sent failed\n", cmd);
+-		goto err_out;
+-	}
+-
+-	/* Set page */
+-	cmd = DCS_SET_PAGE_ADDRESS;
+-	param[0] = y1 >> 8;
+-	param[1] = y1;
+-	param[2] = y2 >> 8;
+-	param[3] = y2;
+-
+-	err = mdfld_dsi_send_dcs(sender,
+-				 cmd,
+-				 param,
+-				 4,
+-				 CMD_DATA_SRC_SYSTEM_MEM,
+-				 MDFLD_DSI_QUEUE_PACKAGE);
+-	if (err) {
+-		dev_err(sender->dev->dev, "DCS 0x%x sent failed\n", cmd);
+-		goto err_out;
+-	}
+-
+-	/*update screen*/
+-	err = mdfld_dsi_send_dcs(sender,
+-				 write_mem_start,
+-				 NULL,
+-				 0,
+-				 CMD_DATA_SRC_PIPE,
+-				 MDFLD_DSI_QUEUE_PACKAGE);
+-	if (err) {
+-		dev_err(sender->dev->dev, "DCS 0x%x sent failed\n", cmd);
+-		goto err_out;
+-	}
+-	mdfld_dsi_cmds_kick_out(sender);
+-err_out:
+-	return err;
+-}
+-
+-/*
+- * set panel's power state
+- */
+-int mdfld_dsi_dbi_update_power(struct mdfld_dsi_dbi_output *dbi_output,
+-								int mode)
+-{
+-	struct drm_device *dev = dbi_output->dev;
+-	struct mdfld_dsi_pkg_sender *sender =
+-		mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
+-	u8 param = 0;
+-	u32 err = 0;
+-
+-	if (!sender) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-
+-	if (mode == DRM_MODE_DPMS_ON) {
+-		/* Exit sleep mode */
+-		err = mdfld_dsi_send_dcs(sender,
+-					 DCS_EXIT_SLEEP_MODE,
+-					 NULL,
+-					 0,
+-					 CMD_DATA_SRC_SYSTEM_MEM,
+-					 MDFLD_DSI_QUEUE_PACKAGE);
+-		if (err) {
+-			dev_err(dev->dev, "DCS 0x%x sent failed\n",
+-						DCS_EXIT_SLEEP_MODE);
+-			goto power_err;
+-		}
+-
+-		/* Set display on */
+-		err = mdfld_dsi_send_dcs(sender,
+-					 DCS_SET_DISPLAY_ON,
+-					 NULL,
+-					 0,
+-					 CMD_DATA_SRC_SYSTEM_MEM,
+-					 MDFLD_DSI_QUEUE_PACKAGE);
+-		if (err) {
+-			dev_err(dev->dev, "DCS 0x%x sent failed\n",
+-							DCS_SET_DISPLAY_ON);
+-			goto power_err;
+-		}
+-
+-		/* set tear effect on */
+-		err = mdfld_dsi_send_dcs(sender,
+-					 DCS_SET_TEAR_ON,
+-					 &param,
+-					 1,
+-					 CMD_DATA_SRC_SYSTEM_MEM,
+-					 MDFLD_DSI_QUEUE_PACKAGE);
+-		if (err) {
+-			dev_err(dev->dev, "DCS 0x%x sent failed\n",
+-							set_tear_on);
+-			goto power_err;
+-		}
+-
+-		/**
+-		 * FIXME: remove this later
+-		 */
+-		err = mdfld_dsi_send_dcs(sender,
+-					 DCS_WRITE_MEM_START,
+-					 NULL,
+-					 0,
+-					 CMD_DATA_SRC_PIPE,
+-					 MDFLD_DSI_QUEUE_PACKAGE);
+-		if (err) {
+-			dev_err(dev->dev, "DCS 0x%x sent failed\n",
+-						DCS_WRITE_MEM_START);
+-			goto power_err;
+-		}
+-	} else {
+-		/* Set tear effect off */
+-		err = mdfld_dsi_send_dcs(sender,
+-					 DCS_SET_TEAR_OFF,
+-					 NULL,
+-					 0,
+-					 CMD_DATA_SRC_SYSTEM_MEM,
+-					 MDFLD_DSI_QUEUE_PACKAGE);
+-		if (err) {
+-			dev_err(dev->dev, "DCS 0x%x sent failed\n",
+-							DCS_SET_TEAR_OFF);
+-			goto power_err;
+-		}
+-
+-		/* Turn display off */
+-		err = mdfld_dsi_send_dcs(sender,
+-					 DCS_SET_DISPLAY_OFF,
+-					 NULL,
+-					 0,
+-					 CMD_DATA_SRC_SYSTEM_MEM,
+-					 MDFLD_DSI_QUEUE_PACKAGE);
+-		if (err) {
+-			dev_err(dev->dev, "DCS 0x%x sent failed\n",
+-						DCS_SET_DISPLAY_OFF);
+-			goto power_err;
+-		}
+-
+-		/* Now enter sleep mode */
+-		err = mdfld_dsi_send_dcs(sender,
+-					 DCS_ENTER_SLEEP_MODE,
+-					 NULL,
+-					 0,
+-					 CMD_DATA_SRC_SYSTEM_MEM,
+-					 MDFLD_DSI_QUEUE_PACKAGE);
+-		if (err) {
+-			dev_err(dev->dev, "DCS 0x%x sent failed\n",
+-							DCS_ENTER_SLEEP_MODE);
+-			goto power_err;
+-		}
+-	}
+-	mdfld_dsi_cmds_kick_out(sender);
+-power_err:
+-	return err;
+-}
+-
+-/*
+- * send a generic DCS command with a parameter list
+- */
+-int mdfld_dsi_dbi_send_dcs(struct mdfld_dsi_dbi_output *dbi_output,
+-			u8 dcs,  u8 *param, u32 num, u8 data_src)
+-{
+-	struct mdfld_dsi_pkg_sender *sender =
+-		mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
+-	int ret;
+-
+-	if (!sender) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-
+-	ret = mdfld_dsi_send_dcs(sender,
+-				 dcs,
+-				 param,
+-				 num,
+-				 data_src,
+-				 MDFLD_DSI_SEND_PACKAGE);
+-
+-	return ret;
+-}
+-
+-/*
+- * Enter DSR
+- */
+-void mdfld_dsi_dbi_enter_dsr(struct mdfld_dsi_dbi_output *dbi_output, int pipe)
+-{
+-	u32 reg_val;
+-	struct drm_device *dev = dbi_output->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct drm_crtc *crtc = dbi_output->base.base.crtc;
+-	struct psb_intel_crtc *psb_crtc = (crtc) ?
+-					to_psb_intel_crtc(crtc) : NULL;
+-	u32 dpll_reg = MRST_DPLL_A;
+-	u32 pipeconf_reg = PIPEACONF;
+-	u32 dspcntr_reg = DSPACNTR;
+-
+-	if (!dbi_output)
+-		return;
+-
+-	/* FIXME check if can go */
+-	dev_priv->is_in_idle = true;
+-
+-	gdbi_output = dbi_output;
+-	if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
+-		(psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
+-		return;
+-
+-	if (pipe == 2) {
+-		dpll_reg = MRST_DPLL_A;
+-		pipeconf_reg = PIPECCONF;
+-		dspcntr_reg = DSPCCNTR;
+-	}
+-
+-	if (!gma_power_begin(dev, true)) {
+-		dev_err(dev->dev, "hw begin failed\n");
+-		return;
+-	}
+-	/* Disable te interrupts */
+-	mdfld_disable_te(dev, pipe);
+-
+-	/* Disable plane */
+-	reg_val = REG_READ(dspcntr_reg);
+-	if (!(reg_val & DISPLAY_PLANE_ENABLE)) {
+-		REG_WRITE(dspcntr_reg, reg_val & ~DISPLAY_PLANE_ENABLE);
+-		REG_READ(dspcntr_reg);
+-	}
+-
+-	/* Disable pipe */
+-	reg_val = REG_READ(pipeconf_reg);
+-	if (!(reg_val & DISPLAY_PLANE_ENABLE)) {
+-		reg_val &= ~DISPLAY_PLANE_ENABLE;
+-		reg_val |= (PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF);
+-		REG_WRITE(pipeconf_reg, reg_val);
+-		REG_READ(pipeconf_reg);
+-		mdfldWaitForPipeDisable(dev, pipe);
+-	}
+-
+-	/* Disable DPLL */
+-	reg_val = REG_READ(dpll_reg);
+-	if (!(reg_val & DPLL_VCO_ENABLE)) {
+-		reg_val &= ~DPLL_VCO_ENABLE;
+-		REG_WRITE(dpll_reg, reg_val);
+-		REG_READ(dpll_reg);
+-		udelay(500);
+-	}
+-
+-	gma_power_end(dev);
+-	dbi_output->mode_flags |= MODE_SETTING_IN_DSR;
+-	if (pipe == 2) {
+-		enter_dsr = 1;
+-		/* pm_schedule_suspend(&dev->pdev->dev, gfxrtdelay); */
+-	}
+-}
+-
+-static void mdfld_dbi_output_exit_dsr(struct mdfld_dsi_dbi_output *dbi_output,
+-			int pipe)
+-{
+-	struct drm_device *dev = dbi_output->dev;
+-	struct drm_crtc *crtc = dbi_output->base.base.crtc;
+-	struct psb_intel_crtc *psb_crtc = (crtc) ?
+-					to_psb_intel_crtc(crtc) : NULL;
+-	u32 reg_val;
+-	u32 dpll_reg = MRST_DPLL_A;
+-	u32 pipeconf_reg = PIPEACONF;
+-	u32 dspcntr_reg = DSPACNTR;
+-	u32 reg_offset = 0;
+-
+-	/*if mode setting on-going, back off*/
+-	if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
+-		(psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
+-		return;
+-
+-	if (pipe == 2) {
+-		dpll_reg = MRST_DPLL_A;
+-		pipeconf_reg = PIPECCONF;
+-		dspcntr_reg = DSPCCNTR;
+-		reg_offset = MIPIC_REG_OFFSET;
+-	}
+-
+-	if (!gma_power_begin(dev, true)) {
+-		dev_err(dev->dev, "hw begin failed\n");
+-		return;
+-	}
+-
+-	/* Enable DPLL */
+-	reg_val = REG_READ(dpll_reg);
+-	if (!(reg_val & DPLL_VCO_ENABLE)) {
+-		if (reg_val & MDFLD_PWR_GATE_EN) {
+-			reg_val &= ~MDFLD_PWR_GATE_EN;
+-			REG_WRITE(dpll_reg, reg_val);
+-			REG_READ(dpll_reg);
+-			udelay(500);
+-		}
+-
+-		reg_val |= DPLL_VCO_ENABLE;
+-		REG_WRITE(dpll_reg, reg_val);
+-		REG_READ(dpll_reg);
+-		udelay(500);
+-
+-		/* Add timeout */
+-		while (!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK))
+-			cpu_relax();
+-	}
+-
+-	/* Enable pipe */
+-	reg_val = REG_READ(pipeconf_reg);
+-	if (!(reg_val & PIPEACONF_ENABLE)) {
+-		reg_val |= PIPEACONF_ENABLE;
+-		REG_WRITE(pipeconf_reg, reg_val);
+-		REG_READ(pipeconf_reg);
+-		udelay(500);
+-		mdfldWaitForPipeEnable(dev, pipe);
+-	}
+-
+-	/* Enable plane */
+-	reg_val = REG_READ(dspcntr_reg);
+-	if (!(reg_val & DISPLAY_PLANE_ENABLE)) {
+-		reg_val |= DISPLAY_PLANE_ENABLE;
+-		REG_WRITE(dspcntr_reg, reg_val);
+-		REG_READ(dspcntr_reg);
+-		udelay(500);
+-	}
+-
+-	/* Enable TE interrupt on this pipe */
+-	mdfld_enable_te(dev, pipe);
+-	gma_power_end(dev);
+-
+-	/*clean IN_DSR flag*/
+-	dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
+-}
+-
+-/*
+- * Exit from DSR
+- */
+-void mdfld_dsi_dbi_exit_dsr(struct drm_device *dev, u32 update_src)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
+-	struct mdfld_dsi_dbi_output **dbi_output;
+-	int i;
+-	int pipe;
+-
+-	/* FIXME can go ? */
+-	dev_priv->is_in_idle = false;
+-	dbi_output = dsr_info->dbi_outputs;
+-
+-#ifdef CONFIG_PM_RUNTIME
+-	 if (!enable_gfx_rtpm) {
+-/*                pm_runtime_allow(&gpDrmDevice->pdev->dev); */
+-/*		schedule_delayed_work(&rtpm_work, 30 * 1000);*/ /* FIXME: HZ ? */
+-	}
+-#endif
+-
+-	/* For each output, exit dsr */
+-	for (i = 0; i < dsr_info->dbi_output_num; i++) {
+-		/* If panel has been turned off, skip */
+-		if (!dbi_output[i] || !dbi_output[i]->dbi_panel_on)
+-			continue;
+-		pipe = dbi_output[i]->channel_num ? 2 : 0;
+-		enter_dsr = 0;
+-		mdfld_dbi_output_exit_dsr(dbi_output[i], pipe);
+-	}
+-	dev_priv->dsr_fb_update |= update_src;
+-}
+-
+-static bool mdfld_dbi_is_in_dsr(struct drm_device *dev)
+-{
+-	if (REG_READ(MRST_DPLL_A) & DPLL_VCO_ENABLE)
+-		return false;
+-	if ((REG_READ(PIPEACONF) & PIPEACONF_ENABLE) ||
+-	   (REG_READ(PIPECCONF) & PIPEACONF_ENABLE))
+-		return false;
+-	if ((REG_READ(DSPACNTR) & DISPLAY_PLANE_ENABLE) ||
+-	   (REG_READ(DSPCCNTR) & DISPLAY_PLANE_ENABLE))
+-		return false;
+-
+-	return true;
+-}
+-
+-/* Periodically update dbi panel */
+-void mdfld_dbi_update_panel(struct drm_device *dev, int pipe)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
+-	struct mdfld_dsi_dbi_output **dbi_outputs;
+-	struct mdfld_dsi_dbi_output *dbi_output;
+-	int i;
+-	int can_enter_dsr = 0;
+-	u32 damage_mask;
+-
+-	dbi_outputs = dsr_info->dbi_outputs;
+-	dbi_output = pipe ? dbi_outputs[1] : dbi_outputs[0];
+-
+-	if (!dbi_output)
+-		return;
+-
+-	if (pipe == 0)
+-		damage_mask = dev_priv->dsr_fb_update & MDFLD_DSR_DAMAGE_MASK_0;
+-	else if (pipe == 2)
+-		damage_mask = dev_priv->dsr_fb_update & MDFLD_DSR_DAMAGE_MASK_2;
+-	else
+-		return;
+-
+-	/* If FB is damaged and panel is on update on-panel FB */
+-	if (damage_mask && dbi_output->dbi_panel_on) {
+-		dbi_output->dsr_fb_update_done = false;
+-
+-		if (dbi_output->p_funcs->update_fb)
+-			dbi_output->p_funcs->update_fb(dbi_output, pipe);
+-
+-		if (dev_priv->dsr_enable && dbi_output->dsr_fb_update_done)
+-			dev_priv->dsr_fb_update &= ~damage_mask;
+-
+-		/*clean IN_DSR flag*/
+-		dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
+-
+-		dbi_output->dsr_idle_count = 0;
+-	} else {
+-		dbi_output->dsr_idle_count++;
+-	}
+-
+-	switch (dsr_info->dbi_output_num) {
+-	case 1:
+-		if (dbi_output->dsr_idle_count > MDFLD_DSR_MAX_IDLE_COUNT)
+-			can_enter_dsr = 1;
+-		break;
+-	case 2:
+-		if (dbi_outputs[0]->dsr_idle_count > MDFLD_DSR_MAX_IDLE_COUNT
+-		   && dbi_outputs[1]->dsr_idle_count > MDFLD_DSR_MAX_IDLE_COUNT)
+-			can_enter_dsr = 1;
+-		break;
+-	default:
+-		DRM_ERROR("Wrong DBI output number\n");
+-	}
+-
+-	/* Try to enter DSR */
+-	if (can_enter_dsr) {
+-		for (i = 0; i < dsr_info->dbi_output_num; i++) {
+-			if (!mdfld_dbi_is_in_dsr(dev) && dbi_outputs[i] &&
+-			   !(dbi_outputs[i]->mode_flags & MODE_SETTING_ON_GOING)) {
+-				mdfld_dsi_dbi_enter_dsr(dbi_outputs[i],
+-					dbi_outputs[i]->channel_num ? 2 : 0);
+-#if 0
+-				enter_dsr = 1;
+-				pr_err("%s: enter_dsr = 1\n", __func__);
+-#endif
+-			}
+-		}
+-	/*schedule rpm suspend after gfxrtdelay*/
+-#ifdef CONFIG_GFX_RTPM
+-		if (!dev_priv->rpm_enabled
+-			|| !enter_dsr
+-	/*		|| (REG_READ(HDMIB_CONTROL) & HDMIB_PORT_EN) */
+-			|| pm_schedule_suspend(&dev->pdev->dev, gfxrtdelay))
+-			dev_warn(dev->dev,
+-				"Runtime PM schedule suspend failed, rpm %d\n",
+-					dev_priv->rpm_enabled);
+-#endif
+-	}
+-}
+-
+-int mdfld_dbi_dsr_init(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
+-
+-	if (!dsr_info || IS_ERR(dsr_info)) {
+-		dsr_info = kzalloc(sizeof(struct mdfld_dbi_dsr_info),
+-								GFP_KERNEL);
+-		if (!dsr_info) {
+-			dev_err(dev->dev, "No memory\n");
+-			return -ENOMEM;
+-		}
+-		dev_priv->dbi_dsr_info = dsr_info;
+-	}
+-	return 0;
+-}
+-
+-void mdfld_dbi_dsr_exit(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
+-
+-	if (dsr_info) {
+-		kfree(dsr_info);
+-		dev_priv->dbi_dsr_info = NULL;
+-	}
+-}
+-
+-void mdfld_dsi_controller_dbi_init(struct mdfld_dsi_config *dsi_config,
+-								int pipe)
+-{
+-	struct drm_device *dev = dsi_config->dev;
+-	u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+-	int lane_count = dsi_config->lane_count;
+-	u32 val = 0;
+-
+-	dev_dbg(dev->dev, "Init DBI interface on pipe %d...\n", pipe);
+-
+-	/* Un-ready device */
+-	REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
+-
+-	/* Init dsi adapter before kicking off */
+-	REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
+-
+-	/* TODO: figure out how to setup these registers */
+-	REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
+-	REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset),
+-							0x000a0014);
+-	REG_WRITE((MIPIA_DBI_BW_CTRL_REG + reg_offset), 0x00000400);
+-	REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000001);
+-	REG_WRITE((MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset), 0x00000000);
+-
+-	/* Enable all interrupts */
+-	REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
+-	/* Max value: 20 clock cycles of txclkesc */
+-	REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x0000001f);
+-	/* Min 21 txclkesc, max: ffffh */
+-	REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0x0000ffff);
+-	/* Min: 7d0 max: 4e20 */
+-	REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x00000fa0);
+-
+-	/* Set up func_prg */
+-	val |= lane_count;
+-	val |= (dsi_config->channel_num << DSI_DBI_VIRT_CHANNEL_OFFSET);
+-	val |= DSI_DBI_COLOR_FORMAT_OPTION2;
+-	REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
+-
+-	REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 0x3fffff);
+-	REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff);
+-
+-	/* De-assert dbi_stall when half of DBI FIFO is empty */
+-	/* REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000000); */
+-
+-	REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
+-	REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
+-	REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
+-	REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
+-}
+-
+-#if 0
+-/*DBI encoder helper funcs*/
+-static const struct drm_encoder_helper_funcs mdfld_dsi_dbi_helper_funcs = {
+-	.dpms = mdfld_dsi_dbi_dpms,
+-	.mode_fixup = mdfld_dsi_dbi_mode_fixup,
+-	.prepare = mdfld_dsi_dbi_prepare,
+-	.mode_set = mdfld_dsi_dbi_mode_set,
+-	.commit = mdfld_dsi_dbi_commit,
+-};
+-
+-/*DBI encoder funcs*/
+-static const struct drm_encoder_funcs mdfld_dsi_dbi_encoder_funcs = {
+-	.destroy = drm_encoder_cleanup,
+-};
+-
+-#endif
+-
+-/*
+- * Init DSI DBI encoder.
+- * Allocate an mdfld_dsi_encoder and attach it to given @dsi_connector
+- * return pointer of newly allocated DBI encoder, NULL on error
+- */
+-struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev,
+-				struct mdfld_dsi_connector *dsi_connector,
+-				struct panel_funcs *p_funcs)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dsi_dbi_output *dbi_output = NULL;
+-	struct mdfld_dsi_config *dsi_config;
+-	struct drm_connector *connector = NULL;
+-	struct drm_encoder *encoder = NULL;
+-	struct drm_display_mode *fixed_mode = NULL;
+-	struct psb_gtt *pg = dev_priv ? (&dev_priv->gtt) : NULL;
+-	struct mdfld_dbi_dpu_info *dpu_info = dev_priv ? (dev_priv->dbi_dpu_info) : NULL;
+-	struct mdfld_dbi_dsr_info *dsr_info = dev_priv ? (dev_priv->dbi_dsr_info) : NULL;
+-	u32 data = 0;
+-	int pipe;
+-	int ret;
+-
+-	if (!pg || !dsi_connector || !p_funcs) {
+-		WARN_ON(1);
+-		return NULL;
+-	}
+-
+-	dsi_config = mdfld_dsi_get_config(dsi_connector);
+-	pipe = dsi_connector->pipe;
+-
+-	/*panel hard-reset*/
+-	if (p_funcs->reset) {
+-		ret = p_funcs->reset(pipe);
+-		if (ret) {
+-			DRM_ERROR("Panel %d hard-reset failed\n", pipe);
+-			return NULL;
+-		}
+-	}
+-	/* Panel drvIC init */
+-	if (p_funcs->drv_ic_init)
+-		p_funcs->drv_ic_init(dsi_config, pipe);
+-
+-	/* Panel power mode detect */
+-	ret = mdfld_dsi_get_power_mode(dsi_config,
+-				       &data,
+-				       MDFLD_DSI_HS_TRANSMISSION);
+-	if (ret) {
+-		DRM_ERROR("Panel %d get power mode failed\n", pipe);
+-		dsi_connector->status = connector_status_disconnected;
+-	} else {
+-		DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
+-		dsi_connector->status = connector_status_connected;
+-	}
+-
+-	/*TODO: get panel info from DDB*/
+-
+-	dbi_output = kzalloc(sizeof(struct mdfld_dsi_dbi_output), GFP_KERNEL);
+-	if (!dbi_output) {
+-		dev_err(dev->dev, "No memory\n");
+-		return NULL;
+-	}
+-
+-	if (dsi_connector->pipe == 0) {
+-		dbi_output->channel_num = 0;
+-		dev_priv->dbi_output = dbi_output;
+-	} else if (dsi_connector->pipe == 2) {
+-		dbi_output->channel_num = 1;
+-		dev_priv->dbi_output2 = dbi_output;
+-	} else {
+-		dev_err(dev->dev, "only support 2 DSI outputs\n");
+-		goto out_err1;
+-	}
+-
+-	dbi_output->dev = dev;
+-	dbi_output->p_funcs = p_funcs;
+-	fixed_mode = dsi_config->fixed_mode;
+-	dbi_output->panel_fixed_mode = fixed_mode;
+-
+-	/* Create drm encoder object */
+-	connector = &dsi_connector->base.base;
+-	encoder = &dbi_output->base.base;
+-	/* Review this if we ever get MIPI-HDMI bridges or similar */
+-	drm_encoder_init(dev,
+-			encoder,
+-			p_funcs->encoder_funcs,
+-			DRM_MODE_ENCODER_LVDS);
+-	drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs);
+-
+-	/* Attach to given connector */
+-	drm_mode_connector_attach_encoder(connector, encoder);
+-
+-	/* Set possible CRTCs and clones */
+-	if (dsi_connector->pipe) {
+-		encoder->possible_crtcs = (1 << 2);
+-		encoder->possible_clones = (1 << 1);
+-	} else {
+-		encoder->possible_crtcs = (1 << 0);
+-		encoder->possible_clones = (1 << 0);
+-	}
+-
+-	dev_priv->dsr_fb_update = 0;
+-	dev_priv->dsr_enable = false;
+-	dev_priv->exit_idle = mdfld_dsi_dbi_exit_dsr;
+-
+-	dbi_output->first_boot = true;
+-	dbi_output->mode_flags = MODE_SETTING_IN_ENCODER;
+-
+-	/* Add this output to dpu_info if in DPU mode */
+-	if (dpu_info && dsi_connector->status == connector_status_connected) {
+-		if (dsi_connector->pipe == 0)
+-			dpu_info->dbi_outputs[0] = dbi_output;
+-		else
+-			dpu_info->dbi_outputs[1] = dbi_output;
+-
+-		dpu_info->dbi_output_num++;
+-	} else if (dsi_connector->status == connector_status_connected) {
+-		/* Add this output to dsr_info if not */
+-		if (dsi_connector->pipe == 0)
+-			dsr_info->dbi_outputs[0] = dbi_output;
+-		else
+-			dsr_info->dbi_outputs[1] = dbi_output;
+-
+-		dsr_info->dbi_output_num++;
+-	}
+-	return &dbi_output->base;
+-out_err1:
+-	kfree(dbi_output);
+-	return NULL;
+-}
+diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.h b/drivers/staging/gma500/mdfld_dsi_dbi.h
+deleted file mode 100644
+index f0fa986..0000000
+--- a/drivers/staging/gma500/mdfld_dsi_dbi.h
++++ /dev/null
+@@ -1,173 +0,0 @@
+-/*
+- * Copyright © 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * jim liu <jim.liu at intel.com>
+- * Jackie Li<yaodong.li at intel.com>
+- */
+-
+-#ifndef __MDFLD_DSI_DBI_H__
+-#define __MDFLD_DSI_DBI_H__
+-
+-#include <linux/backlight.h>
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-#include <drm/drm_crtc.h>
+-#include <drm/drm_edid.h>
+-
+-#include "psb_drv.h"
+-#include "psb_intel_drv.h"
+-#include "psb_intel_reg.h"
+-#include "power.h"
+-
+-#include "mdfld_dsi_output.h"
+-#include "mdfld_output.h"
+-
+-/*
+- * DBI encoder which inherits from mdfld_dsi_encoder
+- */
+-struct mdfld_dsi_dbi_output {
+-	struct mdfld_dsi_encoder base;
+-	struct drm_display_mode *panel_fixed_mode;
+-	u8 last_cmd;
+-	u8 lane_count;
+-	u8 channel_num;
+-	struct drm_device *dev;
+-
+-	/* Backlight operations */
+-
+-	/* DSR timer */
+-	u32 dsr_idle_count;
+-	bool dsr_fb_update_done;
+-
+-	/* Mode setting flags */
+-	u32 mode_flags;
+-
+-	/* Panel status */
+-	bool dbi_panel_on;
+-	bool first_boot;
+-	struct panel_funcs *p_funcs;
+-
+-	/* DPU */
+-	u32 *dbi_cb_addr;
+-	u32 dbi_cb_phy;
+-	spinlock_t cb_lock;
+-	u32 cb_write;
+-};
+-
+-#define MDFLD_DSI_DBI_OUTPUT(dsi_encoder) \
+-	container_of(dsi_encoder, struct mdfld_dsi_dbi_output, base)
+-
+-struct mdfld_dbi_dsr_info {
+-	int dbi_output_num;
+-	struct mdfld_dsi_dbi_output *dbi_outputs[2];
+-
+-	u32 dsr_idle_count;
+-};
+-
+-#define DBI_CB_TIMEOUT_COUNT	0xffff
+-
+-/* Offsets */
+-#define CMD_MEM_ADDR_OFFSET	0
+-
+-#define CMD_DATA_SRC_SYSTEM_MEM	0
+-#define CMD_DATA_SRC_PIPE	1
+-
+-static inline int mdfld_dsi_dbi_fifo_ready(struct mdfld_dsi_dbi_output *dbi_output)
+-{
+-	struct drm_device *dev = dbi_output->dev;
+-	u32 retry = DBI_CB_TIMEOUT_COUNT;
+-	int reg_offset = (dbi_output->channel_num == 1) ? MIPIC_REG_OFFSET : 0;
+-	int ret = 0;
+-
+-	/* Query the dbi fifo status*/
+-	while (retry--) {
+-		if (REG_READ(MIPIA_GEN_FIFO_STAT_REG + reg_offset) & (1 << 27))
+-			break;
+-	}
+-
+-	if (!retry) {
+-		DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
+-		ret = -EAGAIN;
+-	}
+-	return ret;
+-}
+-
+-static inline int mdfld_dsi_dbi_cmd_sent(struct mdfld_dsi_dbi_output *dbi_output)
+-{
+-	struct drm_device *dev = dbi_output->dev;
+-	u32 retry = DBI_CB_TIMEOUT_COUNT;
+-	int reg_offset = (dbi_output->channel_num == 1) ? MIPIC_REG_OFFSET : 0;
+-	int ret = 0;
+-
+-	/* Query the command execution status */
+-	while (retry--)
+-		if (!(REG_READ(MIPIA_CMD_ADD_REG + reg_offset) & (1 << 0)))
+-			break;
+-
+-	if (!retry) {
+-		DRM_ERROR("Timeout waiting for DBI command status\n");
+-		ret = -EAGAIN;
+-	}
+-
+-	return ret;
+-}
+-
+-static inline int mdfld_dsi_dbi_cb_ready(struct mdfld_dsi_dbi_output *dbi_output)
+-{
+-	int ret = 0;
+-
+-	/* Query the command execution status*/
+-	ret = mdfld_dsi_dbi_cmd_sent(dbi_output);
+-	if (ret) {
+-		DRM_ERROR("Peripheral is busy\n");
+-		ret = -EAGAIN;
+-	}
+-	/* Query the dbi fifo status*/
+-	ret = mdfld_dsi_dbi_fifo_ready(dbi_output);
+-	if (ret) {
+-		DRM_ERROR("DBI FIFO is not empty\n");
+-		ret = -EAGAIN;
+-	}
+-	return ret;
+-}
+-
+-extern void mdfld_dsi_dbi_output_init(struct drm_device *dev,
+-			struct psb_intel_mode_device *mode_dev, int pipe);
+-extern void mdfld_dsi_dbi_exit_dsr(struct drm_device *dev, u32 update_src);
+-extern void mdfld_dsi_dbi_enter_dsr(struct mdfld_dsi_dbi_output *dbi_output,
+-			int pipe);
+-extern int mdfld_dbi_dsr_init(struct drm_device *dev);
+-extern void mdfld_dbi_dsr_exit(struct drm_device *dev);
+-extern struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev,
+-			struct mdfld_dsi_connector *dsi_connector,
+-			struct panel_funcs *p_funcs);
+-extern int mdfld_dsi_dbi_send_dcs(struct mdfld_dsi_dbi_output *dbi_output,
+-			u8 dcs, u8 *param, u32 num, u8 data_src);
+-extern int mdfld_dsi_dbi_update_area(struct mdfld_dsi_dbi_output *dbi_output,
+-			u16 x1, u16 y1, u16 x2, u16 y2);
+-extern int mdfld_dsi_dbi_update_power(struct mdfld_dsi_dbi_output *dbi_output,
+-			int mode);
+-extern void mdfld_dsi_controller_dbi_init(struct mdfld_dsi_config *dsi_config,
+-			int pipe);
+-
+-#endif /*__MDFLD_DSI_DBI_H__*/
+diff --git a/drivers/staging/gma500/mdfld_dsi_dbi_dpu.c b/drivers/staging/gma500/mdfld_dsi_dbi_dpu.c
+deleted file mode 100644
+index a4e2ff4..0000000
+--- a/drivers/staging/gma500/mdfld_dsi_dbi_dpu.c
++++ /dev/null
+@@ -1,778 +0,0 @@
+-/*
+- * Copyright © 2010-2011 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Jim Liu <jim.liu at intel.com>
+- * Jackie Li<yaodong.li at intel.com>
+- */
+-
+-#include "mdfld_dsi_dbi_dpu.h"
+-#include "mdfld_dsi_dbi.h"
+-
+-/*
+- * NOTE: all mdlfd_x_damage funcs should be called by holding dpu_update_lock
+- */
+-
+-static int mdfld_cursor_damage(struct mdfld_dbi_dpu_info *dpu_info,
+-			   mdfld_plane_t plane,
+-			   struct psb_drm_dpu_rect *damaged_rect)
+-{
+-	int x, y;
+-	int new_x, new_y;
+-	struct psb_drm_dpu_rect *rect;
+-	struct psb_drm_dpu_rect *pipe_rect;
+-	int cursor_size;
+-	struct mdfld_cursor_info *cursor;
+-	mdfld_plane_t fb_plane;
+-
+-	if (plane == MDFLD_CURSORA) {
+-		cursor = &dpu_info->cursors[0];
+-		x = dpu_info->cursors[0].x;
+-		y = dpu_info->cursors[0].y;
+-		cursor_size = dpu_info->cursors[0].size;
+-		pipe_rect = &dpu_info->damage_pipea;
+-		fb_plane = MDFLD_PLANEA;
+-	} else {
+-		cursor = &dpu_info->cursors[1];
+-		x = dpu_info->cursors[1].x;
+-		y = dpu_info->cursors[1].y;
+-		cursor_size = dpu_info->cursors[1].size;
+-		pipe_rect = &dpu_info->damage_pipec;
+-		fb_plane = MDFLD_PLANEC;
+-	}
+-	new_x = damaged_rect->x;
+-	new_y = damaged_rect->y;
+-
+-	if (x == new_x && y == new_y)
+-		return 0;
+-
+-	rect = &dpu_info->damaged_rects[plane];
+-	/* Move to right */
+-	if (new_x >= x) {
+-		if (new_y > y) {
+-			rect->x = x;
+-			rect->y = y;
+-			rect->width = (new_x + cursor_size) - x;
+-			rect->height = (new_y + cursor_size) - y;
+-			goto cursor_out;
+-		} else {
+-			rect->x = x;
+-			rect->y = new_y;
+-			rect->width = (new_x + cursor_size) - x;
+-			rect->height = (y - new_y);
+-			goto cursor_out;
+-		}
+-	} else {
+-		if (new_y > y) {
+-			rect->x = new_x;
+-			rect->y = y;
+-			rect->width = (x + cursor_size) - new_x;
+-			rect->height = new_y - y;
+-			goto cursor_out;
+-		} else {
+-			rect->x = new_x;
+-			rect->y = new_y;
+-			rect->width = (x + cursor_size) - new_x;
+-			rect->height = (y + cursor_size) - new_y;
+-		}
+-	}
+-cursor_out:
+-	if (new_x < 0)
+-		cursor->x = 0;
+-	else if (new_x > 864)
+-		cursor->x = 864;
+-	else
+-		cursor->x = new_x;
+-
+-	if (new_y < 0)
+-		cursor->y = 0;
+-	else if (new_y > 480)
+-		cursor->y = 480;
+-	else
+-		cursor->y = new_y;
+-
+-	/*
+-	 * FIXME: this is a workaround for cursor plane update,
+-	 * remove it later!
+-	 */
+-	rect->x = 0;
+-	rect->y = 0;
+-	rect->width = 864;
+-	rect->height = 480;
+-
+-	mdfld_check_boundary(dpu_info, rect);
+-	mdfld_dpu_region_extent(pipe_rect, rect);
+-
+-	/* Update pending status of dpu_info */
+-	dpu_info->pending |= (1 << plane);
+-	/* Update fb panel as well */
+-	dpu_info->pending |= (1 << fb_plane);
+-	return 0;
+-}
+-
+-static int mdfld_fb_damage(struct mdfld_dbi_dpu_info *dpu_info,
+-				   mdfld_plane_t plane,
+-				   struct psb_drm_dpu_rect *damaged_rect)
+-{
+-	struct psb_drm_dpu_rect *rect;
+-
+-	if (plane == MDFLD_PLANEA)
+-		rect = &dpu_info->damage_pipea;
+-	else
+-		rect = &dpu_info->damage_pipec;
+-
+-	mdfld_check_boundary(dpu_info, damaged_rect);
+-
+-	/* Add fb damage area to this pipe */
+-	mdfld_dpu_region_extent(rect, damaged_rect);
+-
+-	/* Update pending status of dpu_info */
+-	dpu_info->pending |= (1 << plane);
+-	return 0;
+-}
+-
+-/* Do nothing here, right now */
+-static int mdfld_overlay_damage(struct mdfld_dbi_dpu_info *dpu_info,
+-				mdfld_plane_t plane,
+-				struct psb_drm_dpu_rect *damaged_rect)
+-{
+-	return 0;
+-}
+-
+-int mdfld_dbi_dpu_report_damage(struct drm_device *dev,
+-				mdfld_plane_t plane,
+-				struct psb_drm_dpu_rect *rect)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+-	int ret = 0;
+-
+-	/* DPU not in use, no damage reporting needed */
+-	if (dpu_info == NULL)
+-		return 0;
+-
+-	spin_lock(&dpu_info->dpu_update_lock);
+-
+-	switch (plane) {
+-	case MDFLD_PLANEA:
+-	case MDFLD_PLANEC:
+-		mdfld_fb_damage(dpu_info, plane, rect);
+-		break;
+-	case MDFLD_CURSORA:
+-	case MDFLD_CURSORC:
+-		mdfld_cursor_damage(dpu_info, plane, rect);
+-		break;
+-	case MDFLD_OVERLAYA:
+-	case MDFLD_OVERLAYC:
+-		mdfld_overlay_damage(dpu_info, plane, rect);
+-		break;
+-	default:
+-		DRM_ERROR("Invalid plane type %d\n", plane);
+-		ret = -EINVAL;
+-	}
+-	spin_unlock(&dpu_info->dpu_update_lock);
+-	return ret;
+-}
+-
+-int mdfld_dbi_dpu_report_fullscreen_damage(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv;
+-	struct mdfld_dbi_dpu_info *dpu_info;
+-	struct mdfld_dsi_config  *dsi_config;
+-	struct psb_drm_dpu_rect rect;
+-	int i;
+-
+-	if (!dev) {
+-		DRM_ERROR("Invalid parameter\n");
+-		return -EINVAL;
+-	}
+-
+-	dev_priv = dev->dev_private;
+-	dpu_info = dev_priv->dbi_dpu_info;
+-
+-	/* This is fine - we may be in non DPU mode */
+-	if (!dpu_info)
+-		return -EINVAL;
+-
+-	for (i = 0; i < dpu_info->dbi_output_num; i++) {
+-		dsi_config = dev_priv->dsi_configs[i];
+-		if (dsi_config) {
+-			rect.x = rect.y = 0;
+-			rect.width = dsi_config->fixed_mode->hdisplay;
+-			rect.height = dsi_config->fixed_mode->vdisplay;
+-			mdfld_dbi_dpu_report_damage(dev,
+-				    i ? (MDFLD_PLANEC) : (MDFLD_PLANEA),
+-				    &rect);
+-		}
+-	}
+-	/* Exit DSR state */
+-	mdfld_dpu_exit_dsr(dev);
+-	return 0;
+-}
+-
+-int mdfld_dsi_dbi_dsr_off(struct drm_device *dev,
+-					struct psb_drm_dpu_rect *rect)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+-
+-	mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, rect);
+-
+-	/* If dual display mode */
+-	if (dpu_info->dbi_output_num == 2)
+-		mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, rect);
+-
+-	/* Force dsi to exit DSR mode */
+-	mdfld_dpu_exit_dsr(dev);
+-	return 0;
+-}
+-
+-static void mdfld_dpu_cursor_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
+-						 mdfld_plane_t plane)
+-{
+-	struct drm_device *dev = dpu_info->dev;
+-	u32 curpos_reg = CURAPOS;
+-	u32 curbase_reg = CURABASE;
+-	u32 curcntr_reg = CURACNTR;
+-	struct mdfld_cursor_info *cursor = &dpu_info->cursors[0];
+-
+-	if (plane == MDFLD_CURSORC) {
+-		curpos_reg = CURCPOS;
+-		curbase_reg = CURCBASE;
+-		curcntr_reg = CURCCNTR;
+-		cursor = &dpu_info->cursors[1];
+-	}
+-
+-	REG_WRITE(curcntr_reg, REG_READ(curcntr_reg));
+-	REG_WRITE(curpos_reg,
+-		(((cursor->x & CURSOR_POS_MASK) << CURSOR_X_SHIFT) |
+-		((cursor->y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT)));
+-	REG_WRITE(curbase_reg, REG_READ(curbase_reg));
+-}
+-
+-static void mdfld_dpu_fb_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
+-						 mdfld_plane_t plane)
+-{
+-	u32 pipesrc_reg = PIPEASRC;
+-	u32 dspsize_reg = DSPASIZE;
+-	u32 dspoff_reg = DSPALINOFF;
+-	u32 dspsurf_reg = DSPASURF;
+-	u32 dspstride_reg = DSPASTRIDE;
+-	u32 stride;
+-	struct psb_drm_dpu_rect *rect = &dpu_info->damage_pipea;
+-	struct drm_device *dev = dpu_info->dev;
+-
+-	if (plane == MDFLD_PLANEC) {
+-		pipesrc_reg = PIPECSRC;
+-		dspsize_reg = DSPCSIZE;
+-		dspoff_reg = DSPCLINOFF;
+-		dspsurf_reg = DSPCSURF;
+-		dspstride_reg = DSPCSTRIDE;
+-		rect = &dpu_info->damage_pipec;
+-	}
+-
+-	stride = REG_READ(dspstride_reg);
+-	/* FIXME: should I do the pipe src update here? */
+-	REG_WRITE(pipesrc_reg, ((rect->width - 1) << 16) | (rect->height - 1));
+-	/* Flush plane */
+-	REG_WRITE(dspsize_reg, ((rect->height - 1) << 16) | (rect->width - 1));
+-	REG_WRITE(dspoff_reg, ((rect->x * 4) + (rect->y * stride)));
+-	REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
+-
+-	/*
+-	 * TODO: wait for flip finished and restore the pipesrc reg,
+-	 * or cursor will be show at a wrong position
+-	 */
+-}
+-
+-static void mdfld_dpu_overlay_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
+-						  mdfld_plane_t plane)
+-{
+-}
+-
+-/*
+- * TODO: we are still in dbi normal mode now, we will try to use partial
+- * mode later.
+- */
+-static int mdfld_dbi_prepare_cb(struct mdfld_dsi_dbi_output *dbi_output,
+-				struct mdfld_dbi_dpu_info *dpu_info, int pipe)
+-{
+-	u8 *cb_addr = (u8 *)dbi_output->dbi_cb_addr;
+-	u32 *index;
+-	struct psb_drm_dpu_rect *rect = pipe ?
+-		(&dpu_info->damage_pipec) : (&dpu_info->damage_pipea);
+-
+-	/* FIXME: lock command buffer, this may lead to a deadlock,
+-	   as we already hold the dpu_update_lock */
+-	if (!spin_trylock(&dbi_output->cb_lock)) {
+-		DRM_ERROR("lock command buffer failed, try again\n");
+-		return -EAGAIN;
+-	}
+-
+-	index = &dbi_output->cb_write;
+-
+-	if (*index) {
+-		DRM_ERROR("DBI command buffer unclean\n");
+-		return -EAGAIN;
+-	}
+-
+-	/* Column address */
+-	*(cb_addr + ((*index)++)) = set_column_address;
+-	*(cb_addr + ((*index)++)) = rect->x >> 8;
+-	*(cb_addr + ((*index)++)) = rect->x;
+-	*(cb_addr + ((*index)++)) = (rect->x + rect->width - 1) >> 8;
+-	*(cb_addr + ((*index)++)) = (rect->x + rect->width - 1);
+-
+-	*index = 8;
+-
+-	/* Page address */
+-	*(cb_addr + ((*index)++)) = set_page_addr;
+-	*(cb_addr + ((*index)++)) = rect->y >> 8;
+-	*(cb_addr + ((*index)++)) = rect->y;
+-	*(cb_addr + ((*index)++)) = (rect->y + rect->height - 1) >> 8;
+-	*(cb_addr + ((*index)++)) = (rect->y + rect->height - 1);
+-
+-	*index = 16;
+-
+-	/*write memory*/
+-	*(cb_addr + ((*index)++)) = write_mem_start;
+-
+-	return 0;
+-}
+-
+-static int mdfld_dbi_flush_cb(struct mdfld_dsi_dbi_output *dbi_output, int pipe)
+-{
+-	u32 cmd_phy = dbi_output->dbi_cb_phy;
+-	u32 *index = &dbi_output->cb_write;
+-	int reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+-	struct drm_device *dev = dbi_output->dev;
+-
+-	if (*index == 0 || !dbi_output)
+-		return 0;
+-
+-	REG_WRITE((MIPIA_CMD_LEN_REG + reg_offset), 0x010505);
+-	REG_WRITE((MIPIA_CMD_ADD_REG + reg_offset), cmd_phy | 3);
+-
+-	*index = 0;
+-
+-	/* FIXME: unlock command buffer */
+-	spin_unlock(&dbi_output->cb_lock);
+-	return 0;
+-}
+-
+-static int mdfld_dpu_update_pipe(struct mdfld_dsi_dbi_output *dbi_output,
+-				 struct mdfld_dbi_dpu_info *dpu_info, int pipe)
+-{
+-	struct drm_device *dev =  dbi_output->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	mdfld_plane_t cursor_plane = MDFLD_CURSORA;
+-	mdfld_plane_t fb_plane = MDFLD_PLANEA;
+-	mdfld_plane_t overlay_plane = MDFLD_OVERLAYA;
+-	int ret = 0;
+-	u32 plane_mask = MDFLD_PIPEA_PLANE_MASK;
+-
+-	/* Damaged rects on this pipe */
+-	if (pipe) {
+-		cursor_plane = MDFLD_CURSORC;
+-		fb_plane = MDFLD_PLANEC;
+-		overlay_plane = MDFLD_OVERLAYC;
+-		plane_mask = MDFLD_PIPEC_PLANE_MASK;
+-	}
+-
+-	/*update cursor which assigned to @pipe*/
+-	if (dpu_info->pending & (1 << cursor_plane))
+-		mdfld_dpu_cursor_plane_flush(dpu_info, cursor_plane);
+-
+-	/*update fb which assigned to @pipe*/
+-	if (dpu_info->pending & (1 << fb_plane))
+-		mdfld_dpu_fb_plane_flush(dpu_info, fb_plane);
+-
+-	/* TODO: update overlay */
+-	if (dpu_info->pending & (1 << overlay_plane))
+-		mdfld_dpu_overlay_plane_flush(dpu_info, overlay_plane);
+-
+-	/* Flush damage area to panel fb */
+-	if (dpu_info->pending & plane_mask) {
+-		ret = mdfld_dbi_prepare_cb(dbi_output, dpu_info, pipe);
+-		/*
+-		 * TODO: remove b_dsr_enable later,
+-		 * added it so that text console could boot smoothly
+-		 */
+-		/* Clean pending flags on this pipe */
+-		if (!ret && dev_priv->dsr_enable) {
+-			dpu_info->pending &= ~plane_mask;
+-			/* Reset overlay pipe damage rect */
+-			mdfld_dpu_init_damage(dpu_info, pipe);
+-		}
+-	}
+-	return ret;
+-}
+-
+-static int mdfld_dpu_update_fb(struct drm_device *dev)
+-{
+-	struct drm_crtc *crtc;
+-	struct psb_intel_crtc *psb_crtc;
+-	struct mdfld_dsi_dbi_output **dbi_output;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+-	bool pipe_updated[2];
+-	unsigned long irq_flags;
+-	u32 dpll_reg = MRST_DPLL_A;
+-	u32 dspcntr_reg = DSPACNTR;
+-	u32 pipeconf_reg = PIPEACONF;
+-	u32 dsplinoff_reg = DSPALINOFF;
+-	u32 dspsurf_reg = DSPASURF;
+-	u32 mipi_state_reg = MIPIA_INTR_STAT_REG;
+-	u32 reg_offset = 0;
+-	int pipe;
+-	int i;
+-	int ret;
+-
+-	dbi_output = dpu_info->dbi_outputs;
+-	pipe_updated[0] = pipe_updated[1] = false;
+-
+-	if (!gma_power_begin(dev, true))
+-		return -EAGAIN;
+-
+-	/* Try to prevent any new damage reports */
+-	if (!spin_trylock_irqsave(&dpu_info->dpu_update_lock, irq_flags))
+-		return -EAGAIN;
+-
+-	for (i = 0; i < dpu_info->dbi_output_num; i++) {
+-		crtc = dbi_output[i]->base.base.crtc;
+-		psb_crtc = (crtc) ? to_psb_intel_crtc(crtc) : NULL;
+-
+-		pipe = dbi_output[i]->channel_num ? 2 : 0;
+-
+-		if (pipe == 2) {
+-			dspcntr_reg = DSPCCNTR;
+-			pipeconf_reg = PIPECCONF;
+-			dsplinoff_reg = DSPCLINOFF;
+-			dspsurf_reg = DSPCSURF;
+-			reg_offset = MIPIC_REG_OFFSET;
+-		}
+-
+-		if (!(REG_READ((MIPIA_GEN_FIFO_STAT_REG + reg_offset))
+-							& (1 << 27)) ||
+-			!(REG_READ(dpll_reg) & DPLL_VCO_ENABLE) ||
+-			!(REG_READ(dspcntr_reg) & DISPLAY_PLANE_ENABLE) ||
+-			!(REG_READ(pipeconf_reg) & DISPLAY_PLANE_ENABLE)) {
+-			dev_err(dev->dev,
+-				"DBI FIFO is busy, DSI %d state %x\n",
+-				pipe,
+-				REG_READ(mipi_state_reg + reg_offset));
+-			continue;
+-		}
+-
+-		/*
+-		 *	If DBI output is in a exclusive state then the pipe
+-		 *	change won't be updated
+-		 */
+-		if (dbi_output[i]->dbi_panel_on &&
+-		   !(dbi_output[i]->mode_flags & MODE_SETTING_ON_GOING) &&
+-		   !(psb_crtc &&
+-			psb_crtc->mode_flags & MODE_SETTING_ON_GOING) &&
+-		   !(dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)) {
+-			ret = mdfld_dpu_update_pipe(dbi_output[i],
+-				dpu_info, dbi_output[i]->channel_num ? 2 : 0);
+-			if (!ret)
+-				pipe_updated[i] = true;
+-		}
+-	}
+-
+-	for (i = 0; i < dpu_info->dbi_output_num; i++)
+-		if (pipe_updated[i])
+-			mdfld_dbi_flush_cb(dbi_output[i],
+-				dbi_output[i]->channel_num ? 2 : 0);
+-
+-	spin_unlock_irqrestore(&dpu_info->dpu_update_lock, irq_flags);
+-	gma_power_end(dev);
+-	return 0;
+-}
+-
+-static int __mdfld_dbi_exit_dsr(struct mdfld_dsi_dbi_output *dbi_output,
+-								int pipe)
+-{
+-	struct drm_device *dev = dbi_output->dev;
+-	struct drm_crtc *crtc = dbi_output->base.base.crtc;
+-	struct psb_intel_crtc *psb_crtc = (crtc) ? to_psb_intel_crtc(crtc)
+-								: NULL;
+-	u32 reg_val;
+-	u32 dpll_reg = MRST_DPLL_A;
+-	u32 pipeconf_reg = PIPEACONF;
+-	u32 dspcntr_reg = DSPACNTR;
+-	u32 dspbase_reg = DSPABASE;
+-	u32 dspsurf_reg = DSPASURF;
+-	u32 reg_offset = 0;
+-
+-	if (!dbi_output)
+-		return 0;
+-
+-	/* If mode setting on-going, back off */
+-	if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
+-		(psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
+-		return -EAGAIN;
+-
+-	if (pipe == 2) {
+-		dpll_reg = MRST_DPLL_A;
+-		pipeconf_reg = PIPECCONF;
+-		dspcntr_reg = DSPCCNTR;
+-		dspbase_reg = MDFLD_DSPCBASE;
+-		dspsurf_reg = DSPCSURF;
+-
+-		reg_offset = MIPIC_REG_OFFSET;
+-	}
+-
+-	if (!gma_power_begin(dev, true))
+-		return -EAGAIN;
+-
+-	/* Enable DPLL */
+-	reg_val = REG_READ(dpll_reg);
+-	if (!(reg_val & DPLL_VCO_ENABLE)) {
+-
+-		if (reg_val & MDFLD_PWR_GATE_EN) {
+-			reg_val &= ~MDFLD_PWR_GATE_EN;
+-			REG_WRITE(dpll_reg, reg_val);
+-			REG_READ(dpll_reg);
+-			udelay(500);
+-		}
+-
+-		reg_val |= DPLL_VCO_ENABLE;
+-		REG_WRITE(dpll_reg, reg_val);
+-		REG_READ(dpll_reg);
+-		udelay(500);
+-
+-		/* FIXME: add timeout */
+-		while (!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK))
+-			cpu_relax();
+-	}
+-
+-	/* Enable pipe */
+-	reg_val = REG_READ(pipeconf_reg);
+-	if (!(reg_val & PIPEACONF_ENABLE)) {
+-		reg_val |= PIPEACONF_ENABLE;
+-		REG_WRITE(pipeconf_reg, reg_val);
+-		REG_READ(pipeconf_reg);
+-		udelay(500);
+-		mdfldWaitForPipeEnable(dev, pipe);
+-	}
+-
+-	/* Enable plane */
+-	reg_val = REG_READ(dspcntr_reg);
+-	if (!(reg_val & DISPLAY_PLANE_ENABLE)) {
+-		reg_val |= DISPLAY_PLANE_ENABLE;
+-		REG_WRITE(dspcntr_reg, reg_val);
+-		REG_READ(dspcntr_reg);
+-		udelay(500);
+-	}
+-
+-	gma_power_end(dev);
+-
+-	/* Clean IN_DSR flag */
+-	dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
+-
+-	return 0;
+-}
+-
+-int mdfld_dpu_exit_dsr(struct drm_device *dev)
+-{
+-	struct mdfld_dsi_dbi_output **dbi_output;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+-	int i;
+-	int pipe;
+-
+-	dbi_output = dpu_info->dbi_outputs;
+-
+-	for (i = 0; i < dpu_info->dbi_output_num; i++) {
+-		/* If this output is not in DSR mode, don't call exit dsr */
+-		if (dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)
+-			__mdfld_dbi_exit_dsr(dbi_output[i],
+-					dbi_output[i]->channel_num ? 2 : 0);
+-	}
+-
+-	/* Enable TE interrupt */
+-	for (i = 0; i < dpu_info->dbi_output_num; i++) {
+-		/* If this output is not in DSR mode, don't call exit dsr */
+-		pipe = dbi_output[i]->channel_num ? 2 : 0;
+-		if (dbi_output[i]->dbi_panel_on && pipe) {
+-			mdfld_disable_te(dev, 0);
+-			mdfld_enable_te(dev, 2);
+-		} else if (dbi_output[i]->dbi_panel_on && !pipe) {
+-			mdfld_disable_te(dev, 2);
+-			mdfld_enable_te(dev, 0);
+-		}
+-	}
+-	return 0;
+-}
+-
+-static int mdfld_dpu_enter_dsr(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+-	struct mdfld_dsi_dbi_output **dbi_output;
+-	int i;
+-
+-	dbi_output = dpu_info->dbi_outputs;
+-
+-	for (i = 0; i < dpu_info->dbi_output_num; i++) {
+-		/* If output is off or already in DSR state, don't re-enter */
+-		if (dbi_output[i]->dbi_panel_on &&
+-		   !(dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)) {
+-			mdfld_dsi_dbi_enter_dsr(dbi_output[i],
+-				dbi_output[i]->channel_num ? 2 : 0);
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-static void mdfld_dbi_dpu_timer_func(unsigned long data)
+-{
+-	struct drm_device *dev = (struct drm_device *)data;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+-	struct timer_list *dpu_timer = &dpu_info->dpu_timer;
+-	unsigned long flags;
+-
+-	if (dpu_info->pending) {
+-		dpu_info->idle_count = 0;
+-		/* Update panel fb with damaged area */
+-		mdfld_dpu_update_fb(dev);
+-	} else {
+-		dpu_info->idle_count++;
+-	}
+-
+-	if (dpu_info->idle_count >= MDFLD_MAX_IDLE_COUNT) {
+-		mdfld_dpu_enter_dsr(dev);
+-		/* Stop timer by return */
+-		return;
+-	}
+-
+-	spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
+-	if (!timer_pending(dpu_timer)) {
+-		dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
+-		add_timer(dpu_timer);
+-	}
+-	spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
+-}
+-
+-void mdfld_dpu_update_panel(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+-
+-	if (dpu_info->pending) {
+-		dpu_info->idle_count = 0;
+-
+-		/*update panel fb with damaged area*/
+-		mdfld_dpu_update_fb(dev);
+-	} else {
+-		dpu_info->idle_count++;
+-	}
+-
+-	if (dpu_info->idle_count >= MDFLD_MAX_IDLE_COUNT) {
+-		/*enter dsr*/
+-		mdfld_dpu_enter_dsr(dev);
+-	}
+-}
+-
+-static int mdfld_dbi_dpu_timer_init(struct drm_device *dev,
+-				struct mdfld_dbi_dpu_info *dpu_info)
+-{
+-	struct timer_list *dpu_timer = &dpu_info->dpu_timer;
+-	unsigned long flags;
+-
+-	spin_lock_init(&dpu_info->dpu_timer_lock);
+-	spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
+-
+-	init_timer(dpu_timer);
+-
+-	dpu_timer->data = (unsigned long)dev;
+-	dpu_timer->function = mdfld_dbi_dpu_timer_func;
+-	dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
+-
+-	spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
+-
+-	return 0;
+-}
+-
+-void mdfld_dbi_dpu_timer_start(struct mdfld_dbi_dpu_info *dpu_info)
+-{
+-	struct timer_list *dpu_timer = &dpu_info->dpu_timer;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
+-	if (!timer_pending(dpu_timer)) {
+-		dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
+-		add_timer(dpu_timer);
+-	}
+-	spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
+-}
+-
+-int mdfld_dbi_dpu_init(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+-
+-	if (!dpu_info || IS_ERR(dpu_info)) {
+-		dpu_info = kzalloc(sizeof(struct mdfld_dbi_dpu_info),
+-								GFP_KERNEL);
+-		if (!dpu_info) {
+-			DRM_ERROR("No memory\n");
+-			return -ENOMEM;
+-		}
+-		dev_priv->dbi_dpu_info = dpu_info;
+-	}
+-
+-	dpu_info->dev = dev;
+-
+-	dpu_info->cursors[0].size = MDFLD_CURSOR_SIZE;
+-	dpu_info->cursors[1].size = MDFLD_CURSOR_SIZE;
+-
+-	/*init dpu_update_lock*/
+-	spin_lock_init(&dpu_info->dpu_update_lock);
+-
+-	/*init dpu refresh timer*/
+-	mdfld_dbi_dpu_timer_init(dev, dpu_info);
+-
+-	/*init pipe damage area*/
+-	mdfld_dpu_init_damage(dpu_info, 0);
+-	mdfld_dpu_init_damage(dpu_info, 2);
+-
+-	return 0;
+-}
+-
+-void mdfld_dbi_dpu_exit(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+-
+-	if (!dpu_info)
+-		return;
+-
+-	del_timer_sync(&dpu_info->dpu_timer);
+-	kfree(dpu_info);
+-	dev_priv->dbi_dpu_info = NULL;
+-}
+-
+-
+diff --git a/drivers/staging/gma500/mdfld_dsi_dbi_dpu.h b/drivers/staging/gma500/mdfld_dsi_dbi_dpu.h
+deleted file mode 100644
+index 42367ed..0000000
+--- a/drivers/staging/gma500/mdfld_dsi_dbi_dpu.h
++++ /dev/null
+@@ -1,154 +0,0 @@
+-/*
+- * Copyright © 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * jim liu <jim.liu at intel.com>
+- * Jackie Li<yaodong.li at intel.com>
+- */
+-
+-#ifndef __MDFLD_DSI_DBI_DPU_H__
+-#define __MDFLD_DSI_DBI_DPU_H__
+-
+-#include "mdfld_dsi_dbi.h"
+-
+-typedef enum {
+-	MDFLD_PLANEA,
+-	MDFLD_PLANEC,
+-	MDFLD_CURSORA,
+-	MDFLD_CURSORC,
+-	MDFLD_OVERLAYA,
+-	MDFLD_OVERLAYC,
+-	MDFLD_PLANE_NUM,
+-} mdfld_plane_t;
+-
+-#define MDFLD_PIPEA_PLANE_MASK	0x15
+-#define MDFLD_PIPEC_PLANE_MASK	0x2A
+-
+-struct mdfld_cursor_info {
+-	int x, y;
+-	int size;
+-};
+-
+-#define MDFLD_CURSOR_SIZE	64
+-
+-/*
+- * enter DSR mode if screen has no update for 2 frames.
+- */
+-#define MDFLD_MAX_IDLE_COUNT	2
+-
+-struct mdfld_dbi_dpu_info {
+-	struct drm_device *dev;
+-	/* Lock */
+-	spinlock_t dpu_update_lock;
+-
+-	/* Cursor postion */
+-	struct mdfld_cursor_info cursors[2];
+-
+-	/* Damaged area for each plane */
+-	struct psb_drm_dpu_rect damaged_rects[MDFLD_PLANE_NUM];
+-
+-	/* Final damaged area */
+-	struct psb_drm_dpu_rect damage_pipea;
+-	struct psb_drm_dpu_rect damage_pipec;
+-
+-	/* Pending */
+-	u32 pending;
+-
+-	/* DPU timer */
+-	struct timer_list dpu_timer;
+-	spinlock_t dpu_timer_lock;
+-
+-	/* DPU idle count */
+-	u32 idle_count;
+-
+-	/* DSI outputs */
+-	struct mdfld_dsi_dbi_output *dbi_outputs[2];
+-	int dbi_output_num;
+-};
+-
+-static inline int mdfld_dpu_region_extent(struct psb_drm_dpu_rect *origin,
+-			 struct psb_drm_dpu_rect *rect)
+-{
+-	int x1, y1, x2, y2;
+-
+-	x1 = origin->x + origin->width;
+-	y1 = origin->y + origin->height;
+-
+-	x2 = rect->x + rect->width;
+-	y2 = rect->y + rect->height;
+-
+-	origin->x = min(origin->x, rect->x);
+-	origin->y = min(origin->y, rect->y);
+-	origin->width = max(x1, x2) - origin->x;
+-	origin->height = max(y1, y2) - origin->y;
+-
+-	return 0;
+-}
+-
+-static inline void mdfld_check_boundary(struct mdfld_dbi_dpu_info *dpu_info,
+-				struct psb_drm_dpu_rect *rect)
+-{
+-	if (rect->x < 0)
+-		rect->x = 0;
+-	if (rect->y < 0)
+-		rect->y = 0;
+-
+-	if (rect->x + rect->width > 864)
+-		rect->width = 864 - rect->x;
+-	if (rect->y + rect->height > 480)
+-		rect->height = 480 - rect->height;
+-
+-	if (!rect->width)
+-		rect->width = 1;
+-	if (!rect->height)
+-		rect->height = 1;
+-}
+-
+-static inline void mdfld_dpu_init_damage(struct mdfld_dbi_dpu_info *dpu_info,
+-				int pipe)
+-{
+-	struct psb_drm_dpu_rect *rect;
+-
+-	if (pipe == 0)
+-		rect = &dpu_info->damage_pipea;
+-	else
+-		rect = &dpu_info->damage_pipec;
+-
+-	rect->x = 864;
+-	rect->y = 480;
+-	rect->width = -864;
+-	rect->height = -480;
+-}
+-
+-extern int mdfld_dsi_dbi_dsr_off(struct drm_device *dev,
+-				struct psb_drm_dpu_rect *rect);
+-extern int mdfld_dbi_dpu_report_damage(struct drm_device *dev,
+-				mdfld_plane_t plane,
+-				struct psb_drm_dpu_rect *rect);
+-extern int mdfld_dbi_dpu_report_fullscreen_damage(struct drm_device *dev);
+-extern int mdfld_dpu_exit_dsr(struct drm_device *dev);
+-extern void mdfld_dbi_dpu_timer_start(struct mdfld_dbi_dpu_info *dpu_info);
+-extern int mdfld_dbi_dpu_init(struct drm_device *dev);
+-extern void mdfld_dbi_dpu_exit(struct drm_device *dev);
+-extern void mdfld_dpu_update_panel(struct drm_device *dev);
+-
+-#endif /*__MDFLD_DSI_DBI_DPU_H__*/
+diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.c b/drivers/staging/gma500/mdfld_dsi_dpi.c
+deleted file mode 100644
+index e685f12..0000000
+--- a/drivers/staging/gma500/mdfld_dsi_dpi.c
++++ /dev/null
+@@ -1,805 +0,0 @@
+-/*
+- * Copyright © 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * jim liu <jim.liu at intel.com>
+- * Jackie Li<yaodong.li at intel.com>
+- */
+-
+-#include "mdfld_dsi_dpi.h"
+-#include "mdfld_output.h"
+-#include "mdfld_dsi_pkg_sender.h"
+-
+-
+-static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe)
+-{
+-	u32 gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
+-	int timeout = 0;
+-
+-	if (pipe == 2)
+-		gen_fifo_stat_reg += MIPIC_REG_OFFSET;
+-
+-	udelay(500);
+-
+-	/* This will time out after approximately 2+ seconds */
+-	while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_DATA_FULL)) {
+-		udelay(100);
+-		timeout++;
+-	}
+-
+-	if (timeout == 20000)
+-		dev_warn(dev->dev, "MIPI: HS Data FIFO was never cleared!\n");
+-}
+-
+-static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe)
+-{
+-	u32 gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
+-	int timeout = 0;
+-
+-	if (pipe == 2)
+-		gen_fifo_stat_reg += MIPIC_REG_OFFSET;
+-
+-	udelay(500);
+-
+-	/* This will time out after approximately 2+ seconds */
+-	while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_CTRL_FULL)) {
+-		udelay(100);
+-		timeout++;
+-	}
+-	if (timeout == 20000)
+-		dev_warn(dev->dev, "MIPI: HS CMD FIFO was never cleared!\n");
+-}
+-
+-static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe)
+-{
+-	u32 gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
+-        int timeout = 0;
+-
+-	if (pipe == 2)
+-		gen_fifo_stat_reg += MIPIC_REG_OFFSET;
+-
+-        udelay(500);
+-
+-        /* This will time out after approximately 2+ seconds */
+-        while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) & DPI_FIFO_EMPTY)
+-                                                        != DPI_FIFO_EMPTY)) {
+-                udelay(100);
+-                timeout++;
+-        }
+-
+-        if (timeout == 20000)
+-                dev_warn(dev->dev, "MIPI: DPI FIFO was never cleared!\n");
+-}
+-
+-static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe)
+-{
+-	u32 intr_stat_reg = MIPIA_INTR_STAT_REG;
+-	int timeout = 0;
+-
+-	if (pipe == 2)
+-		intr_stat_reg += MIPIC_REG_OFFSET;
+-
+-        udelay(500);
+-
+-        /* This will time out after approximately 2+ seconds */
+-        while ((timeout < 20000) && (!(REG_READ(intr_stat_reg) & DSI_INTR_STATE_SPL_PKG_SENT))) {
+-                udelay(100);
+-                timeout++;
+-        }
+-
+-        if (timeout == 20000)
+-                dev_warn(dev->dev, "MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n");
+-}
+-
+-
+-/* ************************************************************************* *\
+- * FUNCTION: mdfld_dsi_tpo_ic_init
+- *
+- * DESCRIPTION:  This function is called only by mrst_dsi_mode_set and
+- *               restore_display_registers.  since this function does not
+- *               acquire the mutex, it is important that the calling function
+- *               does!
+-\* ************************************************************************* */
+-void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe)
+-{
+-	struct drm_device *dev = dsi_config->dev;
+-	u32 dcsChannelNumber = dsi_config->channel_num;
+-	u32 gen_data_reg = MIPIA_HS_GEN_DATA_REG; 
+-	u32 gen_ctrl_reg = MIPIA_HS_GEN_CTRL_REG;
+-	u32 gen_ctrl_val = GEN_LONG_WRITE;
+-
+-	if (pipe == 2) {
+-		gen_data_reg = HS_GEN_DATA_REG + MIPIC_REG_OFFSET; 
+-		gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
+-	}
+-
+-	gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
+-
+-	/* Flip page order */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x00008036);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
+-
+-	/* 0xF0 */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x005a5af0);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+-
+-	/* Write protection key */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x005a5af1);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+-
+-	/* 0xFC */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x005a5afc);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+-
+-	/* 0xB7 */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x770000b7);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x00000044);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS));
+-
+-	/* 0xB6 */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x000a0ab6);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+-
+-	/* 0xF2 */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x081010f2);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x4a070708);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x000000c5);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+-
+-	/* 0xF8 */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x024003f8);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x01030a04);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x0e020220);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x00000004);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS));
+-
+-	/* 0xE2 */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x398fc3e2);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x0000916f);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS));
+-
+-	/* 0xB0 */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x000000b0);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
+-
+-	/* 0xF4 */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x240242f4);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x78ee2002);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x2a071050);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x507fee10);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x10300710);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS));
+-
+-	/* 0xBA */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x19fe07ba);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x101c0a31);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x00000010);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+-
+-	/* 0xBB */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x28ff07bb);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x24280a31);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x00000034);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
+-
+-	/* 0xFB */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x535d05fb);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x1b1a2130);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x221e180e);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x131d2120);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x535d0508);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x1c1a2131);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x231f160d);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x111b2220);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x535c2008);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x1f1d2433);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x2c251a10);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x2c34372d);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x00000023);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
+-
+-	/* 0xFA */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x525c0bfa);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x1c1c232f);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x2623190e);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x18212625);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x545d0d0e);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x1e1d2333);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x26231a10);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x1a222725);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x545d280f);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x21202635);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x31292013);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x31393d33);
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x00000029);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
+-
+-	/* Set DM */
+-	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
+-	REG_WRITE(gen_data_reg, 0x000100f7);
+-	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
+-	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
+-}
+-
+-static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count,
+-						int num_lane, int bpp)
+-{
+-	return (u16)((pixel_clock_count * bpp) / (num_lane * 8)); 
+-}
+-
+-/*
+- * Calculate the dpi time basing on a given drm mode @mode
+- * return 0 on success.
+- * FIXME: I was using proposed mode value for calculation, may need to 
+- * use crtc mode values later 
+- */
+-int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode, 
+-			struct mdfld_dsi_dpi_timing *dpi_timing,
+-			int num_lane, int bpp)
+-{
+-	int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive;
+-	int pclk_vsync, pclk_vfp, pclk_vbp, pclk_vactive;
+-	
+-	if(!mode || !dpi_timing) {
+-		DRM_ERROR("Invalid parameter\n");
+-		return -EINVAL;
+-	}
+-	
+-	pclk_hactive = mode->hdisplay;
+-	pclk_hfp = mode->hsync_start - mode->hdisplay;
+-	pclk_hsync = mode->hsync_end - mode->hsync_start;
+-	pclk_hbp = mode->htotal - mode->hsync_end;
+-	
+-	pclk_vactive = mode->vdisplay;
+-	pclk_vfp = mode->vsync_start - mode->vdisplay;
+-	pclk_vsync = mode->vsync_end - mode->vsync_start;
+-	pclk_vbp = mode->vtotal - mode->vsync_end;
+-
+-	/*
+-	 * byte clock counts were calculated by following formula
+-	 * bclock_count = pclk_count * bpp / num_lane / 8
+-	 */
+-	dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hsync, num_lane, bpp);
+-	dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hbp, num_lane, bpp);
+-	dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hfp, num_lane, bpp);
+-	dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hactive, num_lane, bpp);
+-	dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_vsync, num_lane, bpp);
+-	dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_vbp, num_lane, bpp);
+-	dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_vfp, num_lane, bpp);
+-
+-	return 0; 
+-}
+-
+-void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config, int pipe)
+-{
+-	struct drm_device *dev = dsi_config->dev;
+-	u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+-	int lane_count = dsi_config->lane_count;
+-	struct mdfld_dsi_dpi_timing dpi_timing;
+-	struct drm_display_mode *mode = dsi_config->mode;
+-	u32 val = 0;
+-	
+-	/*un-ready device*/
+-	REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
+-	
+-	/*init dsi adapter before kicking off*/
+-	REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
+-	
+-	/*enable all interrupts*/
+-	REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
+-	
+-
+-	/*set up func_prg*/
+-	val |= lane_count;
+-	val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
+-		
+-	switch(dsi_config->bpp) {
+-	case 16:
+-		val |= DSI_DPI_COLOR_FORMAT_RGB565;
+-		break;
+-	case 18:
+-		val |= DSI_DPI_COLOR_FORMAT_RGB666;
+-		break;
+-	case 24:
+-		val |= DSI_DPI_COLOR_FORMAT_RGB888;
+-		break;
+-	default:
+-		DRM_ERROR("unsupported color format, bpp = %d\n", dsi_config->bpp);
+-	}
+-	REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
+-	
+-	REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 
+-			(mode->vtotal * mode->htotal * dsi_config->bpp / (8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
+-	REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff & DSI_LP_RX_TIMEOUT_MASK);
+-	
+-	/*max value: 20 clock cycles of txclkesc*/
+-	REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
+-	
+-	/*min 21 txclkesc, max: ffffh*/
+-	REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0xffff & DSI_RESET_TIMER_MASK);
+-
+-	REG_WRITE((MIPIA_DPI_RESOLUTION_REG + reg_offset), mode->vdisplay << 16 | mode->hdisplay);
+-	
+-	/*set DPI timing registers*/
+-	mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing, dsi_config->lane_count, dsi_config->bpp);
+-	
+-	REG_WRITE((MIPIA_HSYNC_COUNT_REG + reg_offset), dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
+-	REG_WRITE((MIPIA_HBP_COUNT_REG + reg_offset), dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
+-	REG_WRITE((MIPIA_HFP_COUNT_REG + reg_offset), dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
+-	REG_WRITE((MIPIA_HACTIVE_COUNT_REG + reg_offset), dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
+-	REG_WRITE((MIPIA_VSYNC_COUNT_REG + reg_offset), dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
+-	REG_WRITE((MIPIA_VBP_COUNT_REG + reg_offset), dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
+-	REG_WRITE((MIPIA_VFP_COUNT_REG + reg_offset), dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
+-	
+-	REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
+-	
+-	/*min: 7d0 max: 4e20*/
+-	REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x000007d0);
+-	
+-	/*set up video mode*/
+-	val = 0;
+-	val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
+-	REG_WRITE((MIPIA_VIDEO_MODE_FORMAT_REG + reg_offset), val);
+-	
+-	REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
+-	
+-	REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
+-	
+-	/*TODO: figure out how to setup these registers*/
+-	REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
+-	
+-	REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset), (0xa << 16) | 0x14);
+-	/*set device ready*/
+-	REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
+-}
+-
+-void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe)
+-{
+-	struct drm_device *dev = output->dev;
+-	u32 reg_offset = 0;
+-	
+-	if(output->panel_on) 
+-		return;
+-		
+-	if(pipe) 
+-		reg_offset = MIPIC_REG_OFFSET;
+-
+-	/* clear special packet sent bit */
+-	if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
+-		REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
+-	}
+-		
+-	/*send turn on package*/
+-	REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_TURN_ON);
+-	
+-	/*wait for SPL_PKG_SENT interrupt*/
+-	mdfld_wait_for_SPL_PKG_SENT(dev, pipe);
+-	
+-	if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
+-		REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
+-	}
+-
+-	output->panel_on = 1;
+-
+-	/* FIXME the following is disabled to WA the X slow start issue for TMD panel */
+-	/* if(pipe == 2) */
+-	/* 	dev_priv->dpi_panel_on2 = true; */
+-	/* else if (pipe == 0) */
+-	/* 	dev_priv->dpi_panel_on = true; */
+-}
+-
+-static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output, int pipe)
+-{
+-	struct drm_device *dev = output->dev;
+-	u32 reg_offset = 0;
+-	
+-	/*if output is on, or mode setting didn't happen, ignore this*/
+-	if((!output->panel_on) || output->first_boot) {
+-		output->first_boot = 0; 
+-		return;
+-	}
+-	
+-	if(pipe)
+-		reg_offset = MIPIC_REG_OFFSET;
+-
+-	/* Wait for dpi fifo to empty */
+-	mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe);
+-
+-	/* Clear the special packet interrupt bit if set */
+-	if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
+-		REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
+-	}
+-	
+-	if(REG_READ(MIPIA_DPI_CONTROL_REG + reg_offset) == DSI_DPI_CTRL_HS_SHUTDOWN) {
+-		dev_warn(dev->dev, "try to send the same package again, abort!");
+-		goto shutdown_out;
+-	}
+-	
+-	REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_SHUTDOWN);
+-
+-shutdown_out:
+-	output->panel_on = 0;
+-	output->first_boot = 0;
+-
+-	/* FIXME the following is disabled to WA the X slow start issue for TMD panel */
+-	/* if(pipe == 2) */
+-	/* 	dev_priv->dpi_panel_on2 = false; */
+-	/* else if (pipe == 0) */
+-	/* 	dev_priv->dpi_panel_on = false;	 */
+-	/* #ifdef CONFIG_PM_RUNTIME*/ 
+-	/*	if (drm_psb_ospm && !enable_gfx_rtpm) { */
+-	/*		pm_runtime_allow(&gpDrmDevice->pdev->dev); */
+-	/*	schedule_delayed_work(&dev_priv->rtpm_work, 30 * 1000); */
+-	/* } */
+-	/*if (enable_gfx_rtpm) */
+-	/*		pm_schedule_suspend(&dev->pdev->dev, gfxrtdelay); */
+-	/* #endif */
+-}
+-
+-void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
+-{
+-	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+-	struct mdfld_dsi_dpi_output *dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+-	struct mdfld_dsi_config *dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
+-	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+-	struct drm_device *dev = dsi_config->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 mipi_reg = MIPI;
+-	u32 pipeconf_reg = PIPEACONF;
+-	
+-	if(pipe) {
+-		mipi_reg = MIPI_C;
+-		pipeconf_reg = PIPECCONF;
+-	}
+-	
+-	/* Start up display island if it was shutdown */
+-	if (!gma_power_begin(dev, true))
+-		return;
+-
+-	if(on) {
+-		if (mdfld_get_panel_type(dev, pipe) == TMD_VID){
+- 			mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+- 		} else {
+-			/* Enable mipi port */
+-			REG_WRITE(mipi_reg, (REG_READ(mipi_reg) | (1 << 31)));
+-			REG_READ(mipi_reg);
+-
+-			mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+-			mdfld_dsi_tpo_ic_init(dsi_config, pipe);
+-		}
+-
+-		if(pipe == 2) {
+-			dev_priv->dpi_panel_on2 = true;
+-		}
+-		else {
+-			dev_priv->dpi_panel_on  = true;
+-		}
+-
+-	} else {
+- 		if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
+- 			mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+- 		} else {
+-			mdfld_dsi_dpi_shut_down(dpi_output, pipe);
+-			/* Disable mipi port */
+-			REG_WRITE(mipi_reg, (REG_READ(mipi_reg) & ~(1<<31)));
+-			REG_READ(mipi_reg);
+-		}
+-
+-		if(pipe == 2)
+-			dev_priv->dpi_panel_on2 = false;
+-		else
+-			dev_priv->dpi_panel_on  = false;
+-	}
+-	gma_power_end(dev);
+-}
+-
+-void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
+-{
+-	dev_dbg(encoder->dev->dev, "DPMS %s\n",
+-			(mode == DRM_MODE_DPMS_ON ? "on":"off"));
+-
+-	if (mode == DRM_MODE_DPMS_ON)
+-		mdfld_dsi_dpi_set_power(encoder, true);
+-	else {
+-		mdfld_dsi_dpi_set_power(encoder, false);
+-#if 0 /* FIXME */
+-#ifdef CONFIG_PM_RUNTIME
+-		if (enable_gfx_rtpm)
+-			pm_schedule_suspend(&gpDrmDevice->pdev->dev, gfxrtdelay);
+-#endif
+-#endif
+-	}
+-}
+-
+-bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
+-				     struct drm_display_mode *mode,
+-				     struct drm_display_mode *adjusted_mode)
+-{
+-	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+-	struct mdfld_dsi_config *dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
+-	struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
+-
+-	if(fixed_mode) {
+-		adjusted_mode->hdisplay = fixed_mode->hdisplay;
+-		adjusted_mode->hsync_start = fixed_mode->hsync_start;
+-		adjusted_mode->hsync_end = fixed_mode->hsync_end;
+-		adjusted_mode->htotal = fixed_mode->htotal;
+-		adjusted_mode->vdisplay = fixed_mode->vdisplay;
+-		adjusted_mode->vsync_start = fixed_mode->vsync_start;
+-		adjusted_mode->vsync_end = fixed_mode->vsync_end;
+-		adjusted_mode->vtotal = fixed_mode->vtotal;
+-		adjusted_mode->clock = fixed_mode->clock;
+-		drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+-	}
+-	
+-	return true;
+-}
+-
+-void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder) 
+-{
+-	mdfld_dsi_dpi_set_power(encoder, false);
+-}
+-
+-void mdfld_dsi_dpi_commit(struct drm_encoder *encoder) 
+-{
+-	mdfld_dsi_dpi_set_power(encoder, true);
+-}
+-
+-void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
+-				   struct drm_display_mode *mode,
+-				   struct drm_display_mode *adjusted_mode)
+-{
+-	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+-	struct mdfld_dsi_dpi_output *dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
+-	struct mdfld_dsi_config *dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
+-	struct drm_device *dev = dsi_config->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+-	
+-	u32 pipeconf_reg = PIPEACONF;
+-	u32 dspcntr_reg = DSPACNTR;
+-	u32 mipi_reg = MIPI;
+-	u32 reg_offset = 0;
+-	
+-	u32 pipeconf = dev_priv->pipeconf;
+-	u32 dspcntr = dev_priv->dspcntr;
+-	u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+-	
+-	dev_dbg(dev->dev, "set mode %dx%d on pipe %d\n",
+-				mode->hdisplay, mode->vdisplay, pipe);
+-
+-	if(pipe) {
+-		pipeconf_reg = PIPECCONF;
+-		dspcntr_reg = DSPCCNTR;
+-		mipi_reg = MIPI_C;
+-		reg_offset = MIPIC_REG_OFFSET;
+-	} else {
+-		mipi |= 2;
+-	}
+-	
+-	if (!gma_power_begin(dev, true))
+-		return;
+-
+-	/* Set up mipi port FIXME: do at init time */
+-	REG_WRITE(mipi_reg, mipi);
+-	REG_READ(mipi_reg);
+-
+-	/* Set up DSI controller DPI interface */
+-	mdfld_dsi_dpi_controller_init(dsi_config, pipe);
+-
+-	if (mdfld_get_panel_type(dev, pipe) != TMD_VID) {
+-		/* Turn on DPI interface */
+-		mdfld_dsi_dpi_turn_on(dpi_output, pipe);
+-	}
+-	
+-	/* Set up pipe */
+-	REG_WRITE(pipeconf_reg, pipeconf);
+-	REG_READ(pipeconf_reg);
+-	
+-	/* Set up display plane */
+-	REG_WRITE(dspcntr_reg, dspcntr);
+-	REG_READ(dspcntr_reg);
+-	
+-	msleep(20); /* FIXME: this should wait for vblank */
+-	
+-	dev_dbg(dev->dev, "State %x, power %d\n",
+-		REG_READ(MIPIA_INTR_STAT_REG + reg_offset),
+-		dpi_output->panel_on);
+-
+-	if (mdfld_get_panel_type(dev, pipe) != TMD_VID) {
+-		/* Init driver ic */
+-		mdfld_dsi_tpo_ic_init(dsi_config, pipe);
+-		/* Init backlight */
+-		mdfld_dsi_brightness_init(dsi_config, pipe);
+-	}
+-	gma_power_end(dev);
+-}
+-
+-
+-/*
+- * Init DSI DPI encoder. 
+- * Allocate an mdfld_dsi_encoder and attach it to given @dsi_connector
+- * return pointer of newly allocated DPI encoder, NULL on error
+- */ 
+-struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, 
+-				struct mdfld_dsi_connector *dsi_connector,
+-				struct panel_funcs *p_funcs)
+-{
+-	struct mdfld_dsi_dpi_output *dpi_output = NULL;
+-	struct mdfld_dsi_config *dsi_config;
+-	struct drm_connector *connector = NULL;
+-	struct drm_encoder *encoder = NULL;
+-	struct drm_display_mode *fixed_mode = NULL;
+-	int pipe;
+-	u32 data;
+-	int ret;
+-
+-	if (!dsi_connector || !p_funcs) {
+-		WARN_ON(1);
+-		return NULL;
+-	}
+-
+-	dsi_config = mdfld_dsi_get_config(dsi_connector);
+-	pipe = dsi_connector->pipe;
+-
+-	/* Panel hard-reset */
+-	if (p_funcs->reset) {
+-		ret = p_funcs->reset(pipe);
+-		if (ret) {
+-			DRM_ERROR("Panel %d hard-reset failed\n", pipe);
+-			return NULL;
+-		}
+-	}
+-
+-	/* Panel drvIC init */
+-	if (p_funcs->drv_ic_init)
+-		p_funcs->drv_ic_init(dsi_config, pipe);
+-
+-	/* Panel power mode detect */
+-	ret = mdfld_dsi_get_power_mode(dsi_config,
+-					&data,
+-					MDFLD_DSI_LP_TRANSMISSION);
+-	if (ret) {
+-		DRM_ERROR("Panel %d get power mode failed\n", pipe);
+-		dsi_connector->status = connector_status_disconnected;
+-	} else {
+-		DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
+-		dsi_connector->status = connector_status_connected;
+-	}
+-
+-	dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
+-	if(!dpi_output) {
+-		dev_err(dev->dev, "No memory for dsi_dpi_output\n");
+-		return NULL;
+-	}
+-
+-	if(dsi_connector->pipe) 
+-		dpi_output->panel_on = 0;
+-	else
+-		dpi_output->panel_on = 0;
+-	
+-	dpi_output->dev = dev;
+-	dpi_output->p_funcs = p_funcs;
+-	dpi_output->first_boot = 1;
+-	
+-	/* Get fixed mode */
+-	dsi_config = mdfld_dsi_get_config(dsi_connector);
+-	fixed_mode = dsi_config->fixed_mode;
+-	
+-	/* Create drm encoder object */
+-	connector = &dsi_connector->base.base;
+-	encoder = &dpi_output->base.base;
+-	/*
+-	 * On existing hardware this will be a panel of some form,
+-	 * if future devices also have HDMI bridges this will need
+-	 * revisiting
+-	 */
+-	drm_encoder_init(dev,
+-			encoder,
+-			p_funcs->encoder_funcs,
+-			DRM_MODE_ENCODER_LVDS);
+-	drm_encoder_helper_add(encoder,
+-				p_funcs->encoder_helper_funcs);
+-	
+-	/* Attach to given connector */
+-	drm_mode_connector_attach_encoder(connector, encoder);
+-	
+-	/* Set possible crtcs and clones */
+-	if(dsi_connector->pipe) {
+-		encoder->possible_crtcs = (1 << 2);
+-		encoder->possible_clones = (1 << 1);
+-	} else {
+-		encoder->possible_crtcs = (1 << 0);
+-		encoder->possible_clones = (1 << 0);
+-	}
+-	return &dpi_output->base;
+-}
+-
+diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.h b/drivers/staging/gma500/mdfld_dsi_dpi.h
+deleted file mode 100644
+index ed92d45..0000000
+--- a/drivers/staging/gma500/mdfld_dsi_dpi.h
++++ /dev/null
+@@ -1,78 +0,0 @@
+-/*
+- * Copyright © 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * jim liu <jim.liu at intel.com>
+- * Jackie Li<yaodong.li at intel.com>
+- */
+-
+-#ifndef __MDFLD_DSI_DPI_H__
+-#define __MDFLD_DSI_DPI_H__
+-
+-#include "mdfld_dsi_output.h"
+-#include "mdfld_output.h"
+-
+-struct mdfld_dsi_dpi_timing {
+-	u16 hsync_count;
+-	u16 hbp_count;
+-	u16 hfp_count;
+-	u16 hactive_count;
+-	u16 vsync_count;
+-	u16 vbp_count;
+-	u16 vfp_count;
+-};
+-
+-struct mdfld_dsi_dpi_output {
+-	struct mdfld_dsi_encoder base;
+-	struct drm_device *dev;
+-
+-	int panel_on;
+-	int first_boot;
+-
+-	struct panel_funcs *p_funcs;
+-};
+-
+-#define MDFLD_DSI_DPI_OUTPUT(dsi_encoder) \
+-	container_of(dsi_encoder, struct mdfld_dsi_dpi_output, base)
+-
+-extern int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
+-			struct mdfld_dsi_dpi_timing *dpi_timing,
+-			int num_lane, int bpp);
+-extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
+-			struct mdfld_dsi_connector *dsi_connector,
+-			struct panel_funcs *p_funcs);
+-
+-/* Medfield DPI helper functions */
+-extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
+-extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
+-			struct drm_display_mode *mode,
+-			struct drm_display_mode *adjusted_mode);
+-extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
+-extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);
+-extern void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
+-			struct drm_display_mode *mode,
+-			struct drm_display_mode *adjusted_mode);
+-extern void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output,
+-			int pipe);
+-extern void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *si_config,
+-			int pipe);
+-#endif /*__MDFLD_DSI_DPI_H__*/
+diff --git a/drivers/staging/gma500/mdfld_dsi_output.c b/drivers/staging/gma500/mdfld_dsi_output.c
+deleted file mode 100644
+index 3f979db..0000000
+--- a/drivers/staging/gma500/mdfld_dsi_output.c
++++ /dev/null
+@@ -1,1014 +0,0 @@
+-/*
+- * Copyright © 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * jim liu <jim.liu at intel.com>
+- * Jackie Li<yaodong.li at intel.com>
+- */
+-
+-#include "mdfld_dsi_output.h"
+-#include "mdfld_dsi_dbi.h"
+-#include "mdfld_dsi_dpi.h"
+-#include "mdfld_output.h"
+-#include <asm/intel_scu_ipc.h>
+-#include "mdfld_dsi_pkg_sender.h"
+-#include <linux/pm_runtime.h>
+-#include <linux/moduleparam.h>
+-
+-#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100
+-
+-static int CABC_control = 1;
+-static int LABC_control = 1;
+-
+-module_param (CABC_control, int, 0644);
+-module_param (LABC_control, int, 0644);
+-
+-/**
+- * make these MCS command global 
+- * we don't need 'movl' everytime we send them.
+- * FIXME: these datas were provided by OEM, we should get them from GCT.
+- **/
+-static u32 mdfld_dbi_mcs_hysteresis[] = {
+-	0x42000f57, 0x8c006400, 0xff00bf00, 0xffffffff,
+-	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+-	0x38000aff, 0x82005000, 0xff00ab00, 0xffffffff,
+-	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+-	0x000000ff,
+-};
+-
+-static u32 mdfld_dbi_mcs_display_profile[] = {
+-	0x50281450, 0x0000c882, 0x00000000, 0x00000000,
+-	0x00000000,
+-};
+-
+-static u32 mdfld_dbi_mcs_kbbc_profile[] = {
+-	0x00ffcc60, 0x00000000, 0x00000000, 0x00000000,
+-}; 
+-	
+-static u32 mdfld_dbi_mcs_gamma_profile[] = {
+-	0x81111158, 0x88888888, 0x88888888,
+-}; 
+-
+-/*
+- * write hysteresis values.
+- */
+-static void mdfld_dsi_write_hysteresis (struct mdfld_dsi_config *dsi_config,
+-                                                                int pipe)
+-{
+-	struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
+-
+-	if(!sender) {
+-	        WARN_ON(1);
+-		return;
+-	}
+-	mdfld_dsi_send_mcs_long_hs(sender,
+-				   mdfld_dbi_mcs_hysteresis,
+-				   17,
+-				   MDFLD_DSI_SEND_PACKAGE);
+-}
+-
+-/*
+- * write display profile values.
+- */
+-static void mdfld_dsi_write_display_profile(struct mdfld_dsi_config *dsi_config, int pipe)
+-{
+-	struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
+-
+-	if(!sender) {
+-	        WARN_ON(1);
+-		return;
+-        }
+-	mdfld_dsi_send_mcs_long_hs(sender,
+-				   mdfld_dbi_mcs_display_profile,
+-				   5,
+-				   MDFLD_DSI_SEND_PACKAGE);
+-}
+-
+-/*
+- * write KBBC profile values.
+- */
+-static void mdfld_dsi_write_kbbc_profile (struct mdfld_dsi_config * dsi_config, int pipe)
+-{
+-	struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
+-
+-	if(!sender) {
+-	        WARN_ON(1);
+-		return;
+-        }
+-	mdfld_dsi_send_mcs_long_hs(sender,
+-				   mdfld_dbi_mcs_kbbc_profile,
+-				   4,
+-				   MDFLD_DSI_SEND_PACKAGE);
+-}
+-
+-/*
+- * write gamma setting.
+- */
+-static void mdfld_dsi_write_gamma_setting (struct mdfld_dsi_config *dsi_config, int pipe)
+-{
+-	struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
+-
+-	if(!sender) {
+-	        WARN_ON(1);
+-		return;
+-	}
+-	mdfld_dsi_send_mcs_long_hs(sender,
+-				   mdfld_dbi_mcs_gamma_profile,
+-				   3,
+-				   MDFLD_DSI_SEND_PACKAGE);
+-}
+-
+-/*
+- * Check and see if the generic control or data buffer is empty and ready.
+- */
+-void mdfld_dsi_gen_fifo_ready (struct drm_device *dev, u32 gen_fifo_stat_reg, u32 fifo_stat)
+-{
+-	u32 GEN_BF_time_out_count = 0;
+-	
+-	/* Check MIPI Adatper command registers */
+-	for (GEN_BF_time_out_count = 0; GEN_BF_time_out_count < GEN_FB_TIME_OUT; GEN_BF_time_out_count++)
+-	{
+-		if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat)
+-			break;
+-		udelay (100);
+-	}
+-
+-	if (GEN_BF_time_out_count == GEN_FB_TIME_OUT)
+-		dev_err(dev->dev,
+-        "mdfld_dsi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x. \n",
+-                                                gen_fifo_stat_reg);
+-}
+-
+-/*
+- * Manage the DSI MIPI keyboard and display brightness.
+- * FIXME: this is exported to OSPM code. should work out an specific 
+- * display interface to OSPM. 
+- */
+-void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
+-{
+-	struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
+-	struct drm_device *dev = sender->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 gen_ctrl_val;
+-	
+-	if(!sender) {
+-	        WARN_ON(1);
+-	        return;
+-	}
+-	/* Set default display backlight value to 85% (0xd8)*/
+-	mdfld_dsi_send_mcs_short_hs(sender,
+-				    write_display_brightness,
+-				    0xd8,
+-				    1,
+-				    MDFLD_DSI_SEND_PACKAGE);
+-
+-	/* Set minimum brightness setting of CABC function to 20% (0x33)*/
+-	mdfld_dsi_send_mcs_short_hs(sender,
+-				    write_cabc_min_bright,
+-				    0x33,
+-				    1,
+-				    MDFLD_DSI_SEND_PACKAGE);
+-
+-	mdfld_dsi_write_hysteresis(dsi_config, pipe);
+-	mdfld_dsi_write_display_profile (dsi_config, pipe);
+-	mdfld_dsi_write_kbbc_profile (dsi_config, pipe);
+-	mdfld_dsi_write_gamma_setting (dsi_config, pipe);
+-
+-	/* Enable backlight or/and LABC */
+-	gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | DISPLAY_DIMMING_ON| BACKLIGHT_ON;
+-	if (LABC_control == 1 || CABC_control == 1)
+-		gen_ctrl_val |= DISPLAY_DIMMING_ON| DISPLAY_BRIGHTNESS_AUTO | GAMMA_AUTO;
+-
+-	if (LABC_control == 1)
+-		gen_ctrl_val |= AMBIENT_LIGHT_SENSE_ON;
+-
+-	dev_priv->mipi_ctrl_display = gen_ctrl_val;
+-
+-	mdfld_dsi_send_mcs_short_hs(sender,
+-				    write_ctrl_display,
+-				    (u8)gen_ctrl_val,
+-				    1,
+-				    MDFLD_DSI_SEND_PACKAGE);
+-
+-	if (CABC_control == 0)
+-		return;
+-	mdfld_dsi_send_mcs_short_hs(sender,
+-				    write_ctrl_cabc,
+-				    UI_IMAGE,
+-				    1,
+-				    MDFLD_DSI_SEND_PACKAGE);
+-}
+-
+-/*
+- * Manage the mipi display brightness.
+- * TODO: refine this interface later
+- */
+-void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, int level)
+-{
+-	struct mdfld_dsi_pkg_sender *sender;
+-	struct drm_psb_private *dev_priv;
+-	struct mdfld_dsi_config *dsi_config;
+-	u32 gen_ctrl_val;
+-	int p_type;	
+-	
+-	if (!dev || (pipe != 0 && pipe != 2)) {
+-		dev_err(dev->dev, "Invalid parameter\n");
+-		return;
+-	}
+-
+-	p_type = mdfld_get_panel_type(dev, 0);
+-
+-	dev_priv = dev->dev_private;
+-
+-	if(pipe)
+-		dsi_config = dev_priv->dsi_configs[1];
+-	else
+-		dsi_config = dev_priv->dsi_configs[0];
+-
+-	sender = mdfld_dsi_get_pkg_sender(dsi_config);
+-
+-	if(!sender) {
+-	        WARN_ON(1);
+-		return;
+-	}
+-
+-	gen_ctrl_val = ((level * 0xff) / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff;
+-
+-	dev_dbg(dev->dev,
+-                "pipe = %d, gen_ctrl_val = %d.  \n", pipe, gen_ctrl_val);
+-	
+-	if(p_type == TMD_VID || p_type == TMD_CMD){
+-		/* Set display backlight value */
+-		mdfld_dsi_send_mcs_short_hs(sender, 
+-					tmd_write_display_brightness, 
+-					(u8)gen_ctrl_val, 
+-	                                 1, 
+-	                        	MDFLD_DSI_SEND_PACKAGE);		
+-	} else {			
+-		/* Set display backlight value */
+-		mdfld_dsi_send_mcs_short_hs(sender,
+-				    write_display_brightness,
+-				    (u8)gen_ctrl_val,
+-                                    1,
+-                                    MDFLD_DSI_SEND_PACKAGE);
+-
+-
+-		/* Enable backlight control */
+-		if (level == 0)
+-			gen_ctrl_val = 0;
+-		else 
+-			gen_ctrl_val = dev_priv->mipi_ctrl_display;
+-
+-		mdfld_dsi_send_mcs_short_hs(sender,
+-                                    write_ctrl_display,
+-                                   (u8)gen_ctrl_val,
+-                                   1,
+-                                   MDFLD_DSI_SEND_PACKAGE);
+-	}
+-}
+-
+-/*
+- * shut down DSI controller
+- */ 
+-void mdfld_dsi_controller_shutdown(struct mdfld_dsi_config * dsi_config, int pipe)
+-{
+-	struct drm_device * dev;
+-	u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+-	int retry = 100;
+-	
+-	if (!dsi_config) {
+-	        WARN_ON(1);
+-		return;
+-	}
+-	
+-	dev = dsi_config->dev;
+-	
+-	if (!gma_power_begin(dev, true)) {
+-		dev_err(dev->dev, "hw begin failed\n");
+-		return;
+-	}
+-		
+-	if(!(REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) &  DSI_DEVICE_READY)) 
+-		goto shutdown_out;
+-	
+-	/* Send shut down package, clean packet send bit first */
+-	if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
+-		REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), 
+-				(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) | DSI_INTR_STATE_SPL_PKG_SENT));
+-	}
+-	
+-	/*send shut down package in HS*/
+-	REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_SHUTDOWN);
+-	
+-	
+-	/*
+-	 * make sure shut down is sent.
+-	 * FIXME: add max retry counter
+-	 */
+-	while(!(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT)) {
+-		retry--;
+-		
+-		if(!retry) {
+-			dev_err(dev->dev, "timeout\n");
+-			break;
+-		}
+-	}
+-	
+-	/*sleep 1 ms to ensure shutdown finished*/
+-	msleep(100);
+-	
+-	/*un-ready device*/
+-	REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset),
+-			   (REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) & ~DSI_DEVICE_READY));
+-
+-shutdown_out:			   
+-	gma_power_end(dev);
+-}
+-
+-void mdfld_dsi_controller_startup(struct mdfld_dsi_config * dsi_config, int pipe)
+-{
+-	struct drm_device * dev;
+-	u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+-	int retry = 100;
+-	
+-	
+-	if (!dsi_config) {
+-		WARN_ON(1);
+-		return;
+-	}
+-	
+-	dev = dsi_config->dev;
+-	dev_dbg(dev->dev, "starting up DSI controller on pipe %d...\n", pipe);
+-	
+-	if (!gma_power_begin(dev, true)) {
+-		dev_err(dev->dev, "hw begin failed\n");
+-		return;
+-	}
+-	
+-	if((REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) & DSI_DEVICE_READY)) 
+-		goto startup_out;
+-	
+-	/*if config DPI, turn on DPI interface*/
+-	if(dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
+-		if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
+-			REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
+-		}
+-		
+-		REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_TURN_ON);
+-		
+-		/*
+-		 * make sure shut down is sent.
+-		 * FIXME: add max retry counter
+-		 */
+-		while(!(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT)) {
+-			retry--;
+-			if(!retry) {
+-				dev_err(dev->dev, "timeout\n");
+-				break;
+-			}
+-		}
+-		
+-		msleep(100);
+-	}
+-	
+-	/*set device ready*/
+-	REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset),
+-			   (REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) | DSI_DEVICE_READY));
+-
+-startup_out:	
+-	gma_power_end(dev);
+-}
+-
+-
+-static int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config,
+-					u8 dcs,
+-					u32 *data,
+-					u8 transmission)
+-{
+-	struct mdfld_dsi_pkg_sender *sender
+-		= mdfld_dsi_get_pkg_sender(dsi_config);
+-
+-	if (!sender || !data) {
+-		DRM_ERROR("Invalid parameter\n");
+-		return -EINVAL;
+-	}
+-
+-	if (transmission == MDFLD_DSI_HS_TRANSMISSION)
+-		return mdfld_dsi_read_mcs_hs(sender, dcs, data, 1);
+-	else if (transmission == MDFLD_DSI_LP_TRANSMISSION)
+-		return mdfld_dsi_read_mcs_lp(sender, dcs, data, 1);
+-	else
+-		return -EINVAL;
+-}
+-
+-int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
+-				u32 *mode,
+-				u8 transmission)
+-{
+-	if (!dsi_config || !mode) {
+-		DRM_ERROR("Invalid parameter\n");
+-		return -EINVAL;
+-	}
+-
+-	return mdfld_dsi_get_panel_status(dsi_config, 0x0a, mode, transmission);
+-}
+-
+-int mdfld_dsi_get_diagnostic_result(struct mdfld_dsi_config *dsi_config,
+-					u32 *result,
+-					u8 transmission)
+-{
+-	if (!dsi_config || !result) {
+-		DRM_ERROR("Invalid parameter\n");
+-		return -EINVAL;
+-	}
+-
+-	return mdfld_dsi_get_panel_status(dsi_config, 0x0f, result,
+-					  transmission);
+-}
+-
+-/*
+- * NOTE: this function was used by OSPM.
+- * TODO: will be removed later, should work out display interfaces for OSPM
+- */
+-void mdfld_dsi_controller_init(struct mdfld_dsi_config * dsi_config, int pipe)
+-{
+-	if(!dsi_config || ((pipe != 0) && (pipe != 2))) {
+-	        WARN_ON(1);
+-		return;
+-	}
+-
+-	if(dsi_config->type)
+-		mdfld_dsi_dpi_controller_init(dsi_config, pipe);
+-	else
+-		mdfld_dsi_controller_dbi_init(dsi_config, pipe);
+-}
+-
+-static void mdfld_dsi_connector_save(struct drm_connector * connector)
+-{
+-}
+-
+-static void mdfld_dsi_connector_restore(struct drm_connector * connector)
+-{
+-}
+-
+-static enum drm_connector_status mdfld_dsi_connector_detect(struct drm_connector * connector, bool force)
+-{
+-	struct psb_intel_output *psb_output
+-					= to_psb_intel_output(connector);
+-	struct mdfld_dsi_connector *dsi_connector
+-	                                = MDFLD_DSI_CONNECTOR(psb_output);
+-	return dsi_connector->status;
+-}
+-
+-static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
+-					struct drm_property *property,
+-					uint64_t value)
+-{
+-	struct drm_encoder *encoder = connector->encoder;
+-
+-	if (!strcmp(property->name, "scaling mode") && encoder) {
+-		struct psb_intel_crtc * psb_crtc = to_psb_intel_crtc(encoder->crtc);
+-		bool bTransitionFromToCentered;
+-		uint64_t curValue;
+-
+-		if (!psb_crtc)
+-			goto set_prop_error;
+-
+-		switch (value) {
+-		case DRM_MODE_SCALE_FULLSCREEN:
+-			break;
+-		case DRM_MODE_SCALE_NO_SCALE:
+-			break;
+-		case DRM_MODE_SCALE_ASPECT:
+-			break;
+-		default:
+-			goto set_prop_error;
+-		}
+-
+-		if (drm_connector_property_get_value(connector, property, &curValue))
+-			goto set_prop_error;
+-
+-		if (curValue == value)
+-			goto set_prop_done;
+-
+-		if (drm_connector_property_set_value(connector, property, value))
+-			goto set_prop_error;
+-
+-		bTransitionFromToCentered = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
+-			(value == DRM_MODE_SCALE_NO_SCALE);
+-
+-		if (psb_crtc->saved_mode.hdisplay != 0 &&
+-		    psb_crtc->saved_mode.vdisplay != 0) {
+-			if (bTransitionFromToCentered) {
+-				if (!drm_crtc_helper_set_mode(encoder->crtc, &psb_crtc->saved_mode,
+-					    encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
+-					goto set_prop_error;
+-			} else {
+-				struct drm_encoder_helper_funcs *pEncHFuncs  = encoder->helper_private;
+-				pEncHFuncs->mode_set(encoder, &psb_crtc->saved_mode,
+-						     &psb_crtc->saved_adjusted_mode);
+-			}
+-		}
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-	} else if (!strcmp(property->name, "backlight") && encoder) {
+-		struct drm_psb_private *dev_priv = encoder->dev->dev_private;
+-		struct backlight_device *psb_bd = dev_priv->backlight_device;
+-		dev_dbg(encoder->dev->dev, "backlight level = %d\n", (int)value);
+-		if (drm_connector_property_set_value(connector, property, value))
+-			goto set_prop_error;
+-		else {
+-			dev_dbg(encoder->dev->dev,
+-			                "set brightness to %d", (int)value);
+-			if (psb_bd) {
+-				psb_bd->props.brightness = value;
+-				backlight_update_status(psb_bd);
+-			}
+-		}
+-#endif
+-	}
+-set_prop_done:
+-    return 0;
+-set_prop_error:
+-    return -1;
+-}
+-
+-static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
+-{
+-	struct psb_intel_output * psb_output = to_psb_intel_output(connector);
+-	struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
+-	struct mdfld_dsi_pkg_sender * sender;
+-	
+-	if(!dsi_connector)
+-	        return;
+-	
+-	drm_sysfs_connector_remove(connector);
+-	drm_connector_cleanup(connector);
+-	
+-	sender = dsi_connector->pkg_sender;
+-
+-	mdfld_dsi_pkg_sender_destroy(sender);
+-
+-	kfree(dsi_connector);
+-}
+-
+-static int mdfld_dsi_connector_get_modes(struct drm_connector * connector)
+-{
+-	struct psb_intel_output * psb_output = to_psb_intel_output(connector);
+-	struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
+-	struct mdfld_dsi_config * dsi_config = mdfld_dsi_get_config(dsi_connector);
+-	struct drm_display_mode * fixed_mode = dsi_config->fixed_mode;
+-	struct drm_display_mode * dup_mode = NULL;
+-	struct drm_device * dev = connector->dev;
+-	
+-	connector->display_info.min_vfreq = 0;
+-	connector->display_info.max_vfreq = 200;
+-	connector->display_info.min_hfreq = 0;
+-	connector->display_info.max_hfreq = 200;
+-
+-	if(fixed_mode) {
+-		dev_dbg(dev->dev, "fixed_mode %dx%d\n",
+-		        fixed_mode->hdisplay, fixed_mode->vdisplay);
+-		
+-		dup_mode = drm_mode_duplicate(dev, fixed_mode);
+-		drm_mode_probed_add(connector, dup_mode);
+-		return 1;
+-	}
+-	dev_err(dev->dev, "Didn't get any modes!\n");
+-	return 0;
+-}
+-
+-static int mdfld_dsi_connector_mode_valid(struct drm_connector * connector, struct drm_display_mode * mode)
+-{
+-	struct psb_intel_output * psb_output = to_psb_intel_output(connector);
+-	struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
+-	struct mdfld_dsi_config * dsi_config = mdfld_dsi_get_config(dsi_connector);
+-	struct drm_display_mode * fixed_mode = dsi_config->fixed_mode;
+-
+-	dev_dbg(connector->dev->dev, "mode %p, fixed mode %p\n",
+-	                                                mode, fixed_mode);
+-
+-	if(mode->flags & DRM_MODE_FLAG_DBLSCAN) 
+-		return MODE_NO_DBLESCAN;
+-
+-	if(mode->flags & DRM_MODE_FLAG_INTERLACE)
+-		return MODE_NO_INTERLACE;
+-
+-	/**
+-	 * FIXME: current DC has no fitting unit, reject any mode setting request
+-	 * will figure out a way to do up-scaling(pannel fitting) later.  
+-	 **/
+-	if(fixed_mode) {
+-		if(mode->hdisplay != fixed_mode->hdisplay)
+-			return MODE_PANEL;
+-
+-		if(mode->vdisplay != fixed_mode->vdisplay)
+-			return MODE_PANEL;
+-	}
+-	dev_dbg(connector->dev->dev, "mode ok\n");
+-
+-	return MODE_OK;
+-}
+-
+-static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
+-{
+-#ifdef CONFIG_PM_RUNTIME
+-	struct drm_device * dev = connector->dev;
+-	struct drm_psb_private * dev_priv = dev->dev_private;
+-	bool panel_on, panel_on2;
+-#endif
+-	/* First, execute DPMS */
+-	drm_helper_connector_dpms(connector, mode);
+-
+-#ifdef CONFIG_PM_RUNTIME
+-	if(mdfld_panel_dpi(dev)) {
+-		/* DPI panel */
+-		panel_on = dev_priv->dpi_panel_on;
+-		panel_on2 = dev_priv->dpi_panel_on2;
+-	} else {
+-		/* DBI panel */
+-		panel_on = dev_priv->dbi_panel_on;
+-		panel_on2 = dev_priv->dbi_panel_on2;
+-	}
+-
+-	/* Then check all display panels + monitors status */
+-	/* Make sure that the Display (B) sub-system status isn't i3 when
+-	 * R/W the DC register, otherwise "Fabric error" issue would occur
+-	 * during S0i3 state. */
+-	if(!panel_on && !panel_on2 && !(REG_READ(HDMIB_CONTROL)
+-	                                        & HDMIB_PORT_EN)) {
+-		/* Request rpm idle */
+-		if(dev_priv->rpm_enabled)
+-			pm_request_idle(&dev->pdev->dev);
+-	}
+-	/*
+-	 * if rpm wasn't enabled yet, try to allow it
+-	 * FIXME: won't enable rpm for DPI since DPI
+-	 * CRTC setting is a little messy now.
+-	 * Enable it later!
+-	 */
+-#if 0
+-	if(!dev_priv->rpm_enabled && !mdfld_panel_dpi(dev))
+-		ospm_runtime_pm_allow(dev);
+-#endif
+-#endif
+-}
+-
+-static struct drm_encoder *mdfld_dsi_connector_best_encoder(
+-                                        struct drm_connector *connector) 
+-{
+-	struct psb_intel_output * psb_output = to_psb_intel_output(connector);
+-	struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
+-	struct mdfld_dsi_config * dsi_config = mdfld_dsi_get_config(dsi_connector);
+-	struct mdfld_dsi_encoder * encoder = NULL;
+-	
+-	if(dsi_config->type == MDFLD_DSI_ENCODER_DBI) 
+-		encoder = dsi_config->encoders[MDFLD_DSI_ENCODER_DBI];
+-	else if (dsi_config->type == MDFLD_DSI_ENCODER_DPI) 
+-		encoder = dsi_config->encoders[MDFLD_DSI_ENCODER_DPI];
+-	
+-	dev_dbg(connector->dev->dev, "get encoder %p\n", encoder);
+-	
+-	if(!encoder) {
+-		dev_err(connector->dev->dev,
+-                        "Invalid encoder for type %d\n", dsi_config->type);
+-		return NULL;
+-	}
+-	dsi_config->encoder = encoder;	
+-	return &encoder->base;	
+-}
+-
+-/* DSI connector funcs */
+-static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
+-	.dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms,
+-	.save = mdfld_dsi_connector_save,
+-	.restore = mdfld_dsi_connector_restore,
+-	.detect = mdfld_dsi_connector_detect,
+-	.fill_modes = drm_helper_probe_single_connector_modes,
+-	.set_property = mdfld_dsi_connector_set_property,
+-	.destroy = mdfld_dsi_connector_destroy,
+-};
+-
+-/* DSI connector helper funcs */
+-static const struct drm_connector_helper_funcs mdfld_dsi_connector_helper_funcs = {
+-	.get_modes = mdfld_dsi_connector_get_modes,
+-	.mode_valid = mdfld_dsi_connector_mode_valid,
+-	.best_encoder = mdfld_dsi_connector_best_encoder,
+-};
+-
+-static int mdfld_dsi_get_default_config(struct drm_device * dev, 
+-										struct mdfld_dsi_config * config, int pipe)
+-{
+-	if(!dev || !config) {
+-	        WARN_ON(1);
+-		return -EINVAL;
+-	}
+-	
+-	config->bpp = 24;
+-	config->type = mdfld_panel_dpi(dev);
+-	config->lane_count = 2;
+-	config->channel_num = 0;
+-	/*NOTE: video mode is ignored when type is MDFLD_DSI_ENCODER_DBI*/
+-	if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
+-		config->video_mode = MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE;
+-	} else {
+-		config->video_mode = MDFLD_DSI_VIDEO_BURST_MODE;
+-	}
+-	
+-	return 0;
+-}
+-
+-/*
+- * Returns the panel fixed mode from configuration. 
+- */
+-struct drm_display_mode *
+-mdfld_dsi_get_configuration_mode(struct mdfld_dsi_config * dsi_config, int pipe)
+-{
+-	struct drm_device *dev = dsi_config->dev;
+-	struct drm_display_mode *mode;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
+-	bool use_gct = false;
+-
+-	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+-	if (!mode) {
+-	        dev_err(dev->dev, "Out of memory for mode\n");
+-		return NULL;
+-        }
+-	if (use_gct) {
+-		dev_dbg(dev->dev, "gct find MIPI panel.\n");
+-
+-		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+-		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+-		mode->hsync_start = mode->hdisplay + \
+-				((ti->hsync_offset_hi << 8) | \
+-				ti->hsync_offset_lo);
+-		mode->hsync_end = mode->hsync_start + \
+-				((ti->hsync_pulse_width_hi << 8) | \
+-				ti->hsync_pulse_width_lo);
+-		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
+-								ti->hblank_lo);
+-		mode->vsync_start = \
+-			mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
+-						ti->vsync_offset_lo);
+-		mode->vsync_end = \
+-			mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
+-						ti->vsync_pulse_width_lo);
+-		mode->vtotal = mode->vdisplay + \
+-				((ti->vblank_hi << 8) | ti->vblank_lo);
+-		mode->clock = ti->pixel_clock * 10;
+-	} else {
+-		if(dsi_config->type == MDFLD_DSI_ENCODER_DPI) { 
+-			if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
+-				mode->hdisplay = 480;
+-				mode->vdisplay = 854;
+-				mode->hsync_start = 487;
+-				mode->hsync_end = 490;
+-				mode->htotal = 499;
+-				mode->vsync_start = 861;
+-				mode->vsync_end = 865;
+-				mode->vtotal = 873;
+-				mode->clock = 33264;
+-			} else {
+-				mode->hdisplay = 864;
+-				mode->vdisplay = 480;
+-				mode->hsync_start = 873;
+-				mode->hsync_end = 876;
+-				mode->htotal = 887;
+-				mode->vsync_start = 487;
+-				mode->vsync_end = 490;
+-				mode->vtotal = 499;
+-				mode->clock = 33264;
+-			}
+-		} else if(dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
+-			mode->hdisplay = 864;
+-			mode->vdisplay = 480;
+-			mode->hsync_start = 872;
+-			mode->hsync_end = 876;
+-			mode->htotal = 884;
+-			mode->vsync_start = 482;
+-			mode->vsync_end = 494;
+-			mode->vtotal = 486;
+-			mode->clock = 25777;
+-			
+-		}
+-	}
+-
+-	drm_mode_set_name(mode);
+-	drm_mode_set_crtcinfo(mode, 0);
+-	
+-	mode->type |= DRM_MODE_TYPE_PREFERRED;
+-
+-	return mode;
+-}
+-
+-int mdfld_dsi_panel_reset(int pipe)
+-{
+-	unsigned gpio;
+-	int ret = 0;
+-
+-	switch (pipe) {
+-	case 0:
+-		gpio = 128;
+-		break;
+-	case 2:
+-		gpio = 34;
+-		break;
+-	default:
+-		DRM_ERROR("Invalid output\n");
+-		return -EINVAL;
+-	}
+-
+-	ret = gpio_request(gpio, "gfx");
+-	if (ret) {
+-		DRM_ERROR("gpio_rqueset failed\n");
+-		return ret;
+-	}
+-
+-	ret = gpio_direction_output(gpio, 1);
+-	if (ret) {
+-		DRM_ERROR("gpio_direction_output failed\n");
+-		goto gpio_error;
+-	}
+-
+-	gpio_get_value(128);
+-
+-gpio_error:
+-	if (gpio_is_valid(gpio))
+-		gpio_free(gpio);
+-
+-	return ret;
+-}
+-
+-/*
+- * MIPI output init
+- * @dev drm device
+- * @pipe pipe number. 0 or 2
+- * @config 
+- * 
+- * Do the initialization of a MIPI output, including create DRM mode objects
+- * initialization of DSI output on @pipe 
+- */
+-void mdfld_dsi_output_init(struct drm_device *dev,
+-			   int pipe, 
+-			   struct mdfld_dsi_config *config,
+-			   struct panel_funcs* p_cmd_funcs,
+-			   struct panel_funcs* p_vid_funcs)
+-{
+-	struct mdfld_dsi_config * dsi_config;
+-	struct mdfld_dsi_connector * dsi_connector;
+-	struct psb_intel_output * psb_output;
+-	struct drm_connector * connector;
+-	struct mdfld_dsi_encoder * encoder;
+-	struct drm_psb_private * dev_priv = dev->dev_private;
+-	struct panel_info dsi_panel_info;
+-	u32 width_mm, height_mm;
+-
+-	dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
+-	
+-	if(!dev || ((pipe != 0) && (pipe != 2))) {
+-	        WARN_ON(1);
+-		return;
+-	}
+-	
+-	/*create a new connetor*/
+-	dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
+-	if(!dsi_connector) {
+-		DRM_ERROR("No memory");
+-		return;
+-	}
+-	
+-	dsi_connector->pipe =  pipe;
+-	
+-	/*set DSI config*/
+-	if(config) { 
+-		dsi_config = config;
+-	} else {
+-		dsi_config = kzalloc(sizeof(struct mdfld_dsi_config), GFP_KERNEL);
+-		if(!dsi_config) {
+-			dev_err(dev->dev,
+-			        "cannot allocate memory for DSI config\n");
+-			goto dsi_init_err0;
+-		}
+-		
+-		mdfld_dsi_get_default_config(dev, dsi_config, pipe);
+-	}
+-	
+-	dsi_connector->private = dsi_config;
+-	
+-	dsi_config->changed = 1;
+-	dsi_config->dev = dev;
+-	
+-	/* Init fixed mode basing on DSI config type */
+-	if(dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
+-		dsi_config->fixed_mode = p_cmd_funcs->get_config_mode(dev);
+-		if(p_cmd_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
+-			goto dsi_init_err0;
+-	} else if(dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
+-		dsi_config->fixed_mode = p_vid_funcs->get_config_mode(dev);
+-		if(p_vid_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
+-			goto dsi_init_err0;
+-	}
+-
+-	width_mm = dsi_panel_info.width_mm;
+-	height_mm = dsi_panel_info.height_mm;
+-
+-	dsi_config->mode = dsi_config->fixed_mode;
+-	dsi_config->connector = dsi_connector;
+-	
+-	if(!dsi_config->fixed_mode) {
+-		dev_err(dev->dev, "No pannel fixed mode was found\n");
+-		goto dsi_init_err0;
+-	}
+-	
+-	if(pipe && dev_priv->dsi_configs[0]) {
+-		dsi_config->dvr_ic_inited = 0;
+-		dev_priv->dsi_configs[1] = dsi_config;
+-	} else if(pipe == 0) {
+-		dsi_config->dvr_ic_inited = 1;
+-		dev_priv->dsi_configs[0] = dsi_config;
+-	} else {
+-		dev_err(dev->dev, "Trying to init MIPI1 before MIPI0\n");
+-		goto dsi_init_err0;
+-	}
+-
+-	/*init drm connector object*/
+-	psb_output = &dsi_connector->base;
+-	
+-	psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2;
+-
+-	connector = &psb_output->base;
+-	/* Revisit type if MIPI/HDMI bridges ever appear on Medfield */
+-	drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
+-						DRM_MODE_CONNECTOR_LVDS);
+-	drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
+-	
+-	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+-	connector->display_info.width_mm = width_mm;
+-	connector->display_info.height_mm = height_mm;
+-	connector->interlace_allowed = false;
+-	connector->doublescan_allowed = false;
+-	
+-	/* Attach properties */
+-	drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
+-	drm_connector_attach_property(connector, dev_priv->backlight_property, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
+-
+-	/* Init DSI package sender on this output */
+-	if (mdfld_dsi_pkg_sender_init(dsi_connector, pipe)) {
+-		DRM_ERROR("Package Sender initialization failed on pipe %d\n", pipe);
+-		goto dsi_init_err0;
+-	}
+-
+-	/* Init DBI & DPI encoders */
+-	if (p_cmd_funcs) {
+-		encoder = mdfld_dsi_dbi_init(dev, dsi_connector, p_cmd_funcs);
+-		if(!encoder) {
+-			dev_err(dev->dev, "Create DBI encoder failed\n");
+-			goto dsi_init_err1;
+-		}
+-		encoder->private = dsi_config;
+-		dsi_config->encoders[MDFLD_DSI_ENCODER_DBI] = encoder;
+-	}
+-	
+-	if(p_vid_funcs) {
+-		encoder = mdfld_dsi_dpi_init(dev, dsi_connector, p_vid_funcs);
+-		if(!encoder) {
+-			dev_err(dev->dev, "Create DPI encoder failed\n");
+-			goto dsi_init_err1;
+-		}
+-		encoder->private = dsi_config;
+-		dsi_config->encoders[MDFLD_DSI_ENCODER_DPI] = encoder;
+-	}
+-	
+-	drm_sysfs_connector_add(connector);
+-	return;
+-	
+-	/*TODO: add code to destroy outputs on error*/
+-dsi_init_err1:
+-	/*destroy sender*/
+-	mdfld_dsi_pkg_sender_destroy(dsi_connector->pkg_sender);
+-
+-	drm_connector_cleanup(connector);
+-	kfree(dsi_config->fixed_mode);
+-	kfree(dsi_config);
+-dsi_init_err0:
+-	kfree(dsi_connector);
+-}
+diff --git a/drivers/staging/gma500/mdfld_dsi_output.h b/drivers/staging/gma500/mdfld_dsi_output.h
+deleted file mode 100644
+index 4699267..0000000
+--- a/drivers/staging/gma500/mdfld_dsi_output.h
++++ /dev/null
+@@ -1,138 +0,0 @@
+-/*
+- * Copyright © 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * jim liu <jim.liu at intel.com>
+- * Jackie Li<yaodong.li at intel.com>
+- */
+-
+-#ifndef __MDFLD_DSI_OUTPUT_H__
+-#define __MDFLD_DSI_OUTPUT_H__
+-
+-#include <linux/backlight.h>
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-#include <drm/drm_crtc.h>
+-#include <drm/drm_edid.h>
+-
+-#include "psb_drv.h"
+-#include "psb_intel_drv.h"
+-#include "psb_intel_reg.h"
+-#include "power.h"
+-#include "mdfld_output.h"
+-
+-#include <asm/mrst.h>
+-
+-
+-static inline struct mdfld_dsi_config *
+-	mdfld_dsi_get_config(struct mdfld_dsi_connector *connector)
+-{
+-	if (!connector)
+-		return NULL;
+-	return (struct mdfld_dsi_config *)connector->private;
+-}
+-
+-static inline void *mdfld_dsi_get_pkg_sender(struct mdfld_dsi_config *config)
+-{
+-	struct mdfld_dsi_connector *dsi_connector;
+-
+-	if (!config)
+-		return NULL;
+-
+-	dsi_connector = config->connector;
+-
+-	if (!dsi_connector)
+-		return NULL;
+-
+-	return dsi_connector->pkg_sender;
+-}
+-
+-static inline struct mdfld_dsi_config *
+-	mdfld_dsi_encoder_get_config(struct mdfld_dsi_encoder *encoder)
+-{
+-	if (!encoder)
+-		return NULL;
+-	return (struct mdfld_dsi_config *)encoder->private;
+-}
+-
+-static inline struct mdfld_dsi_connector *
+-	mdfld_dsi_encoder_get_connector(struct mdfld_dsi_encoder *encoder)
+-{
+-	struct mdfld_dsi_config *config;
+-
+-	if (!encoder)
+-		return NULL;
+-
+-	config = mdfld_dsi_encoder_get_config(encoder);
+-	if (!config)
+-		return NULL;
+-
+-	return config->connector;
+-}
+-
+-static inline void *mdfld_dsi_encoder_get_pkg_sender(
+-	struct mdfld_dsi_encoder *encoder)
+-{
+-	struct mdfld_dsi_config *dsi_config;
+-
+-	dsi_config = mdfld_dsi_encoder_get_config(encoder);
+-	if (!dsi_config)
+-		return NULL;
+-
+-	return mdfld_dsi_get_pkg_sender(dsi_config);
+-}
+-
+-static inline int mdfld_dsi_encoder_get_pipe(struct mdfld_dsi_encoder *encoder)
+-{
+-	struct mdfld_dsi_connector *connector;
+-
+-	if (!encoder)
+-		return -1;
+-
+-	connector = mdfld_dsi_encoder_get_connector(encoder);
+-	if (!connector)
+-		return -1;
+-
+-	return connector->pipe;
+-}
+-
+-extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev,
+-				u32 gen_fifo_stat_reg, u32 fifo_stat);
+-extern void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config,
+-				int pipe);
+-extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe,
+-				int level);
+-extern void mdfld_dsi_output_init(struct drm_device *dev, int pipe,
+-				struct mdfld_dsi_config *config,
+-				struct panel_funcs *p_cmd_funcs,
+-				struct panel_funcs *p_vid_funcs);
+-extern void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config,
+-				int pipe);
+-extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
+-				u32 *mode,
+-				u8 transmission);
+-extern int mdfld_dsi_get_diagnostic_result(struct mdfld_dsi_config *dsi_config,
+-				u32 *result,
+-				u8 transmission);
+-extern int mdfld_dsi_panel_reset(int pipe);
+-
+-#endif /*__MDFLD_DSI_OUTPUT_H__*/
+diff --git a/drivers/staging/gma500/mdfld_dsi_pkg_sender.c b/drivers/staging/gma500/mdfld_dsi_pkg_sender.c
+deleted file mode 100644
+index 9b96a5c..0000000
+--- a/drivers/staging/gma500/mdfld_dsi_pkg_sender.c
++++ /dev/null
+@@ -1,1484 +0,0 @@
+-/*
+- * Copyright © 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Jackie Li<yaodong.li at intel.com>
+- */
+-
+-#include <linux/freezer.h>
+-
+-#include "mdfld_dsi_output.h"
+-#include "mdfld_dsi_pkg_sender.h"
+-#include "mdfld_dsi_dbi.h"
+-#include "mdfld_dsi_dpi.h"
+-
+-#define MDFLD_DSI_DBI_FIFO_TIMEOUT		100
+-#define MDFLD_DSI_MAX_RETURN_PACKET_SIZE	512
+-#define MDFLD_DSI_READ_MAX_COUNT		5000
+-
+-static const char * const dsi_errors[] = {
+-	"RX SOT Error",
+-	"RX SOT Sync Error",
+-	"RX EOT Sync Error",
+-	"RX Escape Mode Entry Error",
+-	"RX LP TX Sync Error",
+-	"RX HS Receive Timeout Error",
+-	"RX False Control Error",
+-	"RX ECC Single Bit Error",
+-	"RX ECC Multibit Error",
+-	"RX Checksum Error",
+-	"RX DSI Data Type Not Recognised",
+-	"RX DSI VC ID Invalid",
+-	"TX False Control Error",
+-	"TX ECC Single Bit Error",
+-	"TX ECC Multibit Error",
+-	"TX Checksum Error",
+-	"TX DSI Data Type Not Recognised",
+-	"TX DSI VC ID invalid",
+-	"High Contention",
+-	"Low contention",
+-	"DPI FIFO Under run",
+-	"HS TX Timeout",
+-	"LP RX Timeout",
+-	"Turn Around ACK Timeout",
+-	"ACK With No Error",
+-	"RX Invalid TX Length",
+-	"RX Prot Violation",
+-	"HS Generic Write FIFO Full",
+-	"LP Generic Write FIFO Full",
+-	"Generic Read Data Avail",
+-	"Special Packet Sent",
+-	"Tearing Effect",
+-};
+-
+-static int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender,
+-								u32 mask)
+-{
+-	struct drm_device *dev = sender->dev;
+-	u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg;
+-	int retry = 0xffff;
+-
+-	while (retry--) {
+-		if ((mask & REG_READ(gen_fifo_stat_reg)) == mask)
+-			return 0;
+-		udelay(100);
+-	}
+-	dev_err(dev->dev, "fifo is NOT empty 0x%08x\n",
+-					REG_READ(gen_fifo_stat_reg));
+-	return -EIO;
+-}
+-
+-static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+-{
+-	return wait_for_gen_fifo_empty(sender, (1 << 2) | (1 << 10) | (1 << 18)
+-		| (1 << 26) | (1 << 27) | (1 << 28));
+-}
+-
+-static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+-{
+-	return wait_for_gen_fifo_empty(sender, (1 << 10) | (1 << 26));
+-}
+-
+-static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
+-{
+-	return wait_for_gen_fifo_empty(sender, (1 << 2) | (1 << 18));
+-}
+-
+-static int wait_for_dbi_fifo_empty(struct mdfld_dsi_pkg_sender *sender)
+-{
+-	return wait_for_gen_fifo_empty(sender, (1 << 27));
+-}
+-
+-static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
+-{
+-	u32 intr_stat_reg = sender->mipi_intr_stat_reg;
+-	struct drm_device *dev = sender->dev;
+-
+-	switch (mask) {
+-	case (1 << 0):
+-	case (1 << 1):
+-	case (1 << 2):
+-	case (1 << 3):
+-	case (1 << 4):
+-	case (1 << 5):
+-	case (1 << 6):
+-	case (1 << 7):
+-	case (1 << 8):
+-	case (1 << 9):
+-	case (1 << 10):
+-	case (1 << 11):
+-	case (1 << 12):
+-	case (1 << 13):
+-		break;
+-	case (1 << 14):
+-		/*wait for all fifo empty*/
+-		/*wait_for_all_fifos_empty(sender)*/;
+-		break;
+-	case (1 << 15):
+-		break;
+-	case (1 << 16):
+-		break;
+-	case (1 << 17):
+-		break;
+-	case (1 << 18):
+-	case (1 << 19):
+-		/*wait for contention recovery time*/
+-		/*mdelay(10);*/
+-		/*wait for all fifo empty*/
+-		if (0)
+-			wait_for_all_fifos_empty(sender);
+-		break;
+-	case (1 << 20):
+-		break;
+-	case (1 << 21):
+-		/*wait for all fifo empty*/
+-		/*wait_for_all_fifos_empty(sender);*/
+-		break;
+-	case (1 << 22):
+-		break;
+-	case (1 << 23):
+-	case (1 << 24):
+-	case (1 << 25):
+-	case (1 << 26):
+-	case (1 << 27):
+-		/* HS Gen fifo full */
+-		REG_WRITE(intr_stat_reg, mask);
+-		wait_for_hs_fifos_empty(sender);
+-		break;
+-	case (1 << 28):
+-		/* LP Gen fifo full\n */
+-		REG_WRITE(intr_stat_reg, mask);
+-		wait_for_lp_fifos_empty(sender);
+-		break;
+-	case (1 << 29):
+-	case (1 << 30):
+-	case (1 << 31):
+-		break;
+-	}
+-
+-	if (mask & REG_READ(intr_stat_reg))
+-		dev_warn(dev->dev, "Cannot clean interrupt 0x%08x\n", mask);
+-
+-	return 0;
+-}
+-
+-static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender)
+-{
+-	struct drm_device *dev = sender->dev;
+-	u32 intr_stat_reg = sender->mipi_intr_stat_reg;
+-	u32 mask;
+-	u32 intr_stat;
+-	int i;
+-	int err = 0;
+-
+-	intr_stat = REG_READ(intr_stat_reg);
+-
+-	for (i = 0; i < 32; i++) {
+-		mask = (0x00000001UL) << i;
+-		if (intr_stat & mask) {
+-			dev_dbg(dev->dev, "[DSI]: %s\n", dsi_errors[i]);
+-			err = handle_dsi_error(sender, mask);
+-			if (err)
+-				dev_err(dev->dev, "Cannot handle error\n");
+-		}
+-	}
+-	return err;
+-}
+-
+-static inline int dbi_cmd_sent(struct mdfld_dsi_pkg_sender *sender)
+-{
+-	struct drm_device *dev = sender->dev;
+-	u32 retry = 0xffff;
+-	u32 dbi_cmd_addr_reg = sender->mipi_cmd_addr_reg;
+-
+-	/* Query the command execution status */
+-	while (retry--) {
+-		if (!(REG_READ(dbi_cmd_addr_reg) & (1 << 0)))
+-			break;
+-	}
+-
+-	if (!retry) {
+-		dev_err(dev->dev, "Timeout waiting for DBI Command status\n");
+-		return -EAGAIN;
+-	}
+-	return 0;
+-}
+-
+-/*
+- * NOTE: this interface is abandoned expect for write_mem_start DCS
+- * other DCS are sent via generic pkg interfaces
+- */
+-static int send_dcs_pkg(struct mdfld_dsi_pkg_sender *sender,
+-			struct mdfld_dsi_pkg *pkg)
+-{
+-	struct drm_device *dev = sender->dev;
+-	struct mdfld_dsi_dcs_pkg *dcs_pkg = &pkg->pkg.dcs_pkg;
+-	u32 dbi_cmd_len_reg = sender->mipi_cmd_len_reg;
+-	u32 dbi_cmd_addr_reg = sender->mipi_cmd_addr_reg;
+-	u32 cb_phy = sender->dbi_cb_phy;
+-	u32 index = 0;
+-	u8 *cb = (u8 *)sender->dbi_cb_addr;
+-	int i;
+-	int ret;
+-
+-	if (!sender->dbi_pkg_support) {
+-		dev_err(dev->dev, "Trying to send DCS on a non DBI output, abort!\n");
+-		return -ENOTSUPP;
+-	}
+-
+-	/*wait for DBI fifo empty*/
+-	wait_for_dbi_fifo_empty(sender);
+-
+-	*(cb + (index++)) = dcs_pkg->cmd;
+-	if (dcs_pkg->param_num) {
+-		for (i = 0; i < dcs_pkg->param_num; i++)
+-			*(cb + (index++)) = *(dcs_pkg->param + i);
+-	}
+-
+-	REG_WRITE(dbi_cmd_len_reg, (1 + dcs_pkg->param_num));
+-	REG_WRITE(dbi_cmd_addr_reg,
+-		(cb_phy << CMD_MEM_ADDR_OFFSET)
+-		| (1 << 0)
+-		| ((dcs_pkg->data_src == CMD_DATA_SRC_PIPE) ? (1 << 1) : 0));
+-
+-	ret = dbi_cmd_sent(sender);
+-	if (ret) {
+-		dev_err(dev->dev, "command 0x%x not complete\n", dcs_pkg->cmd);
+-		return -EAGAIN;
+-	}
+-	return 0;
+-}
+-
+-static int __send_short_pkg(struct mdfld_dsi_pkg_sender *sender,
+-				struct mdfld_dsi_pkg *pkg)
+-{
+-	struct drm_device *dev = sender->dev;
+-	u32 hs_gen_ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
+-	u32 lp_gen_ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
+-	u32 gen_ctrl_val = 0;
+-	struct mdfld_dsi_gen_short_pkg *short_pkg = &pkg->pkg.short_pkg;
+-
+-	gen_ctrl_val |= short_pkg->cmd << MCS_COMMANDS_POS;
+-	gen_ctrl_val |= 0 << DCS_CHANNEL_NUMBER_POS;
+-	gen_ctrl_val |= pkg->pkg_type;
+-	gen_ctrl_val |= short_pkg->param << MCS_PARAMETER_POS;
+-
+-	if (pkg->transmission_type == MDFLD_DSI_HS_TRANSMISSION) {
+-		/* wait for hs fifo empty */
+-		/* wait_for_hs_fifos_empty(sender); */
+-		/* Send pkg */
+-		REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
+-	} else if (pkg->transmission_type == MDFLD_DSI_LP_TRANSMISSION) {
+-		/* wait_for_lp_fifos_empty(sender); */
+-		/* Send pkg*/
+-		REG_WRITE(lp_gen_ctrl_reg, gen_ctrl_val);
+-	} else {
+-		dev_err(dev->dev, "Unknown transmission type %d\n",
+-							pkg->transmission_type);
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-}
+-
+-static int __send_long_pkg(struct mdfld_dsi_pkg_sender *sender,
+-				struct mdfld_dsi_pkg *pkg)
+-{
+-	struct drm_device *dev = sender->dev;
+-	u32 hs_gen_ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
+-	u32 hs_gen_data_reg = sender->mipi_hs_gen_data_reg;
+-	u32 lp_gen_ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
+-	u32 lp_gen_data_reg = sender->mipi_lp_gen_data_reg;
+-	u32 gen_ctrl_val = 0;
+-	u32 *dp;
+-	int i;
+-	struct mdfld_dsi_gen_long_pkg *long_pkg = &pkg->pkg.long_pkg;
+-
+-	dp = long_pkg->data;
+-
+-	/*
+-	 * Set up word count for long pkg
+-	 * FIXME: double check word count field.
+-	 * currently, using the byte counts of the payload as the word count.
+-	 * ------------------------------------------------------------
+-	 * | DI |   WC   | ECC|         PAYLOAD              |CHECKSUM|
+-	 * ------------------------------------------------------------
+-	 */
+-	gen_ctrl_val |= (long_pkg->len << 2) << WORD_COUNTS_POS;
+-	gen_ctrl_val |= 0 << DCS_CHANNEL_NUMBER_POS;
+-	gen_ctrl_val |= pkg->pkg_type;
+-
+-	if (pkg->transmission_type == MDFLD_DSI_HS_TRANSMISSION) {
+-		/* Wait for hs ctrl and data fifos to be empty */
+-		/* wait_for_hs_fifos_empty(sender); */
+-		for (i = 0; i < long_pkg->len; i++)
+-			REG_WRITE(hs_gen_data_reg, *(dp + i));
+-		REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
+-	} else if (pkg->transmission_type == MDFLD_DSI_LP_TRANSMISSION) {
+-		/* wait_for_lp_fifos_empty(sender); */
+-		for (i = 0; i < long_pkg->len; i++)
+-			REG_WRITE(lp_gen_data_reg, *(dp + i));
+-		REG_WRITE(lp_gen_ctrl_reg, gen_ctrl_val);
+-	} else {
+-		dev_err(dev->dev, "Unknown transmission type %d\n",
+-						pkg->transmission_type);
+-		return -EINVAL;
+-	}
+-
+-	return 0;
+-
+-}
+-
+-static int send_mcs_short_pkg(struct mdfld_dsi_pkg_sender *sender,
+-				struct mdfld_dsi_pkg *pkg)
+-{
+-	return __send_short_pkg(sender, pkg);
+-}
+-
+-static int send_mcs_long_pkg(struct mdfld_dsi_pkg_sender *sender,
+-				struct mdfld_dsi_pkg *pkg)
+-{
+-	return __send_long_pkg(sender, pkg);
+-}
+-
+-static int send_gen_short_pkg(struct mdfld_dsi_pkg_sender *sender,
+-				struct mdfld_dsi_pkg *pkg)
+-{
+-	return __send_short_pkg(sender, pkg);
+-}
+-
+-static int send_gen_long_pkg(struct mdfld_dsi_pkg_sender *sender,
+-				struct mdfld_dsi_pkg *pkg)
+-{
+-	return __send_long_pkg(sender, pkg);
+-}
+-
+-static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender,
+-				struct mdfld_dsi_pkg *pkg)
+-{
+-	u8 cmd;
+-	u8 *data;
+-
+-	switch (pkg->pkg_type) {
+-	case MDFLD_DSI_PKG_DCS:
+-		cmd = pkg->pkg.dcs_pkg.cmd;
+-		break;
+-	case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
+-	case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
+-		cmd = pkg->pkg.short_pkg.cmd;
+-		break;
+-	case MDFLD_DSI_PKG_MCS_LONG_WRITE:
+-		data = (u8 *)pkg->pkg.long_pkg.data;
+-		cmd = *data;
+-		break;
+-	default:
+-		return 0;
+-	}
+-
+-	/* This prevents other package sending while doing msleep */
+-	sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
+-
+-	/* Check panel mode v.s. sending command */
+-	if ((sender->panel_mode & MDFLD_DSI_PANEL_MODE_SLEEP) &&
+-		cmd != exit_sleep_mode) {
+-		dev_err(sender->dev->dev,
+-				"sending 0x%x when panel sleep in\n", cmd);
+-		sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+-		return -EINVAL;
+-	}
+-
+-	/* Wait for 120 milliseconds in case exit_sleep_mode just be sent */
+-	if (cmd == DCS_ENTER_SLEEP_MODE) {
+- 		/*TODO: replace it with msleep later*/
+-		mdelay(120);
+-	}
+-	return 0;
+-}
+-
+-static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender,
+-				struct mdfld_dsi_pkg *pkg)
+-{
+-	u8 cmd;
+-	u8 *data;
+-
+-	switch (pkg->pkg_type) {
+-	case MDFLD_DSI_PKG_DCS:
+-		cmd = pkg->pkg.dcs_pkg.cmd;
+-		break;
+-	case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
+-	case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
+-		cmd = pkg->pkg.short_pkg.cmd;
+-		break;
+-	case MDFLD_DSI_PKG_MCS_LONG_WRITE:
+-		data = (u8 *)pkg->pkg.long_pkg.data;
+-		cmd = *data;
+-		break;
+-	default:
+-		return 0;
+-	}
+-
+-	/* Update panel status */
+-	if (cmd == DCS_ENTER_SLEEP_MODE) {
+-		sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
+-		/*TODO: replace it with msleep later*/
+-		mdelay(120);
+-	} else if (cmd == DCS_EXIT_SLEEP_MODE) {
+-		sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
+-		/*TODO: replace it with msleep later*/
+-		mdelay(120);
+-	} else if (unlikely(cmd == DCS_SOFT_RESET)) {
+-		/*TODO: replace it with msleep later*/
+-		mdelay(5);
+- 	}
+-	sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+-	return 0;
+-
+-}
+-
+-static int do_send_pkg(struct mdfld_dsi_pkg_sender *sender,
+-			struct mdfld_dsi_pkg *pkg)
+-{
+-	int ret;
+-
+-	if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) {
+-		dev_err(sender->dev->dev, "sender is busy\n");
+-		return -EAGAIN;
+-	}
+-
+-	ret = send_pkg_prepare(sender, pkg);
+-	if (ret) {
+-		dev_err(sender->dev->dev, "send_pkg_prepare error\n");
+-		return ret;
+-	}
+-
+-	switch (pkg->pkg_type) {
+-	case MDFLD_DSI_PKG_DCS:
+-		ret = send_dcs_pkg(sender, pkg);
+-		break;
+-	case MDFLD_DSI_PKG_GEN_SHORT_WRITE_0:
+-	case MDFLD_DSI_PKG_GEN_SHORT_WRITE_1:
+-	case MDFLD_DSI_PKG_GEN_SHORT_WRITE_2:
+-	case MDFLD_DSI_PKG_GEN_READ_0:
+-	case MDFLD_DSI_PKG_GEN_READ_1:
+-	case MDFLD_DSI_PKG_GEN_READ_2:
+-		ret = send_gen_short_pkg(sender, pkg);
+-		break;
+-	case MDFLD_DSI_PKG_GEN_LONG_WRITE:
+-		ret = send_gen_long_pkg(sender, pkg);
+-		break;
+-	case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
+-	case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
+-	case MDFLD_DSI_PKG_MCS_READ:
+-		ret = send_mcs_short_pkg(sender, pkg);
+-		break;
+-	case MDFLD_DSI_PKG_MCS_LONG_WRITE:
+-		ret = send_mcs_long_pkg(sender, pkg);
+-		break;
+-	default:
+-		dev_err(sender->dev->dev, "Invalid pkg type 0x%x\n",
+-							pkg->pkg_type);
+-		ret = -EINVAL;
+-	}
+-	send_pkg_done(sender, pkg);
+-	return ret;
+-}
+-
+-static int send_pkg(struct mdfld_dsi_pkg_sender *sender,
+-			struct mdfld_dsi_pkg *pkg)
+-{
+-	int err ;
+-
+-	/* Handle DSI error */
+-	err = dsi_error_handler(sender);
+-	if (err) {
+-		dev_err(sender->dev->dev, "Error handling failed\n");
+-		err = -EAGAIN;
+-		goto send_pkg_err;
+-	}
+-
+-	/* Send pkg */
+-	err = do_send_pkg(sender, pkg);
+-	if (err) {
+-		dev_err(sender->dev->dev, "sent pkg failed\n");
+-		err = -EAGAIN;
+-		goto send_pkg_err;
+-	}
+-
+-	/* FIXME: should I query complete and fifo empty here? */
+-send_pkg_err:
+-	return err;
+-}
+-
+-static struct mdfld_dsi_pkg *pkg_sender_get_pkg_locked(
+-					struct mdfld_dsi_pkg_sender *sender)
+-{
+-	struct mdfld_dsi_pkg *pkg;
+-
+-	if (list_empty(&sender->free_list)) {
+-		dev_err(sender->dev->dev, "No free pkg left\n");
+-		return NULL;
+-	}
+-	pkg = list_first_entry(&sender->free_list, struct mdfld_dsi_pkg, entry);
+-	/* Detach from free list */
+-	list_del_init(&pkg->entry);
+-	return pkg;
+-}
+-
+-static void pkg_sender_put_pkg_locked(struct mdfld_dsi_pkg_sender *sender,
+-					struct mdfld_dsi_pkg *pkg)
+-{
+-	memset(pkg, 0, sizeof(struct mdfld_dsi_pkg));
+-	INIT_LIST_HEAD(&pkg->entry);
+-	list_add_tail(&pkg->entry, &sender->free_list);
+-}
+-
+-static int mdfld_dbi_cb_init(struct mdfld_dsi_pkg_sender *sender,
+-					struct psb_gtt *pg, int pipe)
+-{
+-	unsigned long phys;
+-	void *virt_addr = NULL;
+-
+-	switch (pipe) {
+-	case 0:
+-		/* FIXME: Doesn't this collide with stolen space ? */
+-		phys = pg->gtt_phys_start - 0x1000;
+-		break;
+-	case 2:
+-		phys = pg->gtt_phys_start - 0x800;
+-		break;
+-	default:
+-		dev_err(sender->dev->dev, "Unsupported channel %d\n", pipe);
+-		return -EINVAL;
+-	}
+-
+-	virt_addr = ioremap_nocache(phys, 0x800);
+-	if (!virt_addr) {
+-		dev_err(sender->dev->dev, "Map DBI command buffer error\n");
+-		return -ENOMEM;
+-	}
+-	sender->dbi_cb_phy = phys;
+-	sender->dbi_cb_addr = virt_addr;
+-	return 0;
+-}
+-
+-static void mdfld_dbi_cb_destroy(struct mdfld_dsi_pkg_sender *sender)
+-{
+-	if (sender && sender->dbi_cb_addr)
+-		iounmap(sender->dbi_cb_addr);
+-}
+-
+-static void pkg_sender_queue_pkg(struct mdfld_dsi_pkg_sender *sender,
+-					struct mdfld_dsi_pkg *pkg,
+-					int delay)
+-{
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&sender->lock, flags);
+-
+-	if (!delay) {
+-		send_pkg(sender, pkg);
+-		pkg_sender_put_pkg_locked(sender, pkg);
+-	} else {
+-		/* Queue it */
+-		list_add_tail(&pkg->entry, &sender->pkg_list);
+-	}
+-	spin_unlock_irqrestore(&sender->lock, flags);
+-}
+-
+-static void process_pkg_list(struct mdfld_dsi_pkg_sender *sender)
+-{
+-	struct mdfld_dsi_pkg *pkg;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&sender->lock, flags);
+-
+-	while (!list_empty(&sender->pkg_list)) {
+-		pkg = list_first_entry(&sender->pkg_list,
+-					struct mdfld_dsi_pkg, entry);
+-		send_pkg(sender, pkg);
+-		list_del_init(&pkg->entry);
+-		pkg_sender_put_pkg_locked(sender, pkg);
+-	}
+-
+-	spin_unlock_irqrestore(&sender->lock, flags);
+-}
+-
+-static int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender,
+-	u32 *data, u32 len, u8 transmission, int delay)
+-{
+-	struct mdfld_dsi_pkg *pkg;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&sender->lock, flags);
+-	pkg = pkg_sender_get_pkg_locked(sender);
+-	spin_unlock_irqrestore(&sender->lock, flags);
+-
+-	if (!pkg) {
+-		dev_err(sender->dev->dev, "No memory\n");
+-		return -ENOMEM;
+-	}
+-	pkg->pkg_type = MDFLD_DSI_PKG_MCS_LONG_WRITE;
+-	pkg->transmission_type = transmission;
+-	pkg->pkg.long_pkg.data = data;
+-	pkg->pkg.long_pkg.len = len;
+-	INIT_LIST_HEAD(&pkg->entry);
+-
+-	pkg_sender_queue_pkg(sender, pkg, delay);
+-	return 0;
+-}
+-
+-static int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender,
+-					u8 cmd, u8 param, u8 param_num,
+-					u8 transmission,
+-					int delay)
+-{
+-	struct mdfld_dsi_pkg *pkg;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&sender->lock, flags);
+-	pkg = pkg_sender_get_pkg_locked(sender);
+-	spin_unlock_irqrestore(&sender->lock, flags);
+-
+-	if (!pkg) {
+-		dev_err(sender->dev->dev, "No memory\n");
+-		return -ENOMEM;
+-	}
+-
+-	if (param_num) {
+-		pkg->pkg_type = MDFLD_DSI_PKG_MCS_SHORT_WRITE_1;
+-		pkg->pkg.short_pkg.param = param;
+-	} else {
+-		pkg->pkg_type = MDFLD_DSI_PKG_MCS_SHORT_WRITE_0;
+-		pkg->pkg.short_pkg.param = 0;
+-	}
+-	pkg->transmission_type = transmission;
+-	pkg->pkg.short_pkg.cmd = cmd;
+-	INIT_LIST_HEAD(&pkg->entry);
+-
+-	pkg_sender_queue_pkg(sender, pkg, delay);
+-	return 0;
+-}
+-
+-static int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender,
+-					u8 param0, u8 param1, u8 param_num,
+-					u8 transmission,
+-					int delay)
+-{
+-	struct mdfld_dsi_pkg *pkg;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&sender->lock, flags);
+-	pkg = pkg_sender_get_pkg_locked(sender);
+-	spin_unlock_irqrestore(&sender->lock, flags);
+-
+-	if (!pkg) {
+-		dev_err(sender->dev->dev, "No pkg memory\n");
+-		return -ENOMEM;
+-	}
+-
+-	switch (param_num) {
+-	case 0:
+-		pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_0;
+-		pkg->pkg.short_pkg.cmd = 0;
+-		pkg->pkg.short_pkg.param = 0;
+-		break;
+-	case 1:
+-		pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_1;
+-		pkg->pkg.short_pkg.cmd = param0;
+-		pkg->pkg.short_pkg.param = 0;
+-		break;
+-	case 2:
+-		pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_2;
+-		pkg->pkg.short_pkg.cmd = param0;
+-		pkg->pkg.short_pkg.param = param1;
+-		break;
+-	}
+-
+-	pkg->transmission_type = transmission;
+-	INIT_LIST_HEAD(&pkg->entry);
+-
+-	pkg_sender_queue_pkg(sender, pkg, delay);
+-	return 0;
+-}
+-
+-static int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender,
+-				u32 *data, u32 len, u8 transmission, int delay)
+-{
+-	struct mdfld_dsi_pkg *pkg;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&sender->lock, flags);
+-	pkg = pkg_sender_get_pkg_locked(sender);
+-	spin_unlock_irqrestore(&sender->lock, flags);
+-
+-	if (!pkg) {
+-		dev_err(sender->dev->dev, "No pkg memory\n");
+-		return -ENOMEM;
+-	}
+-
+-	pkg->pkg_type = MDFLD_DSI_PKG_GEN_LONG_WRITE;
+-	pkg->transmission_type = transmission;
+-	pkg->pkg.long_pkg.data = data;
+-	pkg->pkg.long_pkg.len = len;
+-
+-	INIT_LIST_HEAD(&pkg->entry);
+-
+-	pkg_sender_queue_pkg(sender, pkg, delay);
+-
+-	return 0;
+-}
+-
+-static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender,
+-				struct mdfld_dsi_pkg *pkg,
+-				u32 *data,
+-				u16 len)
+-{
+-	unsigned long flags;
+-	struct drm_device *dev = sender->dev;
+-	int i;
+-	u32 gen_data_reg;
+-	int retry = MDFLD_DSI_READ_MAX_COUNT;
+-	u8 transmission = pkg->transmission_type;
+-
+-	/*
+-	 * do reading.
+-	 * 0) send out generic read request
+-	 * 1) polling read data avail interrupt
+-	 * 2) read data
+-	 */
+-	spin_lock_irqsave(&sender->lock, flags);
+-
+-	REG_WRITE(sender->mipi_intr_stat_reg, 1 << 29);
+-
+-	if ((REG_READ(sender->mipi_intr_stat_reg) & (1 << 29)))
+-		DRM_ERROR("Can NOT clean read data valid interrupt\n");
+-
+-	/*send out read request*/
+-	send_pkg(sender, pkg);
+-
+-	pkg_sender_put_pkg_locked(sender, pkg);
+-
+-	/*polling read data avail interrupt*/
+-	while (retry && !(REG_READ(sender->mipi_intr_stat_reg) & (1 << 29))) {
+-		udelay(100);
+-		retry--;
+-	}
+-
+-	if (!retry) {
+-		spin_unlock_irqrestore(&sender->lock, flags);
+-		return -ETIMEDOUT;
+-	}
+-
+-	REG_WRITE(sender->mipi_intr_stat_reg, (1 << 29));
+-
+-	/*read data*/
+-	if (transmission == MDFLD_DSI_HS_TRANSMISSION)
+-		gen_data_reg = sender->mipi_hs_gen_data_reg;
+-	else if (transmission == MDFLD_DSI_LP_TRANSMISSION)
+-		gen_data_reg = sender->mipi_lp_gen_data_reg;
+-	else {
+-		DRM_ERROR("Unknown transmission");
+-		spin_unlock_irqrestore(&sender->lock, flags);
+-		return -EINVAL;
+-	}
+-
+-	for (i=0; i<len; i++)
+-		*(data + i) = REG_READ(gen_data_reg);
+-
+- 	spin_unlock_irqrestore(&sender->lock, flags);
+- 
+-	return 0;
+-}
+-
+-static int mdfld_dsi_read_gen(struct mdfld_dsi_pkg_sender *sender,
+-				u8 param0,
+-				u8 param1,
+-				u8 param_num,
+-				u32 *data,
+-				u16 len,
+-				u8 transmission)
+-{
+-	struct mdfld_dsi_pkg *pkg;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&sender->lock, flags);
+-
+-	pkg = pkg_sender_get_pkg_locked(sender);
+-
+-	spin_unlock_irqrestore(&sender->lock,flags);
+-
+-	if (!pkg) {
+-		dev_err(sender->dev->dev, "No pkg memory\n");
+-		return -ENOMEM;
+-	}
+-
+-	switch (param_num) {
+-	case 0:
+-		pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_0;
+-		pkg->pkg.short_pkg.cmd = 0;
+-		pkg->pkg.short_pkg.param = 0;
+-		break;
+-	case 1:
+-		pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_1;
+-		pkg->pkg.short_pkg.cmd = param0;
+-		pkg->pkg.short_pkg.param = 0;
+-		break;
+- 	case 2:
+-		pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_2;
+-		pkg->pkg.short_pkg.cmd = param0;
+-		pkg->pkg.short_pkg.param = param1;
+-		break;
+-	}
+-
+-	pkg->transmission_type = transmission;
+-
+-	INIT_LIST_HEAD(&pkg->entry);
+-
+-	return __read_panel_data(sender, pkg, data, len);
+-}
+- 
+-static int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender,
+-				u8 cmd,
+-				u32 *data,
+-				u16 len,
+-				u8 transmission)
+-{
+-	struct mdfld_dsi_pkg *pkg;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&sender->lock, flags);
+-
+-	pkg = pkg_sender_get_pkg_locked(sender);
+-
+- 	spin_unlock_irqrestore(&sender->lock, flags);
+- 
+- 	if (!pkg) {
+-		dev_err(sender->dev->dev, "No pkg memory\n");
+- 		return -ENOMEM;
+-	}
+-
+-	pkg->pkg_type = MDFLD_DSI_PKG_MCS_READ;
+-	pkg->pkg.short_pkg.cmd = cmd;
+-	pkg->pkg.short_pkg.param = 0;
+-
+-	pkg->transmission_type = transmission;
+- 
+-	INIT_LIST_HEAD(&pkg->entry);
+-
+-	return __read_panel_data(sender, pkg, data, len);
+-}
+-
+-void dsi_controller_dbi_init(struct mdfld_dsi_config * dsi_config, int pipe)
+-{
+-	struct drm_device * dev = dsi_config->dev;
+-	u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+-	int lane_count = dsi_config->lane_count;
+-	u32 val = 0;
+-
+-	/*un-ready device*/
+-	REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
+-
+-	/*init dsi adapter before kicking off*/
+-	REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
+-
+-	/*TODO: figure out how to setup these registers*/
+-	REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
+-	REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset), 0x000a0014);
+-	REG_WRITE((MIPIA_DBI_BW_CTRL_REG + reg_offset), 0x00000400);
+-	REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000001);
+-	REG_WRITE((MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset), 0x00000000);
+-
+-	/*enable all interrupts*/
+-	REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
+-	/*max value: 20 clock cycles of txclkesc*/
+-	REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x0000001f);
+-	/*min 21 txclkesc, max: ffffh*/
+-	REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0x0000ffff);
+-	/*min: 7d0 max: 4e20*/
+-	REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x00000fa0);
+-
+-	/*set up max return packet size*/
+-	REG_WRITE((MIPIA_MAX_RETURN_PACK_SIZE_REG + reg_offset),
+-			MDFLD_DSI_MAX_RETURN_PACKET_SIZE);
+-
+-	/*set up func_prg*/
+-	val |= lane_count;
+-	val |= (dsi_config->channel_num << DSI_DBI_VIRT_CHANNEL_OFFSET);
+-	val |= DSI_DBI_COLOR_FORMAT_OPTION2;
+-	REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
+-
+-	REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 0x3fffff);
+-	REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff);
+-
+-	REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
+-	REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
+-	REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
+-	REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
+-}
+-
+-void dsi_controller_dpi_init(struct mdfld_dsi_config * dsi_config, int pipe)
+-{
+-	struct drm_device * dev = dsi_config->dev;
+-	u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+-	int lane_count = dsi_config->lane_count;
+-	struct mdfld_dsi_dpi_timing dpi_timing;
+-	struct drm_display_mode * mode = dsi_config->mode;
+-	u32 val = 0;
+-
+-	/*un-ready device*/
+-	REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
+-
+-	/*init dsi adapter before kicking off*/
+-	REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
+-
+-	/*enable all interrupts*/
+-	REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
+-
+-	/*set up func_prg*/
+-	val |= lane_count;
+-	val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
+-
+-	switch(dsi_config->bpp) {
+-	case 16:
+-		val |= DSI_DPI_COLOR_FORMAT_RGB565;
+-		break;
+-	case 18:
+-		val |= DSI_DPI_COLOR_FORMAT_RGB666;
+-		break;
+-	case 24:
+-		val |= DSI_DPI_COLOR_FORMAT_RGB888;
+-		break;
+-	default:
+-		DRM_ERROR("unsupported color format, bpp = %d\n", dsi_config->bpp);
+-	}
+-
+-	REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
+-
+-	REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset),
+-			(mode->vtotal * mode->htotal * dsi_config->bpp / (8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
+-	REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff & DSI_LP_RX_TIMEOUT_MASK);
+-
+-	/*max value: 20 clock cycles of txclkesc*/
+-	REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
+-
+-	/*min 21 txclkesc, max: ffffh*/
+-	REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0xffff & DSI_RESET_TIMER_MASK);
+-
+-	REG_WRITE((MIPIA_DPI_RESOLUTION_REG + reg_offset), mode->vdisplay << 16 | mode->hdisplay);
+-
+-	/*set DPI timing registers*/
+-	mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing, dsi_config->lane_count, dsi_config->bpp);
+-
+-	REG_WRITE((MIPIA_HSYNC_COUNT_REG + reg_offset), dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
+-	REG_WRITE((MIPIA_HBP_COUNT_REG + reg_offset), dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
+-	REG_WRITE((MIPIA_HFP_COUNT_REG + reg_offset), dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
+-	REG_WRITE((MIPIA_HACTIVE_COUNT_REG + reg_offset), dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
+-	REG_WRITE((MIPIA_VSYNC_COUNT_REG + reg_offset), dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
+-	REG_WRITE((MIPIA_VBP_COUNT_REG + reg_offset), dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
+-	REG_WRITE((MIPIA_VFP_COUNT_REG + reg_offset), dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
+-
+-	REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
+-
+-	/*min: 7d0 max: 4e20*/
+-	REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x000007d0);
+-
+-	/*set up video mode*/
+-	val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
+-	REG_WRITE((MIPIA_VIDEO_MODE_FORMAT_REG + reg_offset), val);
+-
+-	REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
+-
+-	REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
+-
+-	/*TODO: figure out how to setup these registers*/
+-	REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
+-
+-	REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset), (0xa << 16) | 0x14);
+-
+-	/*set device ready*/
+-	REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
+-}
+-
+-static void dsi_controller_init(struct mdfld_dsi_config * dsi_config, int pipe)
+-{
+-	if (!dsi_config || ((pipe != 0) && (pipe != 2))) {
+-		DRM_ERROR("Invalid parameters\n");
+-		return;
+-	}
+-
+-	if (dsi_config->type == MDFLD_DSI_ENCODER_DPI)
+-		dsi_controller_dpi_init(dsi_config, pipe);
+-	else if (dsi_config->type == MDFLD_DSI_ENCODER_DBI)
+-		dsi_controller_dbi_init(dsi_config, pipe);
+-	else
+-		DRM_ERROR("Bad DSI encoder type\n");
+-}
+-
+-void mdfld_dsi_cmds_kick_out(struct mdfld_dsi_pkg_sender *sender)
+-{
+-	process_pkg_list(sender);
+-}
+-
+-int mdfld_dsi_send_dcs(struct mdfld_dsi_pkg_sender *sender,
+-			u8 dcs, u8 *param, u32 param_num, u8 data_src,
+-			int delay)
+-{
+-	struct mdfld_dsi_pkg *pkg;
+-	u32 cb_phy = sender->dbi_cb_phy;
+-	struct drm_device *dev = sender->dev;
+-	u32 index = 0;
+-	u8 *cb = (u8 *)sender->dbi_cb_addr;
+-	unsigned long flags;
+-	int retry;
+-	u8 *dst = NULL;
+-	u32 len;
+-
+-	if (!sender) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-
+-	if (!sender->dbi_pkg_support) {
+-		dev_err(dev->dev, "No DBI pkg sending on this sender\n");
+-		return -ENOTSUPP;
+-	}
+-
+-	if (param_num > MDFLD_MAX_DCS_PARAM) {
+-		dev_err(dev->dev, "Sender only supports up to %d DCS params\n",
+-							MDFLD_MAX_DCS_PARAM);
+-		return -EINVAL;
+-	}
+-
+-	/*
+-	 * If dcs is write_mem_start, send it directly using DSI adapter
+-	 * interface
+-	 */
+-	if (dcs == DCS_WRITE_MEM_START) {
+-		if (!spin_trylock(&sender->lock))
+-			return -EAGAIN;
+-
+-		/*
+-		 * query whether DBI FIFO is empty,
+-		 * if not wait it becoming empty
+-		 */
+-		retry = MDFLD_DSI_DBI_FIFO_TIMEOUT;
+-		while (retry &&
+-		    !(REG_READ(sender->mipi_gen_fifo_stat_reg) & (1 << 27))) {
+-			udelay(500);
+-			retry--;
+-		}
+-
+-		/* If DBI FIFO timeout, drop this frame */
+-		if (!retry) {
+-			spin_unlock(&sender->lock);
+-			return 0;
+-		}
+-
+-		*(cb + (index++)) = write_mem_start;
+-
+-		REG_WRITE(sender->mipi_cmd_len_reg, 1);
+-		REG_WRITE(sender->mipi_cmd_addr_reg,
+-					cb_phy | (1 << 0) | (1 << 1));
+-
+-		retry = MDFLD_DSI_DBI_FIFO_TIMEOUT;
+-		while (retry &&
+-			(REG_READ(sender->mipi_cmd_addr_reg) & (1 << 0))) {
+-			udelay(1);
+-			retry--;
+-		}
+-
+-		spin_unlock(&sender->lock);
+-		return 0;
+-	}
+-
+-	/* Get a free pkg */
+-	spin_lock_irqsave(&sender->lock, flags);
+-	pkg = pkg_sender_get_pkg_locked(sender);
+-	spin_unlock_irqrestore(&sender->lock, flags);
+-
+-	if (!pkg) {
+-		dev_err(dev->dev, "No packages memory\n");
+-		return -ENOMEM;
+-	}
+-
+-	dst = pkg->pkg.dcs_pkg.param;
+-	memcpy(dst, param, param_num);
+-
+-	pkg->pkg_type = MDFLD_DSI_PKG_DCS;
+-	pkg->transmission_type = MDFLD_DSI_DCS;
+-	pkg->pkg.dcs_pkg.cmd = dcs;
+-	pkg->pkg.dcs_pkg.param_num = param_num;
+-	pkg->pkg.dcs_pkg.data_src = data_src;
+-
+-	INIT_LIST_HEAD(&pkg->entry);
+-
+-	if (param_num == 0)
+-		return mdfld_dsi_send_mcs_short_hs(sender, dcs, 0, 0, delay);
+-	else if (param_num == 1)
+-		return mdfld_dsi_send_mcs_short_hs(sender, dcs,
+-							param[0], 1, delay);
+-	else if (param_num > 1) {
+-		len = (param_num + 1) / 4;
+-		if ((param_num + 1) % 4)
+-			len++;
+-		return mdfld_dsi_send_mcs_long_hs(sender,
+-				(u32 *)&pkg->pkg.dcs_pkg, len, delay);
+-	}
+-	return 0;
+-}
+-
+-int mdfld_dsi_send_mcs_short_hs(struct mdfld_dsi_pkg_sender *sender,
+-				u8 cmd, u8 param, u8 param_num, int delay)
+-{
+-	if (!sender) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-	return mdfld_dsi_send_mcs_short(sender, cmd, param, param_num,
+-					MDFLD_DSI_HS_TRANSMISSION, delay);
+-}
+-
+-int mdfld_dsi_send_mcs_short_lp(struct mdfld_dsi_pkg_sender *sender,
+-				u8 cmd, u8 param, u8 param_num, int delay)
+-{
+-	if (!sender) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-	return mdfld_dsi_send_mcs_short(sender, cmd, param, param_num,
+-					MDFLD_DSI_LP_TRANSMISSION, delay);
+-}
+-
+-int mdfld_dsi_send_mcs_long_hs(struct mdfld_dsi_pkg_sender *sender,
+-				u32 *data,
+-				u32 len,
+-				int delay)
+-{
+-	if (!sender || !data || !len) {
+-		DRM_ERROR("Invalid parameters\n");
+-		return -EINVAL;
+-	}
+-	return mdfld_dsi_send_mcs_long(sender, data, len,
+-					MDFLD_DSI_HS_TRANSMISSION, delay);
+-}
+-
+-int mdfld_dsi_send_mcs_long_lp(struct mdfld_dsi_pkg_sender *sender,
+-				u32 *data,
+-				u32 len,
+-				int delay)
+-{
+-	if (!sender || !data || !len) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-	return mdfld_dsi_send_mcs_long(sender, data, len,
+-				MDFLD_DSI_LP_TRANSMISSION, delay);
+-}
+-
+-int mdfld_dsi_send_gen_short_hs(struct mdfld_dsi_pkg_sender *sender,
+-				u8 param0, u8 param1, u8 param_num, int delay)
+-{
+-	if (!sender) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-	return mdfld_dsi_send_gen_short(sender, param0, param1, param_num,
+-					MDFLD_DSI_HS_TRANSMISSION, delay);
+-}
+-
+-int mdfld_dsi_send_gen_short_lp(struct mdfld_dsi_pkg_sender *sender,
+-				u8 param0, u8 param1, u8 param_num, int delay)
+-{
+-	if (!sender || param_num < 0 || param_num > 2) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-	return mdfld_dsi_send_gen_short(sender, param0, param1, param_num,
+-					MDFLD_DSI_LP_TRANSMISSION, delay);
+-}
+-
+-int mdfld_dsi_send_gen_long_hs(struct mdfld_dsi_pkg_sender *sender,
+-				u32 *data,
+-				u32 len,
+-				int delay)
+-{
+-	if (!sender || !data || !len) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-	return mdfld_dsi_send_gen_long(sender, data, len,
+-					MDFLD_DSI_HS_TRANSMISSION, delay);
+-}
+-
+-int mdfld_dsi_send_gen_long_lp(struct mdfld_dsi_pkg_sender *sender,
+-				u32 *data,
+-				u32 len,
+-				int delay)
+-{
+-	if (!sender || !data || !len) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-	return mdfld_dsi_send_gen_long(sender, data, len,
+-					MDFLD_DSI_LP_TRANSMISSION, delay);
+-}
+-
+-int mdfld_dsi_read_gen_hs(struct mdfld_dsi_pkg_sender *sender,
+-			u8 param0,
+-			u8 param1,
+-			u8 param_num,
+-			u32 *data,
+-			u16 len)
+-{
+-	if (!sender || !data || param_num < 0 || param_num > 2
+-		|| !data || !len) {
+-		DRM_ERROR("Invalid parameters\n");
+-		return -EINVAL;
+-	}
+-
+-	return mdfld_dsi_read_gen(sender, param0, param1, param_num,
+-				data, len, MDFLD_DSI_HS_TRANSMISSION);
+-
+-}
+-
+-int mdfld_dsi_read_gen_lp(struct mdfld_dsi_pkg_sender *sender,
+-			u8 param0,
+-			u8 param1,
+-			u8 param_num,
+-			u32 *data,
+-			u16 len)
+-{
+-	if (!sender || !data || param_num < 0 || param_num > 2
+-		|| !data || !len) {
+-		DRM_ERROR("Invalid parameters\n");
+-		return -EINVAL;
+-	}
+-
+-	return mdfld_dsi_read_gen(sender, param0, param1, param_num,
+-				data, len, MDFLD_DSI_LP_TRANSMISSION);
+-}
+-
+-int mdfld_dsi_read_mcs_hs(struct mdfld_dsi_pkg_sender *sender,
+-			u8 cmd,
+-			u32 *data,
+-			u16 len)
+-{
+-	if (!sender || !data || !len) {
+-		DRM_ERROR("Invalid parameters\n");
+-		return -EINVAL;
+-	}
+-
+-	return mdfld_dsi_read_mcs(sender, cmd, data, len,
+-				MDFLD_DSI_HS_TRANSMISSION);
+-}
+-
+-int mdfld_dsi_read_mcs_lp(struct mdfld_dsi_pkg_sender *sender,
+-			u8 cmd,
+-			u32 *data,
+-			u16 len)
+-{
+-	if (!sender || !data || !len) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-
+-	return mdfld_dsi_read_mcs(sender, cmd, data, len,
+-				MDFLD_DSI_LP_TRANSMISSION);
+-}
+- 
+-int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
+-								int pipe)
+-{
+-	int ret;
+-	struct mdfld_dsi_pkg_sender *pkg_sender;
+-	struct mdfld_dsi_config *dsi_config =
+-					mdfld_dsi_get_config(dsi_connector);
+-	struct drm_device *dev = dsi_config->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct psb_gtt *pg = &dev_priv->gtt;
+-	int i;
+-	struct mdfld_dsi_pkg *pkg, *tmp;
+-	u32 mipi_val = 0;
+-
+-	if (!dsi_connector) {
+-		WARN_ON(1);
+-		return -EINVAL;
+-	}
+-
+-	pkg_sender = dsi_connector->pkg_sender;
+-
+-	if (!pkg_sender || IS_ERR(pkg_sender)) {
+-		pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender),
+-								GFP_KERNEL);
+-		if (!pkg_sender) {
+-			dev_err(dev->dev, "Create DSI pkg sender failed\n");
+-			return -ENOMEM;
+-		}
+-
+-		dsi_connector->pkg_sender = (void *)pkg_sender;
+-	}
+-
+-	pkg_sender->dev = dev;
+-	pkg_sender->dsi_connector = dsi_connector;
+-	pkg_sender->pipe = pipe;
+-	pkg_sender->pkg_num = 0;
+-	pkg_sender->panel_mode = 0;
+-	pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
+-
+-	/* Init dbi command buffer*/
+-
+-	if (dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
+-		pkg_sender->dbi_pkg_support = 1;
+-		ret = mdfld_dbi_cb_init(pkg_sender, pg, pipe);
+-		if (ret) {
+-			dev_err(dev->dev, "DBI command buffer map failed\n");
+-			goto mapping_err;
+-		}
+-	}
+-
+-	/* Init regs */
+-	if (pipe == 0) {
+-		pkg_sender->dpll_reg = MRST_DPLL_A;
+-		pkg_sender->dspcntr_reg = DSPACNTR;
+-		pkg_sender->pipeconf_reg = PIPEACONF;
+-		pkg_sender->dsplinoff_reg = DSPALINOFF;
+-		pkg_sender->dspsurf_reg = DSPASURF;
+-		pkg_sender->pipestat_reg = PIPEASTAT;
+-
+-		pkg_sender->mipi_intr_stat_reg = MIPIA_INTR_STAT_REG;
+-		pkg_sender->mipi_lp_gen_data_reg = MIPIA_LP_GEN_DATA_REG;
+-		pkg_sender->mipi_hs_gen_data_reg = MIPIA_HS_GEN_DATA_REG;
+-		pkg_sender->mipi_lp_gen_ctrl_reg = MIPIA_LP_GEN_CTRL_REG;
+-		pkg_sender->mipi_hs_gen_ctrl_reg = MIPIA_HS_GEN_CTRL_REG;
+-		pkg_sender->mipi_gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
+-		pkg_sender->mipi_data_addr_reg = MIPIA_DATA_ADD_REG;
+-		pkg_sender->mipi_data_len_reg = MIPIA_DATA_LEN_REG;
+-		pkg_sender->mipi_cmd_addr_reg = MIPIA_CMD_ADD_REG;
+-		pkg_sender->mipi_cmd_len_reg = MIPIA_CMD_LEN_REG;
+-	} else if (pipe == 2) {
+-		pkg_sender->dpll_reg = MRST_DPLL_A;
+-		pkg_sender->dspcntr_reg = DSPCCNTR;
+-		pkg_sender->pipeconf_reg = PIPECCONF;
+-		pkg_sender->dsplinoff_reg = DSPCLINOFF;
+-		pkg_sender->dspsurf_reg = DSPCSURF;
+-		pkg_sender->pipestat_reg = PIPECSTAT;
+-
+-		pkg_sender->mipi_intr_stat_reg =
+-				MIPIA_INTR_STAT_REG + MIPIC_REG_OFFSET;
+-		pkg_sender->mipi_lp_gen_data_reg =
+-				MIPIA_LP_GEN_DATA_REG + MIPIC_REG_OFFSET;
+-		pkg_sender->mipi_hs_gen_data_reg =
+-				MIPIA_HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
+-		pkg_sender->mipi_lp_gen_ctrl_reg =
+-				MIPIA_LP_GEN_CTRL_REG + MIPIC_REG_OFFSET;
+-		pkg_sender->mipi_hs_gen_ctrl_reg =
+-				MIPIA_HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
+-		pkg_sender->mipi_gen_fifo_stat_reg =
+-				MIPIA_GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
+-		pkg_sender->mipi_data_addr_reg =
+-				MIPIA_DATA_ADD_REG + MIPIC_REG_OFFSET;
+-		pkg_sender->mipi_data_len_reg =
+-				MIPIA_DATA_LEN_REG + MIPIC_REG_OFFSET;
+-		pkg_sender->mipi_cmd_addr_reg =
+-				MIPIA_CMD_ADD_REG + MIPIC_REG_OFFSET;
+-		pkg_sender->mipi_cmd_len_reg =
+-				MIPIA_CMD_LEN_REG + MIPIC_REG_OFFSET;
+-	}
+-
+-	/* Init pkg list */
+-	INIT_LIST_HEAD(&pkg_sender->pkg_list);
+-	INIT_LIST_HEAD(&pkg_sender->free_list);
+-
+-	spin_lock_init(&pkg_sender->lock);
+-
+-	/* Allocate free pkg pool */
+-	for (i = 0; i < MDFLD_MAX_PKG_NUM; i++) {
+-		pkg = kzalloc(sizeof(struct mdfld_dsi_pkg), GFP_KERNEL);
+-		if (!pkg) {
+-			dev_err(dev->dev, "Out of memory allocating pkg pool");
+-			ret = -ENOMEM;
+-			goto pkg_alloc_err;
+-		}
+-		INIT_LIST_HEAD(&pkg->entry);
+-		list_add_tail(&pkg->entry, &pkg_sender->free_list);
+-	}
+-
+-	/*
+-	 * For video mode, don't enable DPI timing output here,
+-	 * will init the DPI timing output during mode setting.
+-	 */
+-	if (dsi_config->type == MDFLD_DSI_ENCODER_DPI)
+-		mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+-	else if (dsi_config->type == MDFLD_DSI_ENCODER_DBI)
+-		mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX
+-			| TE_TRIGGER_GPIO_PIN;
+-	else
+-		DRM_ERROR("Bad DSI encoder type\n");
+-
+-	if (pipe == 0) {
+-		mipi_val |= 0x2;
+-		REG_WRITE(MIPI, mipi_val);
+-		REG_READ(MIPI);
+-	} else if (pipe == 2) {
+-		REG_WRITE(MIPI_C, mipi_val);
+-		REG_READ(MIPI_C);
+-	}
+-
+-	/*do dsi controller init*/
+-	dsi_controller_init(dsi_config, pipe);
+-	
+-	return 0;
+-
+-pkg_alloc_err:
+-	list_for_each_entry_safe(pkg, tmp, &pkg_sender->free_list, entry) {
+-		list_del(&pkg->entry);
+-		kfree(pkg);
+-	}
+-
+-	/* Free mapped command buffer */
+-	mdfld_dbi_cb_destroy(pkg_sender);
+-mapping_err:
+-	kfree(pkg_sender);
+-	dsi_connector->pkg_sender = NULL;
+-	return ret;
+-}
+-
+-void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender)
+-{
+-	struct mdfld_dsi_pkg *pkg, *tmp;
+-
+-	if (!sender || IS_ERR(sender))
+-		return;
+-
+-	/* Free pkg pool */
+-	list_for_each_entry_safe(pkg, tmp, &sender->free_list, entry) {
+-		list_del(&pkg->entry);
+-		kfree(pkg);
+-	}
+-	/* Free pkg list */
+-	list_for_each_entry_safe(pkg, tmp, &sender->pkg_list, entry) {
+-		list_del(&pkg->entry);
+-		kfree(pkg);
+-	}
+-	mdfld_dbi_cb_destroy(sender);	/* free mapped command buffer */
+-	kfree(sender);
+-}
+diff --git a/drivers/staging/gma500/mdfld_dsi_pkg_sender.h b/drivers/staging/gma500/mdfld_dsi_pkg_sender.h
+deleted file mode 100644
+index f24abc7..0000000
+--- a/drivers/staging/gma500/mdfld_dsi_pkg_sender.h
++++ /dev/null
+@@ -1,184 +0,0 @@
+-/*
+- * Copyright © 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Jackie Li<yaodong.li at intel.com>
+- */
+-#ifndef __MDFLD_DSI_PKG_SENDER_H__
+-#define __MDFLD_DSI_PKG_SENDER_H__
+-
+-#include <linux/kthread.h>
+-
+-#define MDFLD_MAX_DCS_PARAM	8
+-#define MDFLD_MAX_PKG_NUM	2048
+-
+-enum {
+-	MDFLD_DSI_PKG_DCS,
+-	MDFLD_DSI_PKG_GEN_SHORT_WRITE_0 = 0x03,
+-	MDFLD_DSI_PKG_GEN_SHORT_WRITE_1 = 0x13,
+-	MDFLD_DSI_PKG_GEN_SHORT_WRITE_2 = 0x23,
+-	MDFLD_DSI_PKG_GEN_READ_0 = 0x04,
+-	MDFLD_DSI_PKG_GEN_READ_1 = 0x14,
+-	MDFLD_DSI_PKG_GEN_READ_2 = 0x24,
+-	MDFLD_DSI_PKG_GEN_LONG_WRITE = 0x29,
+-	MDFLD_DSI_PKG_MCS_SHORT_WRITE_0 = 0x05,
+-	MDFLD_DSI_PKG_MCS_SHORT_WRITE_1 = 0x15,
+-	MDFLD_DSI_PKG_MCS_READ = 0x06,
+-	MDFLD_DSI_PKG_MCS_LONG_WRITE = 0x39,
+-};
+-
+-enum {
+-	MDFLD_DSI_LP_TRANSMISSION,
+-	MDFLD_DSI_HS_TRANSMISSION,
+-	MDFLD_DSI_DCS,
+-};
+-
+-enum {
+-	MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
+-};
+-
+-enum {
+-	MDFLD_DSI_PKG_SENDER_FREE = 0x0,
+-	MDFLD_DSI_PKG_SENDER_BUSY = 0x1,
+-};
+-
+-enum {
+-	MDFLD_DSI_SEND_PACKAGE,
+-	MDFLD_DSI_QUEUE_PACKAGE,
+-};
+-
+-struct mdfld_dsi_gen_short_pkg {
+-	u8 cmd;
+-	u8 param;
+-};
+-
+-struct mdfld_dsi_gen_long_pkg {
+-	u32 *data;
+-	u32 len;
+-};
+-
+-struct mdfld_dsi_dcs_pkg {
+-	u8 cmd;
+-	u8 param[MDFLD_MAX_DCS_PARAM];
+-	u32 param_num;
+-	u8 data_src;
+-};
+-
+-struct mdfld_dsi_pkg {
+-	u8 pkg_type;
+-	u8 transmission_type;
+-
+-	union {
+-		struct mdfld_dsi_gen_short_pkg short_pkg;
+-		struct mdfld_dsi_gen_long_pkg long_pkg;
+-		struct mdfld_dsi_dcs_pkg dcs_pkg;
+-	} pkg;
+-
+-	struct list_head entry;
+-};
+-
+-struct mdfld_dsi_pkg_sender {
+-	struct drm_device *dev;
+-	struct mdfld_dsi_connector *dsi_connector;
+-	u32 status;
+-
+-	u32 panel_mode;
+-
+-	int pipe;
+-
+-	spinlock_t lock;
+-	struct list_head pkg_list;
+-	struct list_head free_list;
+-
+-	u32 pkg_num;
+-
+-	int dbi_pkg_support;
+-
+-	u32 dbi_cb_phy;
+-	void *dbi_cb_addr;
+-
+-	/* Registers */
+-	u32 dpll_reg;
+-	u32 dspcntr_reg;
+-	u32 pipeconf_reg;
+-	u32 pipestat_reg;
+-	u32 dsplinoff_reg;
+-	u32 dspsurf_reg;
+-
+-	u32 mipi_intr_stat_reg;
+-	u32 mipi_lp_gen_data_reg;
+-	u32 mipi_hs_gen_data_reg;
+-	u32 mipi_lp_gen_ctrl_reg;
+-	u32 mipi_hs_gen_ctrl_reg;
+-	u32 mipi_gen_fifo_stat_reg;
+-	u32 mipi_data_addr_reg;
+-	u32 mipi_data_len_reg;
+-	u32 mipi_cmd_addr_reg;
+-	u32 mipi_cmd_len_reg;
+-};
+-
+-/* DCS definitions */
+-#define DCS_SOFT_RESET			0x01
+-#define DCS_ENTER_SLEEP_MODE		0x10
+-#define DCS_EXIT_SLEEP_MODE		0x11
+-#define DCS_SET_DISPLAY_OFF		0x28
+-#define DCS_SET_DISPLAY_ON		0x29
+-#define DCS_SET_COLUMN_ADDRESS		0x2a
+-#define DCS_SET_PAGE_ADDRESS		0x2b
+-#define DCS_WRITE_MEM_START		0x2c
+-#define DCS_SET_TEAR_OFF		0x34
+-#define DCS_SET_TEAR_ON 		0x35
+-
+-extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
+-			int pipe);
+-extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
+-extern int mdfld_dsi_send_dcs(struct mdfld_dsi_pkg_sender *sender, u8 dcs,
+-			u8 *param, u32 param_num, u8 data_src, int delay);
+-extern int mdfld_dsi_send_mcs_short_hs(struct mdfld_dsi_pkg_sender *sender,
+-			u8 cmd, u8 param, u8 param_num, int delay);
+-extern int mdfld_dsi_send_mcs_short_lp(struct mdfld_dsi_pkg_sender *sender,
+-			u8 cmd, u8 param, u8 param_num, int delay);
+-extern int mdfld_dsi_send_mcs_long_hs(struct mdfld_dsi_pkg_sender *sender,
+-			u32 *data, u32 len, int delay);
+-extern int mdfld_dsi_send_mcs_long_lp(struct mdfld_dsi_pkg_sender *sender,
+-			u32 *data, u32 len, int delay);
+-extern int mdfld_dsi_send_gen_short_hs(struct mdfld_dsi_pkg_sender *sender,
+-			u8 param0, u8 param1, u8 param_num, int delay);
+-extern int mdfld_dsi_send_gen_short_lp(struct mdfld_dsi_pkg_sender *sender,
+-			u8 param0, u8 param1, u8 param_num, int delay);
+-extern int mdfld_dsi_send_gen_long_hs(struct mdfld_dsi_pkg_sender *sender,
+-			u32 *data, u32 len, int delay);
+-extern int mdfld_dsi_send_gen_long_lp(struct mdfld_dsi_pkg_sender *sender,
+-			u32 *data, u32 len, int delay);
+-
+-extern int mdfld_dsi_read_gen_hs(struct mdfld_dsi_pkg_sender *sender,
+-			u8 param0, u8 param1, u8 param_num, u32 *data, u16 len);
+-extern int mdfld_dsi_read_gen_lp(struct mdfld_dsi_pkg_sender *sender,
+-			u8 param0, u8 param1, u8 param_num, u32 *data, u16 len);
+-extern int mdfld_dsi_read_mcs_hs(struct mdfld_dsi_pkg_sender *sender,
+-			u8 cmd, u32 *data, u16 len);
+-extern int mdfld_dsi_read_mcs_lp(struct mdfld_dsi_pkg_sender *sender,
+-			u8 cmd, u32 *data, u16 len);
+-
+-extern void mdfld_dsi_cmds_kick_out(struct mdfld_dsi_pkg_sender *sender);
+-
+-#endif /* __MDFLD_DSI_PKG_SENDER_H__ */
+diff --git a/drivers/staging/gma500/mdfld_intel_display.c b/drivers/staging/gma500/mdfld_intel_display.c
+deleted file mode 100644
+index 8eb827e..0000000
+--- a/drivers/staging/gma500/mdfld_intel_display.c
++++ /dev/null
+@@ -1,1404 +0,0 @@
+-/*
+- * Copyright © 2006-2011 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- *	Eric Anholt <eric at anholt.net>
+- */
+-
+-#include "framebuffer.h"
+-#include "psb_intel_display.h"
+-#include "mdfld_dsi_dbi.h"
+-#include "mdfld_dsi_dpi.h"
+-#include "mdfld_dsi_dbi_dpu.h"
+-
+-#include <linux/pm_runtime.h>
+-
+-#ifdef MIN
+-#undef MIN
+-#endif
+-
+-#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+-
+-/* Hardcoded currently */
+-static int ksel = KSEL_CRYSTAL_19;
+-
+-extern void mdfld_save_display(struct drm_device *dev);
+-extern bool gbgfxsuspended;
+-
+-struct psb_intel_range_t {
+-	int min, max;
+-};
+-
+-struct mdfld_limit_t {
+-	struct psb_intel_range_t dot, m, p1;
+-};
+-
+-struct mdfld_intel_clock_t {
+-	/* given values */
+-	int n;
+-	int m1, m2;
+-	int p1, p2;
+-	/* derived values */
+-	int dot;
+-	int vco;
+-	int m;
+-	int p;
+-};
+-
+-
+-
+-#define COUNT_MAX 0x10000000
+-
+-void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
+-{
+-	int count, temp;
+-	u32 pipeconf_reg = PIPEACONF;
+-	
+-	switch (pipe) {
+-	case 0:
+-		break;
+-	case 1:
+-		pipeconf_reg = PIPEBCONF;
+-		break;
+-	case 2:
+-		pipeconf_reg = PIPECCONF;
+-		break;
+-	default:
+-		DRM_ERROR("Illegal Pipe Number. \n");
+-		return;
+-	}
+-
+-	/* FIXME JLIU7_PO */
+-	psb_intel_wait_for_vblank(dev);
+-	return;
+-
+-	/* Wait for for the pipe disable to take effect. */
+-	for (count = 0; count < COUNT_MAX; count++) {
+-		temp = REG_READ(pipeconf_reg);
+-		if ((temp & PIPEACONF_PIPE_STATE) == 0)
+-			break;
+-	}
+-}
+-
+-void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
+-{
+-	int count, temp;
+-	u32 pipeconf_reg = PIPEACONF;
+-	
+-	switch (pipe) {
+-	case 0:
+-		break;
+-	case 1:
+-		pipeconf_reg = PIPEBCONF;
+-		break;
+-	case 2:
+-		pipeconf_reg = PIPECCONF;
+-		break;
+-	default:
+-		dev_err(dev->dev, "Illegal Pipe Number.\n");
+-		return;
+-	}
+-
+-	/* FIXME JLIU7_PO */
+-	psb_intel_wait_for_vblank(dev);
+-	return;
+-
+-	/* Wait for for the pipe enable to take effect. */
+-	for (count = 0; count < COUNT_MAX; count++) {
+-		temp = REG_READ(pipeconf_reg);
+-		if ((temp & PIPEACONF_PIPE_STATE) == 1)
+-			break;
+-	}
+-}
+-
+-
+-static int mdfld_intel_crtc_cursor_set(struct drm_crtc *crtc,
+-				 struct drm_file *file_priv,
+-				 uint32_t handle,
+-				 uint32_t width, uint32_t height)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	uint32_t control = CURACNTR;
+-	uint32_t base = CURABASE;
+-	uint32_t temp;
+-	size_t addr = 0;
+-	struct gtt_range *gt;
+-	struct drm_gem_object *obj;
+-	int ret;
+-
+-	switch (pipe) {
+-	case 0:
+-		break;
+-	case 1:
+-		control = CURBCNTR;
+-		base = CURBBASE;
+-		break;
+-	case 2:
+-		control = CURCCNTR;
+-		base = CURCBASE;
+-		break;
+-	default:
+-		dev_err(dev->dev, "Illegal Pipe Number. \n");
+-		return -EINVAL;
+-	}
+-	
+-#if 1 /* FIXME_JLIU7 can't enalbe cursorB/C HW issue. need to remove after HW fix */
+-	if (pipe != 0)
+-		return 0;
+-#endif 
+-	/* if we want to turn of the cursor ignore width and height */
+-	if (!handle) {
+-		dev_dbg(dev->dev, "cursor off\n");
+-		/* turn off the cursor */
+-		temp = 0;
+-		temp |= CURSOR_MODE_DISABLE;
+-
+-		if (gma_power_begin(dev, true)) {
+-			REG_WRITE(control, temp);
+-			REG_WRITE(base, 0);
+-			gma_power_end(dev);
+-		}
+-		/* Unpin the old GEM object */
+-		if (psb_intel_crtc->cursor_obj) {
+-			gt = container_of(psb_intel_crtc->cursor_obj,
+-							struct gtt_range, gem);
+-			psb_gtt_unpin(gt);
+-			drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+-			psb_intel_crtc->cursor_obj = NULL;
+-		}
+-		return 0;
+-	}
+-
+-	/* Currently we only support 64x64 cursors */
+-	if (width != 64 || height != 64) {
+-		DRM_ERROR("we currently only support 64x64 cursors\n");
+-		return -EINVAL;
+-	}
+-
+-	obj = drm_gem_object_lookup(dev, file_priv, handle);
+-	if (!obj)
+-		return -ENOENT;
+-
+-	if (obj->size < width * height * 4) {
+-		dev_dbg(dev->dev, "buffer is to small\n");
+-		return -ENOMEM;
+-	}
+-
+-	gt = container_of(obj, struct gtt_range, gem);
+-
+-	/* Pin the memory into the GTT */
+-	ret = psb_gtt_pin(gt);
+-	if (ret) {
+-		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+-		return ret;
+-	}
+-
+-
+-	addr = gt->offset;	/* Or resource.start ??? */
+-
+-	psb_intel_crtc->cursor_addr = addr;
+-
+-	temp = 0;
+-	/* set the pipe for the cursor */
+-	temp |= (pipe << 28);
+-	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+-
+-	if (gma_power_begin(dev, true)) {
+-		REG_WRITE(control, temp);
+-		REG_WRITE(base, addr);
+-		gma_power_end(dev);
+-	}
+-	/* unpin the old GEM object */
+-	if (psb_intel_crtc->cursor_obj) {
+-		gt = container_of(psb_intel_crtc->cursor_obj,
+-							struct gtt_range, gem);
+-		psb_gtt_unpin(gt);
+-		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+-		psb_intel_crtc->cursor_obj = obj;
+-	}
+-	return 0;
+-}
+-
+-static int mdfld_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
+-	struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
+-	struct psb_drm_dpu_rect rect;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	uint32_t pos = CURAPOS;
+-	uint32_t base = CURABASE;
+-	uint32_t temp = 0;
+-	uint32_t addr;
+-
+-	switch (pipe) {
+-	case 0:
+-		if (dpu_info) {
+-			rect.x = x;
+-			rect.y = y;
+-		
+-			mdfld_dbi_dpu_report_damage(dev, MDFLD_CURSORA, &rect);
+-			mdfld_dpu_exit_dsr(dev);
+-		} else if (!(dev_priv->dsr_fb_update & MDFLD_DSR_CURSOR_0))
+-			mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_CURSOR_0);
+-		break;
+-	case 1:
+-		pos = CURBPOS;
+-		base = CURBBASE;
+-		break;
+-	case 2:
+-		if (dpu_info) {
+-			mdfld_dbi_dpu_report_damage(dev, MDFLD_CURSORC, &rect);
+-			mdfld_dpu_exit_dsr(dev);
+-		} else if (!(dev_priv->dsr_fb_update & MDFLD_DSR_CURSOR_2))
+-			mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_CURSOR_2);
+-		pos = CURCPOS;
+-		base = CURCBASE;
+-		break;
+-	default:
+-		DRM_ERROR("Illegal Pipe Number. \n");
+-		return -EINVAL;
+-	}
+-		
+-#if 1 /* FIXME_JLIU7 can't enable cursorB/C HW issue. need to remove after HW fix */
+-	if (pipe != 0)
+-		return 0;
+-#endif 
+-	if (x < 0) {
+-		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+-		x = -x;
+-	}
+-	if (y < 0) {
+-		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+-		y = -y;
+-	}
+-
+-	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+-	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+-
+-	addr = psb_intel_crtc->cursor_addr;
+-
+-	if (gma_power_begin(dev, true)) {
+-		REG_WRITE(pos, temp);
+-		REG_WRITE(base, addr);
+-		gma_power_end(dev);
+-	}
+-
+-	return 0;
+-}
+-
+-const struct drm_crtc_funcs mdfld_intel_crtc_funcs = {
+-	.cursor_set = mdfld_intel_crtc_cursor_set,
+-	.cursor_move = mdfld_intel_crtc_cursor_move,
+-	.gamma_set = psb_intel_crtc_gamma_set,
+-	.set_config = drm_crtc_helper_set_config,
+-	.destroy = psb_intel_crtc_destroy,
+-};
+-
+-static struct drm_device globle_dev;
+-
+-void mdfld__intel_plane_set_alpha(int enable)
+-{
+-	struct drm_device *dev = &globle_dev;
+-	int dspcntr_reg = DSPACNTR;
+-	u32 dspcntr;
+-
+-	dspcntr = REG_READ(dspcntr_reg);
+-
+-	if (enable) {
+-		dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA;
+-		dspcntr |= DISPPLANE_32BPP;
+-	} else {
+-		dspcntr &= ~DISPPLANE_32BPP;
+-		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+-	}
+-
+-	REG_WRITE(dspcntr_reg, dspcntr);
+-}
+-
+-int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	/* struct drm_i915_master_private *master_priv; */
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+-	int pipe = psb_intel_crtc->pipe;
+-	unsigned long start, offset;
+-	int dsplinoff = DSPALINOFF;
+-	int dspsurf = DSPASURF;
+-	int dspstride = DSPASTRIDE;
+-	int dspcntr_reg = DSPACNTR;
+-	u32 dspcntr;
+-	int ret = 0;
+-
+-	memcpy(&globle_dev, dev, sizeof(struct drm_device));
+-
+-	if (!gma_power_begin(dev, true))
+-		return 0;
+-
+-	/* no fb bound */
+-	if (!crtc->fb) {
+-		dev_err(dev->dev, "No FB bound\n");
+-		goto psb_intel_pipe_cleaner;
+-	}
+-
+-	switch (pipe) {
+-	case 0:
+-		dsplinoff = DSPALINOFF;
+-		break;
+-	case 1:
+-		dsplinoff = DSPBLINOFF;
+-		dspsurf = DSPBSURF;
+-		dspstride = DSPBSTRIDE;
+-		dspcntr_reg = DSPBCNTR;
+-		break;
+-	case 2:
+-		dsplinoff = DSPCLINOFF;
+-		dspsurf = DSPCSURF;
+-		dspstride = DSPCSTRIDE;
+-		dspcntr_reg = DSPCCNTR;
+-		break;
+-	default:
+-		dev_err(dev->dev, "Illegal Pipe Number.\n");
+-		return -EINVAL;
+-	}
+-
+-	ret = psb_gtt_pin(psbfb->gtt);
+-	if (ret < 0)
+-	        goto psb_intel_pipe_set_base_exit;
+-
+-	start = psbfb->gtt->offset;
+-	offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+-
+-	REG_WRITE(dspstride, crtc->fb->pitch);
+-	dspcntr = REG_READ(dspcntr_reg);
+-	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+-
+-	switch (crtc->fb->bits_per_pixel) {
+-	case 8:
+-		dspcntr |= DISPPLANE_8BPP;
+-		break;
+-	case 16:
+-		if (crtc->fb->depth == 15)
+-			dspcntr |= DISPPLANE_15_16BPP;
+-		else
+-			dspcntr |= DISPPLANE_16BPP;
+-		break;
+-	case 24:
+-	case 32:
+-		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+-		break;
+-	default:
+-		dev_err(dev->dev, "Unknown color depth\n");
+-		ret = -EINVAL;
+-		goto psb_intel_pipe_set_base_exit;
+-	}
+-	REG_WRITE(dspcntr_reg, dspcntr);
+-
+-	dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
+-	                                        start, offset, x, y);
+-
+-	REG_WRITE(dsplinoff, offset);
+-	REG_READ(dsplinoff);
+-	REG_WRITE(dspsurf, start);
+-	REG_READ(dspsurf);
+-
+-psb_intel_pipe_cleaner:
+-	/* If there was a previous display we can now unpin it */
+-	if (old_fb)
+-		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+-
+-psb_intel_pipe_set_base_exit:
+-	gma_power_end(dev);
+-	return ret;
+-}
+-
+-/**
+- * Disable the pipe, plane and pll.
+- *
+- */
+-void mdfld_disable_crtc (struct drm_device *dev, int pipe)
+-{
+-	int dpll_reg = MRST_DPLL_A;
+-	int dspcntr_reg = DSPACNTR;
+-	int dspbase_reg = MRST_DSPABASE;
+-	int pipeconf_reg = PIPEACONF;
+-	u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
+-	u32 temp;
+-
+-	switch (pipe) {
+-	case 0:
+-		break;
+-	case 1:
+-		dpll_reg = MDFLD_DPLL_B;
+-		dspcntr_reg = DSPBCNTR;
+-		dspbase_reg = DSPBSURF;
+-		pipeconf_reg = PIPEBCONF;
+-		break;
+-	case 2:
+-		dpll_reg = MRST_DPLL_A;
+-		dspcntr_reg = DSPCCNTR;
+-		dspbase_reg = MDFLD_DSPCBASE;
+-		pipeconf_reg = PIPECCONF;
+-		gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
+-		break;
+-	default:
+-		dev_err(dev->dev, "Illegal Pipe Number. \n");
+-		return;
+-	}
+-
+-	if (pipe != 1)
+-		mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
+-
+-	/* Disable display plane */
+-	temp = REG_READ(dspcntr_reg);
+-	if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+-		REG_WRITE(dspcntr_reg,
+-			  temp & ~DISPLAY_PLANE_ENABLE);
+-		/* Flush the plane changes */
+-		REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+-		REG_READ(dspbase_reg);
+-	}
+-
+-	/* FIXME_JLIU7 MDFLD_PO revisit */
+-	/* Wait for vblank for the disable to take effect */
+-/* MDFLD_PO_JLIU7		psb_intel_wait_for_vblank(dev); */
+-
+-	/* Next, disable display pipes */
+-	temp = REG_READ(pipeconf_reg);
+-	if ((temp & PIPEACONF_ENABLE) != 0) {
+-		temp &= ~PIPEACONF_ENABLE;
+-		temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+-		REG_WRITE(pipeconf_reg, temp);
+-		REG_READ(pipeconf_reg);
+-
+-		/* Wait for for the pipe disable to take effect. */
+-		mdfldWaitForPipeDisable(dev, pipe);
+-	}
+-
+-	temp = REG_READ(dpll_reg);
+-	if (temp & DPLL_VCO_ENABLE) {
+-		if (((pipe != 1) && !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
+-				|| (pipe == 1)){
+-			temp &= ~(DPLL_VCO_ENABLE);
+-			REG_WRITE(dpll_reg, temp);
+-			REG_READ(dpll_reg);
+-			/* Wait for the clocks to turn off. */
+-			/* FIXME_MDFLD PO may need more delay */
+-			udelay(500);
+-
+-			if (!(temp & MDFLD_PWR_GATE_EN)) {
+-				/* gating power of DPLL */
+-				REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
+-				/* FIXME_MDFLD PO - change 500 to 1 after PO */
+-				udelay(5000);
+-			}
+-		}
+-	}
+-
+-}
+-
+-/**
+- * Sets the power management mode of the pipe and plane.
+- *
+- * This code should probably grow support for turning the cursor off and back
+- * on appropriately at the same time as we're turning the pipe off/on.
+- */
+-static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	int dpll_reg = MRST_DPLL_A;
+-	int dspcntr_reg = DSPACNTR;
+-	int dspbase_reg = MRST_DSPABASE;
+-	int pipeconf_reg = PIPEACONF;
+-	u32 pipestat_reg = PIPEASTAT;
+-	u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
+-	u32 pipeconf = dev_priv->pipeconf;
+-	u32 dspcntr = dev_priv->dspcntr;
+-	u32 mipi_enable_reg = MIPIA_DEVICE_READY_REG;
+-	u32 temp;
+-	bool enabled;
+-	int timeout = 0;
+-
+-	if (!gma_power_begin(dev, true))
+-		return;
+-
+-	 /* Ignore if system is already in DSR and in suspended state. */
+-	if(/*gbgfxsuspended */0 && dev_priv->dispstatus == false && mode == 3){
+-	    if(dev_priv->rpm_enabled && pipe == 1){
+-	//          dev_priv->is_mipi_on = false;
+-	          pm_request_idle(&dev->pdev->dev);
+-	    }
+-	    return;
+-	}else if(mode == 0) {
+-		//do not need to set gbdispstatus=true in crtc.
+-		//this will be set in encoder such as mdfld_dsi_dbi_dpms
+-	    //gbdispstatus = true;
+-	}
+-
+-/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */
+-/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */
+-
+-	switch (pipe) {
+-	case 0:
+-		break;
+-	case 1:
+-		dpll_reg = DPLL_B;
+-		dspcntr_reg = DSPBCNTR;
+-		dspbase_reg = MRST_DSPBBASE;
+-		pipeconf_reg = PIPEBCONF;
+-		pipeconf = dev_priv->pipeconf1;
+-		dspcntr = dev_priv->dspcntr1;
+-		dpll_reg = MDFLD_DPLL_B;
+-		break;
+-	case 2:
+-		dpll_reg = MRST_DPLL_A;
+-		dspcntr_reg = DSPCCNTR;
+-		dspbase_reg = MDFLD_DSPCBASE;
+-		pipeconf_reg = PIPECCONF;
+-		pipestat_reg = PIPECSTAT;
+-		pipeconf = dev_priv->pipeconf2;
+-		dspcntr = dev_priv->dspcntr2;
+-		gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
+-		mipi_enable_reg = MIPIA_DEVICE_READY_REG + MIPIC_REG_OFFSET;
+-		break;
+-	default:
+-		dev_err(dev->dev, "Illegal Pipe Number.\n");
+-		return;
+-	}
+-
+-	/* XXX: When our outputs are all unaware of DPMS modes other than off
+-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+-	 */
+-	switch (mode) {
+-	case DRM_MODE_DPMS_ON:
+-	case DRM_MODE_DPMS_STANDBY:
+-	case DRM_MODE_DPMS_SUSPEND:
+-		/* Enable the DPLL */
+-		temp = REG_READ(dpll_reg);
+-
+-		if ((temp & DPLL_VCO_ENABLE) == 0) {
+-			/* When ungating power of DPLL, needs to wait 0.5us before enable the VCO */
+-			if (temp & MDFLD_PWR_GATE_EN) {
+-				temp &= ~MDFLD_PWR_GATE_EN;
+-				REG_WRITE(dpll_reg, temp);
+-				/* FIXME_MDFLD PO - change 500 to 1 after PO */
+-				udelay(500);
+-			}
+-
+-			REG_WRITE(dpll_reg, temp);
+-			REG_READ(dpll_reg);
+-			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+-			udelay(500);
+-			
+-			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+-			REG_READ(dpll_reg);
+-
+-			/**
+-			 * wait for DSI PLL to lock
+-			 * NOTE: only need to poll status of pipe 0 and pipe 1,
+-			 * since both MIPI pipes share the same PLL.
+-			 */
+-			while ((pipe != 2) && (timeout < 20000) && !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+-				udelay(150);
+-				timeout ++;
+-			}
+-		}
+-
+-		/* Enable the plane */
+-		temp = REG_READ(dspcntr_reg);
+-		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+-			REG_WRITE(dspcntr_reg,
+-				temp | DISPLAY_PLANE_ENABLE);
+-			/* Flush the plane changes */
+-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+-		}
+-
+-		/* Enable the pipe */
+-		temp = REG_READ(pipeconf_reg);
+-		if ((temp & PIPEACONF_ENABLE) == 0) {
+-			REG_WRITE(pipeconf_reg, pipeconf);
+-
+-			/* Wait for for the pipe enable to take effect. */
+-			mdfldWaitForPipeEnable(dev, pipe);
+-		}
+-
+-		/*workaround for sighting 3741701 Random X blank display*/
+-		/*perform w/a in video mode only on pipe A or C*/
+-		if ((pipe == 0 || pipe == 2) &&
+-			(mdfld_panel_dpi(dev) == true)) {
+-			REG_WRITE(pipestat_reg, REG_READ(pipestat_reg));
+-			msleep(100);
+-			if(PIPE_VBLANK_STATUS & REG_READ(pipestat_reg)) {
+-				printk(KERN_ALERT "OK");
+-			} else {
+-				printk(KERN_ALERT "STUCK!!!!");
+-				/*shutdown controller*/
+-				temp = REG_READ(dspcntr_reg);
+-				REG_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
+-				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+-				/*mdfld_dsi_dpi_shut_down(dev, pipe);*/
+-				REG_WRITE(0xb048, 1);
+-				msleep(100);
+-				temp = REG_READ(pipeconf_reg);
+-				temp &= ~PIPEACONF_ENABLE;
+-				REG_WRITE(pipeconf_reg, temp);
+-				msleep(100); /*wait for pipe disable*/
+-			/*printk(KERN_ALERT "70008 is %x\n", REG_READ(0x70008));
+-			printk(KERN_ALERT "b074 is %x\n", REG_READ(0xb074));*/
+-				REG_WRITE(mipi_enable_reg, 0);
+-				msleep(100);
+-			printk(KERN_ALERT "70008 is %x\n", REG_READ(0x70008));
+-			printk(KERN_ALERT "b074 is %x\n", REG_READ(0xb074));
+-				REG_WRITE(0xb004, REG_READ(0xb004));
+-				/* try to bring the controller back up again*/
+-				REG_WRITE(mipi_enable_reg, 1);
+-				temp = REG_READ(dspcntr_reg);
+-				REG_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
+-				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+-				/*mdfld_dsi_dpi_turn_on(dev, pipe);*/
+-				REG_WRITE(0xb048, 2);
+-				msleep(100);
+-				temp = REG_READ(pipeconf_reg);
+-				temp |= PIPEACONF_ENABLE;
+-				REG_WRITE(pipeconf_reg, temp);
+-			}
+-		}
+-
+-		psb_intel_crtc_load_lut(crtc);
+-
+-		/* Give the overlay scaler a chance to enable
+-		   if it's on this pipe */
+-		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+-
+-		break;
+-	case DRM_MODE_DPMS_OFF:
+-		/* Give the overlay scaler a chance to disable
+-		 * if it's on this pipe */
+-		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+-		if (pipe != 1)
+-			mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
+-
+-		/* Disable the VGA plane that we never use */
+-		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+-
+-		/* Disable display plane */
+-		temp = REG_READ(dspcntr_reg);
+-		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+-			REG_WRITE(dspcntr_reg,
+-				  temp & ~DISPLAY_PLANE_ENABLE);
+-			/* Flush the plane changes */
+-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+-			REG_READ(dspbase_reg);
+-		}
+-
+-		/* FIXME_JLIU7 MDFLD_PO revisit */
+-		/* Wait for vblank for the disable to take effect */
+-// MDFLD_PO_JLIU7		psb_intel_wait_for_vblank(dev);
+-
+-		/* Next, disable display pipes */
+-		temp = REG_READ(pipeconf_reg);
+-		if ((temp & PIPEACONF_ENABLE) != 0) {
+-			temp &= ~PIPEACONF_ENABLE;
+-			temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
+-			REG_WRITE(pipeconf_reg, temp);
+-//			REG_WRITE(pipeconf_reg, 0);
+-			REG_READ(pipeconf_reg);
+-
+-			/* Wait for for the pipe disable to take effect. */
+-			mdfldWaitForPipeDisable(dev, pipe);
+-		}
+-
+-		temp = REG_READ(dpll_reg);
+-		if (temp & DPLL_VCO_ENABLE) {
+-			if (((pipe != 1) && !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
+-					|| (pipe == 1)){
+-				temp &= ~(DPLL_VCO_ENABLE);
+-				REG_WRITE(dpll_reg, temp);
+-				REG_READ(dpll_reg);
+-				/* Wait for the clocks to turn off. */
+-				/* FIXME_MDFLD PO may need more delay */
+-				udelay(500);
+-#if 0 /* MDFLD_PO_JLIU7 */	
+-		if (!(temp & MDFLD_PWR_GATE_EN)) {
+-			/* gating power of DPLL */
+-			REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
+-			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+-			udelay(5000);
+-		}
+-#endif  /* MDFLD_PO_JLIU7 */	
+-			}
+-		}
+-		break;
+-	}
+-
+-	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+-
+-#if 0				/* JB: Add vblank support later */
+-	if (enabled)
+-		dev_priv->vblank_pipe |= (1 << pipe);
+-	else
+-		dev_priv->vblank_pipe &= ~(1 << pipe);
+-#endif
+-
+-	gma_power_end(dev);
+-}
+-
+-
+-#define MDFLD_LIMT_DPLL_19	    0
+-#define MDFLD_LIMT_DPLL_25	    1
+-#define MDFLD_LIMT_DPLL_83	    2
+-#define MDFLD_LIMT_DPLL_100	    3
+-#define MDFLD_LIMT_DSIPLL_19	    4
+-#define MDFLD_LIMT_DSIPLL_25	    5
+-#define MDFLD_LIMT_DSIPLL_83	    6
+-#define MDFLD_LIMT_DSIPLL_100	    7
+-
+-#define MDFLD_DOT_MIN		  19750  /* FIXME_MDFLD JLIU7 need to find out  min & max for MDFLD */
+-#define MDFLD_DOT_MAX		  120000
+-#define MDFLD_DPLL_M_MIN_19	    113
+-#define MDFLD_DPLL_M_MAX_19	    155
+-#define MDFLD_DPLL_P1_MIN_19	    2
+-#define MDFLD_DPLL_P1_MAX_19	    10
+-#define MDFLD_DPLL_M_MIN_25	    101
+-#define MDFLD_DPLL_M_MAX_25	    130
+-#define MDFLD_DPLL_P1_MIN_25	    2
+-#define MDFLD_DPLL_P1_MAX_25	    10
+-#define MDFLD_DPLL_M_MIN_83	    64
+-#define MDFLD_DPLL_M_MAX_83	    64
+-#define MDFLD_DPLL_P1_MIN_83	    2
+-#define MDFLD_DPLL_P1_MAX_83	    2
+-#define MDFLD_DPLL_M_MIN_100	    64
+-#define MDFLD_DPLL_M_MAX_100	    64
+-#define MDFLD_DPLL_P1_MIN_100	    2
+-#define MDFLD_DPLL_P1_MAX_100	    2
+-#define MDFLD_DSIPLL_M_MIN_19	    131
+-#define MDFLD_DSIPLL_M_MAX_19	    175
+-#define MDFLD_DSIPLL_P1_MIN_19	    3
+-#define MDFLD_DSIPLL_P1_MAX_19	    8
+-#define MDFLD_DSIPLL_M_MIN_25	    97
+-#define MDFLD_DSIPLL_M_MAX_25	    140
+-#define MDFLD_DSIPLL_P1_MIN_25	    3
+-#define MDFLD_DSIPLL_P1_MAX_25	    9
+-#define MDFLD_DSIPLL_M_MIN_83	    33
+-#define MDFLD_DSIPLL_M_MAX_83	    92
+-#define MDFLD_DSIPLL_P1_MIN_83	    2
+-#define MDFLD_DSIPLL_P1_MAX_83	    3
+-#define MDFLD_DSIPLL_M_MIN_100	    97
+-#define MDFLD_DSIPLL_M_MAX_100	    140
+-#define MDFLD_DSIPLL_P1_MIN_100	    3
+-#define MDFLD_DSIPLL_P1_MAX_100	    9
+-
+-static const struct mdfld_limit_t mdfld_limits[] = {
+-	{			/* MDFLD_LIMT_DPLL_19 */
+-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+-	 .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19},
+-	 .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19},
+-	 },
+-	{			/* MDFLD_LIMT_DPLL_25 */
+-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+-	 .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25},
+-	 .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25},
+-	 },
+-	{			/* MDFLD_LIMT_DPLL_83 */
+-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+-	 .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83},
+-	 .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83},
+-	 },
+-	{			/* MDFLD_LIMT_DPLL_100 */
+-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+-	 .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100},
+-	 .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100},
+-	 },
+-	{			/* MDFLD_LIMT_DSIPLL_19 */
+-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+-	 .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19},
+-	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19},
+-	 },
+-	{			/* MDFLD_LIMT_DSIPLL_25 */
+-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+-	 .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25},
+-	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25},
+-	 },
+-	{			/* MDFLD_LIMT_DSIPLL_83 */
+-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+-	 .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83},
+-	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83},
+-	 },
+-	{			/* MDFLD_LIMT_DSIPLL_100 */
+-	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
+-	 .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100},
+-	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100},
+-	 },
+-};
+-
+-#define MDFLD_M_MIN	    21
+-#define MDFLD_M_MAX	    180
+-static const u32 mdfld_m_converts[] = {
+-/* M configuration table from 9-bit LFSR table */
+-	224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
+-	173, 342, 171, 85, 298, 149, 74, 37, 18, 265,   /* 31 - 40 */
+-	388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
+-	83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */
+-	341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
+-	461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
+-	106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
+-	71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */
+-	253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
+-	478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
+-	477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
+-	210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */
+-	145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
+-	380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
+-	103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */
+-	396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */
+-};
+-
+-static const struct mdfld_limit_t *mdfld_limit(struct drm_crtc *crtc)
+-{
+-	const struct mdfld_limit_t *limit = NULL;
+-	struct drm_device *dev = crtc->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
+-	    || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
+-		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
+-			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
+-		else if (ksel == KSEL_BYPASS_25) 
+-			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25];
+-		else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 166))
+-			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83];
+-		else if ((ksel == KSEL_BYPASS_83_100) &&
+-			 (dev_priv->core_freq == 100 || dev_priv->core_freq == 200))
+-			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
+-	} else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+-		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
+-			limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
+-		else if (ksel == KSEL_BYPASS_25) 
+-			limit = &mdfld_limits[MDFLD_LIMT_DPLL_25];
+-		else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 166))
+-			limit = &mdfld_limits[MDFLD_LIMT_DPLL_83];
+-		else if ((ksel == KSEL_BYPASS_83_100) &&
+-			 (dev_priv->core_freq == 100 || dev_priv->core_freq == 200))
+-			limit = &mdfld_limits[MDFLD_LIMT_DPLL_100];
+-	} else {
+-		limit = NULL;
+-		dev_err(dev->dev, "mdfld_limit Wrong display type.\n");
+-	}
+-
+-	return limit;
+-}
+-
+-/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+-static void mdfld_clock(int refclk, struct mdfld_intel_clock_t *clock)
+-{
+-	clock->dot = (refclk * clock->m) / clock->p1;
+-}
+-
+-/**
+- * Returns a set of divisors for the desired target clock with the given refclk,
+- * or FALSE.  Divisor values are the actual divisors for
+- */
+-static bool
+-mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
+-		struct mdfld_intel_clock_t *best_clock)
+-{
+-	struct mdfld_intel_clock_t clock;
+-	const struct mdfld_limit_t *limit = mdfld_limit(crtc);
+-	int err = target;
+-
+-	memset(best_clock, 0, sizeof(*best_clock));
+-
+-	for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+-		for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+-		     clock.p1++) {
+-			int this_err;
+-
+-			mdfld_clock(refclk, &clock);
+-
+-			this_err = abs(clock.dot - target);
+-			if (this_err < err) {
+-				*best_clock = clock;
+-				err = this_err;
+-			}
+-		}
+-	}
+-	return err != target;
+-}
+-
+-/**
+- * Return the pipe currently connected to the panel fitter,
+- * or -1 if the panel fitter is not present or not in use
+- */
+-static int mdfld_panel_fitter_pipe(struct drm_device *dev)
+-{
+-	u32 pfit_control;
+-
+-	pfit_control = REG_READ(PFIT_CONTROL);
+-
+-	/* See if the panel fitter is in use */
+-	if ((pfit_control & PFIT_ENABLE) == 0)
+-		return -1;
+-	return (pfit_control >> 29) & 3;
+-}
+-
+-static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
+-			      struct drm_display_mode *mode,
+-			      struct drm_display_mode *adjusted_mode,
+-			      int x, int y,
+-			      struct drm_framebuffer *old_fb)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	int pipe = psb_intel_crtc->pipe;
+-	int fp_reg = MRST_FPA0;
+-	int dpll_reg = MRST_DPLL_A;
+-	int dspcntr_reg = DSPACNTR;
+-	int pipeconf_reg = PIPEACONF;
+-	int htot_reg = HTOTAL_A;
+-	int hblank_reg = HBLANK_A;
+-	int hsync_reg = HSYNC_A;
+-	int vtot_reg = VTOTAL_A;
+-	int vblank_reg = VBLANK_A;
+-	int vsync_reg = VSYNC_A;
+-	int dspsize_reg = DSPASIZE; 
+-	int dsppos_reg = DSPAPOS; 
+-	int pipesrc_reg = PIPEASRC;
+-	u32 *pipeconf = &dev_priv->pipeconf;
+-	u32 *dspcntr = &dev_priv->dspcntr;
+-	int refclk = 0;
+-	int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, clk_tmp = 0;
+-	struct mdfld_intel_clock_t clock;
+-	bool ok;
+-	u32 dpll = 0, fp = 0;
+-	bool is_crt = false, is_lvds = false, is_tv = false;
+-	bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
+-	struct drm_mode_config *mode_config = &dev->mode_config;
+-	struct psb_intel_output *psb_intel_output = NULL;
+-	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
+-	struct drm_encoder *encoder;
+-	struct drm_connector *connector;
+-	int timeout = 0;
+-
+-	dev_dbg(dev->dev, "pipe = 0x%x \n", pipe);
+-
+-	switch (pipe) {
+-	case 0:
+-		break;
+-	case 1:
+-		fp_reg = FPB0;
+-		dpll_reg = DPLL_B;
+-		dspcntr_reg = DSPBCNTR;
+-		pipeconf_reg = PIPEBCONF;
+-		htot_reg = HTOTAL_B;
+-		hblank_reg = HBLANK_B;
+-		hsync_reg = HSYNC_B;
+-		vtot_reg = VTOTAL_B;
+-		vblank_reg = VBLANK_B;
+-		vsync_reg = VSYNC_B;
+-		dspsize_reg = DSPBSIZE; 
+-		dsppos_reg = DSPBPOS; 
+-		pipesrc_reg = PIPEBSRC;
+-		pipeconf = &dev_priv->pipeconf1;
+-		dspcntr = &dev_priv->dspcntr1;
+-		fp_reg = MDFLD_DPLL_DIV0;
+-		dpll_reg = MDFLD_DPLL_B;
+-		break;
+-	case 2:
+-		dpll_reg = MRST_DPLL_A;
+-		dspcntr_reg = DSPCCNTR;
+-		pipeconf_reg = PIPECCONF;
+-		htot_reg = HTOTAL_C;
+-		hblank_reg = HBLANK_C;
+-		hsync_reg = HSYNC_C;
+-		vtot_reg = VTOTAL_C;
+-		vblank_reg = VBLANK_C;
+-		vsync_reg = VSYNC_C;
+-		dspsize_reg = DSPCSIZE; 
+-		dsppos_reg = DSPCPOS; 
+-		pipesrc_reg = PIPECSRC;
+-		pipeconf = &dev_priv->pipeconf2;
+-		dspcntr = &dev_priv->dspcntr2;
+-		break;
+-	default:
+-		DRM_ERROR("Illegal Pipe Number. \n");
+-		return 0;
+-	}
+-
+-	dev_dbg(dev->dev, "adjusted_hdisplay = %d\n",
+-		 adjusted_mode->hdisplay);
+-	dev_dbg(dev->dev, "adjusted_vdisplay = %d\n",
+-		 adjusted_mode->vdisplay);
+-	dev_dbg(dev->dev, "adjusted_hsync_start = %d\n",
+-		 adjusted_mode->hsync_start);
+-	dev_dbg(dev->dev, "adjusted_hsync_end = %d\n",
+-		 adjusted_mode->hsync_end);
+-	dev_dbg(dev->dev, "adjusted_htotal = %d\n",
+-		 adjusted_mode->htotal);
+-	dev_dbg(dev->dev, "adjusted_vsync_start = %d\n",
+-		 adjusted_mode->vsync_start);
+-	dev_dbg(dev->dev, "adjusted_vsync_end = %d\n",
+-		 adjusted_mode->vsync_end);
+-	dev_dbg(dev->dev, "adjusted_vtotal = %d\n",
+-		 adjusted_mode->vtotal);
+-	dev_dbg(dev->dev, "adjusted_clock = %d\n",
+-		 adjusted_mode->clock);
+-	dev_dbg(dev->dev, "hdisplay = %d\n",
+-		 mode->hdisplay);
+-	dev_dbg(dev->dev, "vdisplay = %d\n",
+-		 mode->vdisplay);
+-
+-	if (!gma_power_begin(dev, true))
+-		return 0;
+-
+-	memcpy(&psb_intel_crtc->saved_mode, mode, sizeof(struct drm_display_mode));
+-	memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode, sizeof(struct drm_display_mode));
+-
+-	list_for_each_entry(connector, &mode_config->connector_list, head) {
+-			
+-		encoder = connector->encoder;
+-		
+-		if(!encoder)
+-			continue;
+-
+-		if (encoder->crtc != crtc)
+-			continue;
+-
+-		psb_intel_output = to_psb_intel_output(connector);
+-		
+-		dev_dbg(dev->dev, "output->type = 0x%x \n", psb_intel_output->type);
+-
+-		switch (psb_intel_output->type) {
+-		case INTEL_OUTPUT_LVDS:
+-			is_lvds = true;
+-			break;
+-		case INTEL_OUTPUT_TVOUT:
+-			is_tv = true;
+-			break;
+-		case INTEL_OUTPUT_ANALOG:
+-			is_crt = true;
+-			break;
+-		case INTEL_OUTPUT_MIPI:
+-			is_mipi = true;
+-			break;
+-		case INTEL_OUTPUT_MIPI2:
+-			is_mipi2 = true;
+-			break;
+-		case INTEL_OUTPUT_HDMI:
+-			is_hdmi = true;
+-			break;
+-		}
+-	}
+-
+-	/* Disable the VGA plane that we never use */
+-	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+-
+-	/* Disable the panel fitter if it was on our pipe */
+-	if (mdfld_panel_fitter_pipe(dev) == pipe)
+-		REG_WRITE(PFIT_CONTROL, 0);
+-
+-	/* pipesrc and dspsize control the size that is scaled from,
+-	 * which should always be the user's requested size.
+-	 */
+-	if (pipe == 1) {
+-		/* FIXME: To make HDMI display with 864x480 (TPO), 480x864 (PYR) or 480x854 (TMD), set the sprite
+-		 * width/height and souce image size registers with the adjusted mode for pipe B. */
+-
+-		/* The defined sprite rectangle must always be completely contained within the displayable
+-		 * area of the screen image (frame buffer). */
+-		REG_WRITE(dspsize_reg, ((MIN(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
+-				| (MIN(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
+-		/* Set the CRTC with encoder mode. */
+-		REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16)
+-				 | (mode->crtc_vdisplay - 1));
+-	} else {
+-		REG_WRITE(dspsize_reg, ((mode->crtc_vdisplay - 1) << 16) | (mode->crtc_hdisplay - 1));
+-		REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
+-	}
+-
+-	REG_WRITE(dsppos_reg, 0);
+-
+-	if (psb_intel_output)
+-		drm_connector_property_get_value(&psb_intel_output->base,
+-			dev->mode_config.scaling_mode_property, &scalingType);
+-
+-	if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
+-		/*
+-		 *	Medfield doesn't have register support for centering so
+-		 *	we need to mess with the h/vblank and h/vsync start and
+-		 *	ends to get central
+-		 */
+-		int offsetX = 0, offsetY = 0;
+-
+-		offsetX = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+-		offsetY = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
+-
+-		REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+-			((adjusted_mode->crtc_htotal - 1) << 16));
+-		REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+-			((adjusted_mode->crtc_vtotal - 1) << 16));
+-		REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - offsetX - 1) |
+-			((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
+-		REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - offsetX - 1) |
+-			((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
+-		REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - offsetY - 1) |
+-			((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
+-		REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - offsetY - 1) |
+-			((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
+-	} else {
+-		REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+-			((adjusted_mode->crtc_htotal - 1) << 16));
+-		REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+-			((adjusted_mode->crtc_vtotal - 1) << 16));
+-		REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+-			((adjusted_mode->crtc_hblank_end - 1) << 16));
+-		REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+-			((adjusted_mode->crtc_hsync_end - 1) << 16));
+-		REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+-			((adjusted_mode->crtc_vblank_end - 1) << 16));
+-		REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+-			((adjusted_mode->crtc_vsync_end - 1) << 16));
+-	}
+-
+-	/* Flush the plane changes */
+-	{
+-		struct drm_crtc_helper_funcs *crtc_funcs =
+-		    crtc->helper_private;
+-		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+-	}
+-
+-	/* setup pipeconf */
+-	*pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
+-
+-	/* Set up the display plane register */
+- 	*dspcntr = REG_READ(dspcntr_reg);
+-	*dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
+-	*dspcntr |= DISPLAY_PLANE_ENABLE;
+-/* MDFLD_PO_JLIU7	dspcntr |= DISPPLANE_BOTTOM; */
+-/* MDFLD_PO_JLIU7	dspcntr |= DISPPLANE_GAMMA_ENABLE; */
+-
+-	if (is_mipi2)
+-	{
+-		goto mrst_crtc_mode_set_exit;
+-	}
+-/* FIXME JLIU7 Add MDFLD HDMI supports */
+-/* FIXME_MDFLD JLIU7 DSIPLL clock *= 8? */
+-/* FIXME_MDFLD JLIU7 need to revist for dual MIPI supports */
+-	clk = adjusted_mode->clock;
+-
+-	if (is_hdmi) {
+-		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
+-		{
+-			refclk = 19200;
+-
+-			if (is_mipi || is_mipi2)
+-			{
+-				clk_n = 1, clk_p2 = 8;
+-			} else if (is_hdmi) {
+-				clk_n = 1, clk_p2 = 10;
+-			}
+-		} else if (ksel == KSEL_BYPASS_25) { 
+-			refclk = 25000;
+-
+-			if (is_mipi || is_mipi2)
+-			{
+-				clk_n = 1, clk_p2 = 8;
+-			} else if (is_hdmi) {
+-				clk_n = 1, clk_p2 = 10;
+-			}
+-		} else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 166)) {
+-			refclk = 83000;
+-
+-			if (is_mipi || is_mipi2)
+-			{
+-				clk_n = 4, clk_p2 = 8;
+-			} else if (is_hdmi) {
+-				clk_n = 4, clk_p2 = 10;
+-			}
+-		} else if ((ksel == KSEL_BYPASS_83_100) &&
+-			   (dev_priv->core_freq == 100 || dev_priv->core_freq == 200)) {
+-			refclk = 100000;
+-			if (is_mipi || is_mipi2)
+-			{
+-				clk_n = 4, clk_p2 = 8;
+-			} else if (is_hdmi) {
+-				clk_n = 4, clk_p2 = 10;
+-			}
+-		}
+-
+-		if (is_mipi)
+-			clk_byte = dev_priv->bpp / 8;
+-		else if (is_mipi2)
+-			clk_byte = dev_priv->bpp2 / 8;
+-	
+-		clk_tmp = clk * clk_n * clk_p2 * clk_byte;
+-
+-		dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d. \n", clk, clk_n, clk_p2);
+-		dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d. \n", adjusted_mode->clock, clk_tmp);
+-
+-		ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);
+-
+-		if (!ok) {
+-			dev_err(dev->dev, 
+-			   "mdfldFindBestPLL fail in mdfld_crtc_mode_set. \n");
+-		} else {
+-			m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];
+-
+-			dev_dbg(dev->dev, "dot clock = %d,"
+-				 "m = %d, p1 = %d, m_conv = %d. \n", clock.dot, clock.m,
+-				 clock.p1, m_conv);
+-		}
+-
+-		dpll = REG_READ(dpll_reg);
+-
+-		if (dpll & DPLL_VCO_ENABLE) {
+-			dpll &= ~DPLL_VCO_ENABLE;
+-			REG_WRITE(dpll_reg, dpll);
+-			REG_READ(dpll_reg);
+-
+-			/* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
+-			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+-			udelay(500);
+-
+-			/* reset M1, N1 & P1 */
+-			REG_WRITE(fp_reg, 0);
+-			dpll &= ~MDFLD_P1_MASK;
+-			REG_WRITE(dpll_reg, dpll);
+-			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+-			udelay(500);
+-		}
+-
+-		/* When ungating power of DPLL, needs to wait 0.5us before enable the VCO */
+-		if (dpll & MDFLD_PWR_GATE_EN) {
+-			dpll &= ~MDFLD_PWR_GATE_EN;
+-			REG_WRITE(dpll_reg, dpll);
+-			/* FIXME_MDFLD PO - change 500 to 1 after PO */
+-			udelay(500);
+-		}	
+-
+-		dpll = 0; 
+-
+-#if 0 /* FIXME revisit later */
+-		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19) || (ksel == KSEL_BYPASS_25)) {
+-			dpll &= ~MDFLD_INPUT_REF_SEL;	
+-		} else if (ksel == KSEL_BYPASS_83_100) { 
+-			dpll |= MDFLD_INPUT_REF_SEL;	
+-		}
+-#endif /* FIXME revisit later */
+-
+-		if (is_hdmi)
+-			dpll |= MDFLD_VCO_SEL;	
+-
+-		fp = (clk_n / 2) << 16;
+-		fp |= m_conv; 
+-
+-		/* compute bitmask from p1 value */
+-		dpll |= (1 << (clock.p1 - 2)) << 17;
+-
+-#if 0 /* 1080p30 & 720p */
+-        	dpll = 0x00050000;
+-        	fp = 0x000001be;
+-#endif 
+-#if 0 /* 480p */
+-        	dpll = 0x02010000;
+-        	fp = 0x000000d2;
+-#endif 
+-	} else {
+-#if 0 /*DBI_TPO_480x864*/
+-		dpll = 0x00020000;
+-		fp = 0x00000156; 
+-#endif /* DBI_TPO_480x864 */ /* get from spec. */
+-
+-        	dpll = 0x00800000;
+-	        fp = 0x000000c1;
+-}
+-
+-	REG_WRITE(fp_reg, fp);
+-	REG_WRITE(dpll_reg, dpll);
+-	/* FIXME_MDFLD PO - change 500 to 1 after PO */
+-	udelay(500);
+-
+-	dpll |= DPLL_VCO_ENABLE;
+-	REG_WRITE(dpll_reg, dpll);
+-	REG_READ(dpll_reg);
+-
+-	/* wait for DSI PLL to lock */
+-	while ((timeout < 20000) && !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
+-		udelay(150);
+-		timeout ++;
+-	}
+-
+-	if (is_mipi)
+-		goto mrst_crtc_mode_set_exit;
+-
+-	dev_dbg(dev->dev, "is_mipi = 0x%x \n", is_mipi);
+-
+-	REG_WRITE(pipeconf_reg, *pipeconf);
+-	REG_READ(pipeconf_reg);
+-
+-	/* Wait for for the pipe enable to take effect. */
+-//FIXME_JLIU7 HDMI	mrstWaitForPipeEnable(dev);
+-
+-	REG_WRITE(dspcntr_reg, *dspcntr);
+-	psb_intel_wait_for_vblank(dev);
+-
+-mrst_crtc_mode_set_exit:
+-
+-	gma_power_end(dev);
+-
+-	return 0;
+-}
+-
+-static void mdfld_crtc_prepare(struct drm_crtc *crtc)
+-{
+-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+-}
+-
+-static void mdfld_crtc_commit(struct drm_crtc *crtc)
+-{
+-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+-}
+-
+-static bool mdfld_crtc_mode_fixup(struct drm_crtc *crtc,
+-				  struct drm_display_mode *mode,
+-				  struct drm_display_mode *adjusted_mode)
+-{
+-	return true;
+-}
+-
+-const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
+-	.dpms = mdfld_crtc_dpms,
+-	.mode_fixup = mdfld_crtc_mode_fixup,
+-	.mode_set = mdfld_crtc_mode_set,
+-	.mode_set_base = mdfld__intel_pipe_set_base,
+-	.prepare = mdfld_crtc_prepare,
+-	.commit = mdfld_crtc_commit,
+-};
+diff --git a/drivers/staging/gma500/mdfld_msic.h b/drivers/staging/gma500/mdfld_msic.h
+deleted file mode 100644
+index a7ad6547..0000000
+--- a/drivers/staging/gma500/mdfld_msic.h
++++ /dev/null
+@@ -1,31 +0,0 @@
+-/*
+- * Copyright © 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- *	Jim Liu <jim.liu at intel.com>
+- */
+-
+-#define MSIC_PCI_DEVICE_ID	0x831
+-
+-int msic_regsiter_driver(void);
+-int msic_unregister_driver(void);
+-extern void hpd_notify_um(void);
+diff --git a/drivers/staging/gma500/mdfld_output.c b/drivers/staging/gma500/mdfld_output.c
+deleted file mode 100644
+index eabf53d..0000000
+--- a/drivers/staging/gma500/mdfld_output.c
++++ /dev/null
+@@ -1,171 +0,0 @@
+-/*
+- * Copyright (c)  2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Thomas Eaton <thomas.g.eaton at intel.com>
+- * Scott Rowe <scott.m.rowe at intel.com>
+-*/
+-
+-#include <linux/init.h>
+-#include <linux/moduleparam.h>
+-#include "mdfld_dsi_dbi.h"
+-#include "mdfld_dsi_dpi.h"
+-#include "mdfld_dsi_output.h"
+-#include "mdfld_output.h"
+-#include "mdfld_dsi_dbi_dpu.h"
+-
+-#include "displays/tpo_cmd.h"
+-#include "displays/tpo_vid.h"
+-#include "displays/tmd_cmd.h"
+-#include "displays/tmd_vid.h"
+-#include "displays/pyr_cmd.h"
+-#include "displays/pyr_vid.h"
+-/* #include "displays/hdmi.h" */
+-
+-static int mdfld_dual_mipi;
+-static int mdfld_hdmi;
+-static int mdfld_dpu;
+-
+-module_param(mdfld_dual_mipi, int, 0600);
+-MODULE_PARM_DESC(mdfld_dual_mipi, "Enable dual MIPI configuration");
+-module_param(mdfld_hdmi, int, 0600);
+-MODULE_PARM_DESC(mdfld_hdmi, "Enable Medfield HDMI");
+-module_param(mdfld_dpu, int, 0600);
+-MODULE_PARM_DESC(mdfld_dpu, "Enable Medfield DPU");
+-
+-/* For now a single type per device is all we cope with */
+-int mdfld_get_panel_type(struct drm_device *dev, int pipe)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	return dev_priv->panel_id;
+-}
+-
+-int mdfld_panel_dpi(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	switch (dev_priv->panel_id) {
+-	case TMD_VID:
+-	case TPO_VID:
+-	case PYR_VID:
+-		return true;
+-	case TMD_CMD:
+-	case TPO_CMD:
+-	case PYR_CMD:
+-	default:
+-		return false;
+-	}
+-}
+-
+-static int init_panel(struct drm_device *dev, int mipi_pipe, int p_type)
+-{
+-	struct panel_funcs *p_cmd_funcs;
+-	struct panel_funcs *p_vid_funcs;
+-
+-	/* Oh boy ... FIXME */
+-	p_cmd_funcs = kzalloc(sizeof(struct panel_funcs), GFP_KERNEL);
+-	if (p_cmd_funcs == NULL)
+-		return -ENODEV;
+-	p_vid_funcs = kzalloc(sizeof(struct panel_funcs), GFP_KERNEL);
+-	if (p_vid_funcs == NULL) {
+-		kfree(p_cmd_funcs);
+-		return -ENODEV;
+-	}
+-
+-	switch (p_type) {
+-	case TPO_CMD:
+-		tpo_cmd_init(dev, p_cmd_funcs);
+-		mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL);
+-		break;
+-	case TPO_VID:
+-		tpo_vid_init(dev, p_vid_funcs);
+-		mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs);
+-		break;
+-	case TMD_CMD:
+-		/*tmd_cmd_init(dev, p_cmd_funcs); */
+-		mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL);
+-		break;
+-	case TMD_VID:
+-		tmd_vid_init(dev, p_vid_funcs);
+-		mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs);
+-		break;
+-	case PYR_CMD:
+-		pyr_cmd_init(dev, p_cmd_funcs);
+-		mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL);
+-		break;
+-	case PYR_VID:
+-		mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs);
+-		break;
+-	case TPO:	/* TPO panel supports both cmd & vid interfaces */
+-		tpo_cmd_init(dev, p_cmd_funcs);
+-		tpo_vid_init(dev, p_vid_funcs);
+-		mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs,
+-				      p_vid_funcs);
+-		break;
+-	case TMD:
+-		break;
+-	case PYR:
+-		break;
+-#if 0
+-	case HDMI:
+-		dev_dbg(dev->dev, "Initializing HDMI");
+-		mdfld_hdmi_init(dev, &dev_priv->mode_dev);
+-		break;
+-#endif
+-	default:
+-		dev_err(dev->dev, "Unsupported interface %d", p_type);
+-		return -ENODEV;
+-	}
+-	return 0;
+-}
+-
+-int mdfld_output_init(struct drm_device *dev)
+-{
+-	int type;
+-
+-	/* MIPI panel 1 */
+-	type = mdfld_get_panel_type(dev, 0);
+-	dev_info(dev->dev, "panel 1: type is %d\n", type);
+-	init_panel(dev, 0, type);
+-
+-	if (mdfld_dual_mipi) {
+-		/* MIPI panel 2 */
+-		type = mdfld_get_panel_type(dev, 2);
+-		dev_info(dev->dev, "panel 2: type is %d\n", type);
+-		init_panel(dev, 2, type);
+-	}
+-	if (mdfld_hdmi)
+-		/* HDMI panel */
+-		init_panel(dev, 0, HDMI);
+-	return 0;
+-}
+-
+-void mdfld_output_setup(struct drm_device *dev)
+-{
+-	/* FIXME: this is not the right place for this stuff ! */
+-	if (IS_MFLD(dev)) {
+-		if (mdfld_dpu)
+-			mdfld_dbi_dpu_init(dev);
+-		else
+-			mdfld_dbi_dsr_init(dev);
+-	}
+-}
+diff --git a/drivers/staging/gma500/mdfld_output.h b/drivers/staging/gma500/mdfld_output.h
+deleted file mode 100644
+index daf33e7..0000000
+--- a/drivers/staging/gma500/mdfld_output.h
++++ /dev/null
+@@ -1,41 +0,0 @@
+-/*
+- * Copyright (c)  2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Thomas Eaton <thomas.g.eaton at intel.com>
+- * Scott Rowe <scott.m.rowe at intel.com>
+-*/
+-
+-#ifndef MDFLD_OUTPUT_H
+-#define MDFLD_OUTPUT_H
+-
+-int mdfld_output_init(struct drm_device *dev);
+-int mdfld_panel_dpi(struct drm_device *dev);
+-int mdfld_get_panel_type(struct drm_device *dev, int pipe);
+-void mdfld_disable_crtc (struct drm_device *dev, int pipe);
+-
+-extern const struct drm_crtc_helper_funcs mdfld_helper_funcs;
+-extern const struct drm_crtc_funcs mdfld_intel_crtc_funcs;
+-
+-extern void mdfld_output_setup(struct drm_device *dev);
+-
+-#endif
+diff --git a/drivers/staging/gma500/mdfld_pyr_cmd.c b/drivers/staging/gma500/mdfld_pyr_cmd.c
+deleted file mode 100644
+index 523f2d8..0000000
+--- a/drivers/staging/gma500/mdfld_pyr_cmd.c
++++ /dev/null
+@@ -1,558 +0,0 @@
+-/*
+- * Copyright (c)  2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Thomas Eaton <thomas.g.eaton at intel.com>
+- * Scott Rowe <scott.m.rowe at intel.com>
+-*/
+-
+-#include "mdfld_dsi_dbi.h"
+-#include "mdfld_dsi_dpi.h"
+-#include "mdfld_dsi_output.h"
+-#include "mdfld_output.h"
+-#include "mdfld_dsi_dbi_dpu.h"
+-#include "mdfld_dsi_pkg_sender.h"
+-
+-#include "displays/pyr_cmd.h"
+-
+-static struct drm_display_mode *pyr_cmd_get_config_mode(struct drm_device *dev)
+-{
+-	struct drm_display_mode *mode;
+-
+-	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+-	if (!mode) {
+-		dev_err(dev->dev, "Out of memory\n");
+-		return NULL;
+-	}
+-
+-	dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
+-	dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
+-	dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
+-	dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
+-	dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
+-	dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
+-	dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
+-	dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
+-	dev_dbg(dev->dev, "clock is %d\n", mode->clock);
+-
+-	mode->hdisplay = 480;
+-	mode->vdisplay = 864;
+-	mode->hsync_start = 487;
+-	mode->hsync_end = 490;
+-	mode->htotal = 499;
+-	mode->vsync_start = 874;
+-	mode->vsync_end = 878;
+-	mode->vtotal = 886;
+-	mode->clock = 25777;
+-
+-	drm_mode_set_name(mode);
+-	drm_mode_set_crtcinfo(mode, 0);
+-
+-	mode->type |= DRM_MODE_TYPE_PREFERRED;
+-
+-	return mode;
+-}
+-
+-static bool pyr_dsi_dbi_mode_fixup(struct drm_encoder *encoder,
+-				struct drm_display_mode *mode,
+-				struct drm_display_mode *adjusted_mode)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct drm_display_mode *fixed_mode = pyr_cmd_get_config_mode(dev);
+-
+-	if (fixed_mode) {
+-		adjusted_mode->hdisplay = fixed_mode->hdisplay;
+-		adjusted_mode->hsync_start = fixed_mode->hsync_start;
+-		adjusted_mode->hsync_end = fixed_mode->hsync_end;
+-		adjusted_mode->htotal = fixed_mode->htotal;
+-		adjusted_mode->vdisplay = fixed_mode->vdisplay;
+-		adjusted_mode->vsync_start = fixed_mode->vsync_start;
+-		adjusted_mode->vsync_end = fixed_mode->vsync_end;
+-		adjusted_mode->vtotal = fixed_mode->vtotal;
+-		adjusted_mode->clock = fixed_mode->clock;
+-		drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+-		kfree(fixed_mode);
+-	}
+-	return true;
+-}
+-
+-static void pyr_dsi_dbi_set_power(struct drm_encoder *encoder, bool on)
+-{
+-	int ret = 0;
+-	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+-	struct mdfld_dsi_dbi_output *dbi_output =
+-				MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+-	struct drm_device *dev = encoder->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 reg_offset = 0;
+-	int pipe = (dbi_output->channel_num == 0) ? 0 : 2;
+-
+-	dev_dbg(dev->dev, "pipe %d : %s, panel on: %s\n", pipe,
+-			on ? "On" : "Off",
+-			dbi_output->dbi_panel_on ? "True" : "False");
+-
+-	if (pipe == 2) {
+-		if (on)
+-			dev_priv->dual_mipi = true;
+-		else
+-			dev_priv->dual_mipi = false;
+-
+-		reg_offset = MIPIC_REG_OFFSET;
+-	} else {
+-		if (!on)
+-			dev_priv->dual_mipi = false;
+-	}
+-
+-	if (!gma_power_begin(dev, true)) {
+-		dev_err(dev->dev, "hw begin failed\n");
+-		return;
+-	}
+-
+-
+-	if (on) {
+-		if (dbi_output->dbi_panel_on)
+-			goto out_err;
+-
+-		ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_ON);
+-		if (ret) {
+-			dev_err(dev->dev, "power on error\n");
+-			goto out_err;
+-		}
+-
+-		dbi_output->dbi_panel_on = true;
+-
+-		if (pipe == 2) {
+-			dev_priv->dbi_panel_on2 = true;
+-		} else {
+-			dev_priv->dbi_panel_on = true;
+-			mdfld_enable_te(dev, 0);
+-		}
+-	} else {
+-		if (!dbi_output->dbi_panel_on && !dbi_output->first_boot)
+-			goto out_err;
+-
+-		dbi_output->dbi_panel_on = false;
+-		dbi_output->first_boot = false;
+-
+-		if (pipe == 2) {
+-			dev_priv->dbi_panel_on2 = false;
+-			mdfld_disable_te(dev, 2);
+-		} else {
+-			dev_priv->dbi_panel_on = false;
+-			mdfld_disable_te(dev, 0);
+-
+-			if (dev_priv->dbi_panel_on2)
+-				mdfld_enable_te(dev, 2);
+-		}
+-
+-		ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_OFF);
+-		if (ret) {
+-			dev_err(dev->dev, "power on error\n");
+-			goto out_err;
+-		}
+-	}
+-
+-out_err:
+-	gma_power_end(dev);
+-
+-	if (ret)
+-		dev_err(dev->dev, "failed\n");
+-}
+-
+-static void pyr_dsi_controller_dbi_init(struct mdfld_dsi_config *dsi_config,
+-								int pipe)
+-{
+-	struct drm_device *dev = dsi_config->dev;
+-	u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
+-	int lane_count = dsi_config->lane_count;
+-	u32 val = 0;
+-
+-	dev_dbg(dev->dev, "Init DBI interface on pipe %d...\n", pipe);
+-
+-	/* Un-ready device */
+-	REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
+-
+-	/* Init dsi adapter before kicking off */
+-	REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
+-
+-	/* TODO: figure out how to setup these registers */
+-	REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c600F);
+-	REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset),
+-								0x000a0014);
+-	REG_WRITE((MIPIA_DBI_BW_CTRL_REG + reg_offset), 0x00000400);
+-	REG_WRITE((MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset), 0x00000000);
+-
+-	/* Enable all interrupts */
+-	REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
+-	/* Max value: 20 clock cycles of txclkesc */
+-	REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x0000001f);
+-	/* Min 21 txclkesc, max: ffffh */
+-	REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0x0000ffff);
+-	/* Min: 7d0 max: 4e20 */
+-	REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x00000fa0);
+-
+-	/* Set up func_prg */
+-	val |= lane_count;
+-	val |= (dsi_config->channel_num << DSI_DBI_VIRT_CHANNEL_OFFSET);
+-	val |= DSI_DBI_COLOR_FORMAT_OPTION2;
+-	REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
+-
+-	REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 0x3fffff);
+-	REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff);
+-
+-	/* De-assert dbi_stall when half of DBI FIFO is empty */
+-	/* REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000000); */
+-
+-	REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
+-	REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000002);
+-	REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
+-	REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
+-}
+-
+-static void pyr_dsi_dbi_mode_set(struct drm_encoder *encoder,
+-				struct drm_display_mode *mode,
+-				struct drm_display_mode *adjusted_mode)
+-{
+-	int ret = 0;
+-	struct drm_device *dev = encoder->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+-	struct mdfld_dsi_dbi_output *dsi_output =
+-					MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+-	struct mdfld_dsi_config *dsi_config =
+-				mdfld_dsi_encoder_get_config(dsi_encoder);
+-	struct mdfld_dsi_connector *dsi_connector = dsi_config->connector;
+-	int pipe = dsi_connector->pipe;
+-	u8 param = 0;
+-
+-	/* Regs */
+-	u32 mipi_reg = MIPI;
+-	u32 dspcntr_reg = DSPACNTR;
+-	u32 pipeconf_reg = PIPEACONF;
+-	u32 reg_offset = 0;
+-
+-	/* Values */
+-	u32 dspcntr_val = dev_priv->dspcntr;
+-	u32 pipeconf_val = dev_priv->pipeconf;
+-	u32 h_active_area = mode->hdisplay;
+-	u32 v_active_area = mode->vdisplay;
+-	u32 mipi_val = (PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX |
+-							TE_TRIGGER_GPIO_PIN);
+-
+-	dev_dbg(dev->dev, "mipi_val =0x%x\n", mipi_val);
+-
+-	dev_dbg(dev->dev, "type %s\n", (pipe == 2) ? "MIPI2" : "MIPI");
+-	dev_dbg(dev->dev, "h %d v %d\n", mode->hdisplay, mode->vdisplay);
+-
+-	if (pipe == 2) {
+-		mipi_reg = MIPI_C;
+-		dspcntr_reg = DSPCCNTR;
+-		pipeconf_reg = PIPECCONF;
+-
+-		reg_offset = MIPIC_REG_OFFSET;
+-
+-		dspcntr_val = dev_priv->dspcntr2;
+-		pipeconf_val = dev_priv->pipeconf2;
+-	} else {
+-		mipi_val |= 0x2; /* Two lanes for port A and C respectively */
+-	}
+-
+-	if (!gma_power_begin(dev, true)) {
+-		dev_err(dev->dev, "hw begin failed\n");
+-		return;
+-	}
+-
+-	/* Set up pipe related registers */
+-	REG_WRITE(mipi_reg, mipi_val);
+-	REG_READ(mipi_reg);
+-
+-	pyr_dsi_controller_dbi_init(dsi_config, pipe);
+-
+-	msleep(20);
+-
+-	REG_WRITE(dspcntr_reg, dspcntr_val);
+-	REG_READ(dspcntr_reg);
+-
+-	/* 20ms delay before sending exit_sleep_mode */
+-	msleep(20);
+-
+-	/* Send exit_sleep_mode DCS */
+-	ret = mdfld_dsi_dbi_send_dcs(dsi_output, exit_sleep_mode, NULL,
+-						0, CMD_DATA_SRC_SYSTEM_MEM);
+-	if (ret) {
+-		dev_err(dev->dev, "sent exit_sleep_mode faild\n");
+-		goto out_err;
+-	}
+-
+-	/*send set_tear_on DCS*/
+-	ret = mdfld_dsi_dbi_send_dcs(dsi_output, set_tear_on,
+-					&param, 1, CMD_DATA_SRC_SYSTEM_MEM);
+-	if (ret) {
+-		dev_err(dev->dev, "%s - sent set_tear_on faild\n", __func__);
+-		goto out_err;
+-	}
+-
+-	/* Do some init stuff */
+-	mdfld_dsi_brightness_init(dsi_config, pipe);
+-	mdfld_dsi_gen_fifo_ready(dev, (MIPIA_GEN_FIFO_STAT_REG + reg_offset),
+-				HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
+-
+-	REG_WRITE(pipeconf_reg, pipeconf_val | PIPEACONF_DSR);
+-	REG_READ(pipeconf_reg);
+-
+-	/* TODO: this looks ugly, try to move it to CRTC mode setting */
+-	if (pipe == 2)
+-		dev_priv->pipeconf2 |= PIPEACONF_DSR;
+-	else
+-		dev_priv->pipeconf |= PIPEACONF_DSR;
+-
+-	dev_dbg(dev->dev, "pipeconf %x\n",  REG_READ(pipeconf_reg));
+-
+-	ret = mdfld_dsi_dbi_update_area(dsi_output, 0, 0,
+-				h_active_area - 1, v_active_area - 1);
+-	if (ret) {
+-		dev_err(dev->dev, "update area failed\n");
+-		goto out_err;
+-	}
+-
+-out_err:
+-	gma_power_end(dev);
+-
+-	if (ret)
+-		dev_err(dev->dev, "mode set failed\n");
+-	else
+-		dev_dbg(dev->dev, "mode set done successfully\n");
+-}
+-
+-static void pyr_dsi_dbi_prepare(struct drm_encoder *encoder)
+-{
+-	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+-	struct mdfld_dsi_dbi_output *dbi_output =
+-					MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+-
+-	dbi_output->mode_flags |= MODE_SETTING_IN_ENCODER;
+-	dbi_output->mode_flags &= ~MODE_SETTING_ENCODER_DONE;
+-
+-	pyr_dsi_dbi_set_power(encoder, false);
+-}
+-
+-static void pyr_dsi_dbi_commit(struct drm_encoder *encoder)
+-{
+-	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+-	struct mdfld_dsi_dbi_output *dbi_output =
+-					MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+-	struct drm_device *dev = dbi_output->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct psb_drm_dpu_rect rect;
+-
+-	pyr_dsi_dbi_set_power(encoder, true);
+-
+-	dbi_output->mode_flags &= ~MODE_SETTING_IN_ENCODER;
+-
+-	rect.x = rect.y = 0;
+-	rect.width = 864;
+-	rect.height = 480;
+-
+-	if (dbi_output->channel_num == 1) {
+-		dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_2;
+-		/* If DPU enabled report a fullscreen damage */
+-		mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, &rect);
+-	} else {
+-		dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_0;
+-		mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, &rect);
+-	}
+-	dbi_output->mode_flags |= MODE_SETTING_ENCODER_DONE;
+-}
+-
+-static void pyr_dsi_dbi_dpms(struct drm_encoder *encoder, int mode)
+-{
+-	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+-	struct mdfld_dsi_dbi_output *dbi_output =
+-					MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+-	struct drm_device *dev = dbi_output->dev;
+-
+-	dev_dbg(dev->dev, "%s\n",  (mode == DRM_MODE_DPMS_ON ? "on" : "off"));
+-
+-	if (mode == DRM_MODE_DPMS_ON)
+-		pyr_dsi_dbi_set_power(encoder, true);
+-	else
+-		pyr_dsi_dbi_set_power(encoder, false);
+-}
+-
+-/*
+- * Update the DBI MIPI Panel Frame Buffer.
+- */
+-static void pyr_dsi_dbi_update_fb(struct mdfld_dsi_dbi_output *dbi_output,
+-								int pipe)
+-{
+-	struct mdfld_dsi_pkg_sender *sender =
+-		mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
+-	struct drm_device *dev = dbi_output->dev;
+-	struct drm_crtc *crtc = dbi_output->base.base.crtc;
+-	struct psb_intel_crtc *psb_crtc = (crtc) ?
+-				to_psb_intel_crtc(crtc) : NULL;
+-
+-	u32 dpll_reg = MRST_DPLL_A;
+-	u32 dspcntr_reg = DSPACNTR;
+-	u32 pipeconf_reg = PIPEACONF;
+-	u32 dsplinoff_reg = DSPALINOFF;
+-	u32 dspsurf_reg = DSPASURF;
+-	u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
+-	u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
+-	u32 reg_offset = 0;
+-
+-	u32 intr_status;
+-	u32 fifo_stat_reg_val;
+-	u32 dpll_reg_val;
+-	u32 dspcntr_reg_val;
+-	u32 pipeconf_reg_val;
+-
+-	/* If mode setting on-going, back off */
+-	if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
+-		(psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING) ||
+-		!(dbi_output->mode_flags & MODE_SETTING_ENCODER_DONE))
+-		return;
+-
+-	/*
+-	 * Look for errors here.  In particular we're checking for whatever
+-	 * error status might have appeared during the last frame transmit
+-	 * (memory write).
+-	 *
+-	 * Normally, the bits we're testing here would be set infrequently,
+-	 * if at all.  However, one panel (at least) returns at least one
+-	 * error bit on most frames.  So we've disabled the kernel message
+-	 * for now.
+-	 *
+-	 * Still clear whatever error bits are set, except don't clear the
+-	 * ones that would make the Penwell DSI controller reset if we
+-	 * cleared them.
+-	 */
+-	intr_status = REG_READ(INTR_STAT_REG);
+-	if ((intr_status & 0x26FFFFFF) != 0) {
+-		/* dev_err(dev->dev, "DSI status: 0x%08X\n", intr_status); */
+-		intr_status &= 0x26F3FFFF;
+-		REG_WRITE(INTR_STAT_REG, intr_status);
+-	}
+-
+-	if (pipe == 2) {
+-		dspcntr_reg = DSPCCNTR;
+-		pipeconf_reg = PIPECCONF;
+-		dsplinoff_reg = DSPCLINOFF;
+-		dspsurf_reg = DSPCSURF;
+-
+-		hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
+-		gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET,
+-
+-		reg_offset = MIPIC_REG_OFFSET;
+-	}
+-
+-	if (!gma_power_begin(dev, true)) {
+-		dev_err(dev->dev, "hw begin failed\n");
+-		return;
+-	}
+-
+-	fifo_stat_reg_val = REG_READ(MIPIA_GEN_FIFO_STAT_REG + reg_offset);
+-	dpll_reg_val = REG_READ(dpll_reg);
+-	dspcntr_reg_val = REG_READ(dspcntr_reg);
+-	pipeconf_reg_val = REG_READ(pipeconf_reg);
+-
+-	if (!(fifo_stat_reg_val & (1 << 27)) ||
+-		(dpll_reg_val & DPLL_VCO_ENABLE) ||
+-		!(dspcntr_reg_val & DISPLAY_PLANE_ENABLE) ||
+-		!(pipeconf_reg_val & DISPLAY_PLANE_ENABLE)) {
+-		goto update_fb_out0;
+-	}
+-
+-	/* Refresh plane changes */
+-	REG_WRITE(dsplinoff_reg, REG_READ(dsplinoff_reg));
+-	REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
+-	REG_READ(dspsurf_reg);
+-
+-	mdfld_dsi_send_dcs(sender,
+-			   write_mem_start,
+-			   NULL,
+-			   0,
+-			   CMD_DATA_SRC_PIPE,
+-			   MDFLD_DSI_SEND_PACKAGE);
+-
+-	/*
+-	 * The idea here is to transmit a Generic Read command after the
+-	 * Write Memory Start/Continue commands finish.  This asks for
+-	 * the panel to return an "ACK No Errors," or (if it has errors
+-	 * to report) an Error Report.  This allows us to monitor the
+-	 * panel's perception of the health of the DSI.
+-	 */
+-	mdfld_dsi_gen_fifo_ready(dev, gen_fifo_stat_reg,
+-				HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
+-	REG_WRITE(hs_gen_ctrl_reg, (1 << WORD_COUNTS_POS) | GEN_READ_0);
+-
+-	dbi_output->dsr_fb_update_done = true;
+-update_fb_out0:
+-	gma_power_end(dev);
+-}
+-
+-/*
+- * TODO: will be removed later, should work out display interfaces for power
+- */
+-void pyr_dsi_adapter_init(struct mdfld_dsi_config *dsi_config, int pipe)
+-{
+-	if (!dsi_config || (pipe != 0 && pipe != 2)) {
+-		WARN_ON(1);
+-		return;
+-	}
+-	pyr_dsi_controller_dbi_init(dsi_config, pipe);
+-}
+-
+-static int pyr_cmd_get_panel_info(struct drm_device *dev, int pipe,
+-							struct panel_info *pi)
+-{
+-	if (!dev || !pi)
+-		return -EINVAL;
+-
+-	pi->width_mm = PYR_PANEL_WIDTH;
+-	pi->height_mm = PYR_PANEL_HEIGHT;
+-
+-	return 0;
+-}
+-
+-/* PYR DBI encoder helper funcs */
+-static const struct drm_encoder_helper_funcs pyr_dsi_dbi_helper_funcs = {
+-	.dpms = pyr_dsi_dbi_dpms,
+-	.mode_fixup = pyr_dsi_dbi_mode_fixup,
+-	.prepare = pyr_dsi_dbi_prepare,
+-	.mode_set = pyr_dsi_dbi_mode_set,
+-	.commit = pyr_dsi_dbi_commit,
+-};
+-
+-/* PYR DBI encoder funcs */
+-static const struct drm_encoder_funcs mdfld_dsi_dbi_encoder_funcs = {
+-	.destroy = drm_encoder_cleanup,
+-};
+-
+-void pyr_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+-{
+-	p_funcs->encoder_funcs = &mdfld_dsi_dbi_encoder_funcs;
+-	p_funcs->encoder_helper_funcs = &pyr_dsi_dbi_helper_funcs;
+-	p_funcs->get_config_mode = &pyr_cmd_get_config_mode;
+-	p_funcs->update_fb = pyr_dsi_dbi_update_fb;
+-	p_funcs->get_panel_info = pyr_cmd_get_panel_info;
+-}
+diff --git a/drivers/staging/gma500/mdfld_tmd_vid.c b/drivers/staging/gma500/mdfld_tmd_vid.c
+deleted file mode 100644
+index affdc09..0000000
+--- a/drivers/staging/gma500/mdfld_tmd_vid.c
++++ /dev/null
+@@ -1,206 +0,0 @@
+-/*
+- * Copyright © 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Jim Liu <jim.liu at intel.com>
+- * Jackie Li<yaodong.li at intel.com>
+- * Gideon Eaton <eaton.
+- * Scott Rowe <scott.m.rowe at intel.com>
+- */
+-
+-#include "mdfld_dsi_dbi.h"
+-#include "mdfld_dsi_dpi.h"
+-#include "mdfld_dsi_output.h"
+-#include "mdfld_output.h"
+-
+-#include "mdfld_dsi_pkg_sender.h"
+-
+-#include "displays/tmd_vid.h"
+-
+-/* FIXME: static ? */
+-struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev)
+-{
+-	struct drm_display_mode *mode;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
+-	bool use_gct = false; /*Disable GCT for now*/
+-
+-	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+-	if (!mode) {
+-		dev_err(dev->dev, "Out of memory\n");
+-		return NULL;
+-	}
+-
+-	if (use_gct) {
+-		dev_dbg(dev->dev, "gct find MIPI panel.\n");
+-
+-		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+-		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+-		mode->hsync_start = mode->hdisplay +
+-				((ti->hsync_offset_hi << 8) |
+-				ti->hsync_offset_lo);
+-		mode->hsync_end = mode->hsync_start +
+-				((ti->hsync_pulse_width_hi << 8) |
+-				ti->hsync_pulse_width_lo);
+-		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) |
+-								ti->hblank_lo);
+-		mode->vsync_start = \
+-			mode->vdisplay + ((ti->vsync_offset_hi << 8) |
+-						ti->vsync_offset_lo);
+-		mode->vsync_end = \
+-			mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
+-						ti->vsync_pulse_width_lo);
+-		mode->vtotal = mode->vdisplay +
+-				((ti->vblank_hi << 8) | ti->vblank_lo);
+-		mode->clock = ti->pixel_clock * 10;
+-
+-		dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
+-		dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
+-		dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
+-		dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
+-		dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
+-		dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
+-		dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
+-		dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
+-		dev_dbg(dev->dev, "clock is %d\n", mode->clock);
+-	} else {
+-		mode->hdisplay = 480;
+-		mode->vdisplay = 854;
+-		mode->hsync_start = 487;
+-		mode->hsync_end = 490;
+-		mode->htotal = 499;
+-		mode->vsync_start = 861;
+-		mode->vsync_end = 865;
+-		mode->vtotal = 873;
+-		mode->clock = 33264;
+-	}
+-	drm_mode_set_name(mode);
+-	drm_mode_set_crtcinfo(mode, 0);
+-
+-	mode->type |= DRM_MODE_TYPE_PREFERRED;
+-
+-	return mode;
+-}
+-
+-static int tmd_vid_get_panel_info(struct drm_device *dev,
+-				int pipe,
+-				struct panel_info *pi)
+-{
+-	if (!dev || !pi)
+-		return -EINVAL;
+-
+-	pi->width_mm = TMD_PANEL_WIDTH;
+-	pi->height_mm = TMD_PANEL_HEIGHT;
+-
+-	return 0;
+-}
+-
+-/*
+- *	mdfld_init_TMD_MIPI	-	initialise a TMD interface
+- *	@dsi_config: configuration
+- *	@pipe: pipe to configure
+- *
+- *	This function is called only by mrst_dsi_mode_set and
+- *	restore_display_registers.  since this function does not
+- *	acquire the mutex, it is important that the calling function
+- *	does!
+- */
+-
+-
+-static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config,
+-				      int pipe)
+-{
+-	static u32 tmd_cmd_mcap_off[] = {0x000000b2};
+-	static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef};
+-	static u32 tmd_cmd_set_lane_num[] = {0x006360ef};
+-	static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef};
+-	static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef};
+-	static u32 tmd_cmd_set_mode[] = {0x000000b3};
+-	static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef};
+-	static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df};
+-	static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055};
+-	static u32 tmd_cmd_set_video_mode[] = {0x00000153};
+-	/*no auto_bl,need add in furture*/
+-	static u32 tmd_cmd_enable_backlight[] = {0x00005ab4};
+-	static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd};
+-
+-	struct mdfld_dsi_pkg_sender *sender
+-			= mdfld_dsi_get_pkg_sender(dsi_config);
+-
+-	DRM_INFO("Enter mdfld init TMD MIPI display.\n");
+-
+-	if (!sender) {
+-		DRM_ERROR("Cannot get sender\n");
+-		return;
+-	}
+-
+-	if (dsi_config->dvr_ic_inited)
+-		return;
+-
+-	msleep(3);
+-
+-	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_mcap_off, 1, 0);
+-	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_enable_lane_switch, 1, 0);
+-	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_lane_num, 1, 0);
+-	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_pushing_clock0, 1, 0);
+-	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_pushing_clock1, 1, 0);
+-	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_mode, 1, 0);
+-	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_sync_pulse_mode, 1, 0);
+-	mdfld_dsi_send_mcs_long_lp(sender, tmd_cmd_set_column, 2, 0);
+-	mdfld_dsi_send_mcs_long_lp(sender, tmd_cmd_set_page, 2, 0);
+-	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_video_mode, 1, 0);
+-	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_enable_backlight, 1, 0);
+-	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_backlight_dimming, 1, 0);
+-
+-	dsi_config->dvr_ic_inited = 1;
+-}
+-
+-/* TMD DPI encoder helper funcs */
+-static const struct drm_encoder_helper_funcs
+-					mdfld_tpo_dpi_encoder_helper_funcs = {
+-	.dpms = mdfld_dsi_dpi_dpms,
+-	.mode_fixup = mdfld_dsi_dpi_mode_fixup,
+-	.prepare = mdfld_dsi_dpi_prepare,
+-	.mode_set = mdfld_dsi_dpi_mode_set,
+-	.commit = mdfld_dsi_dpi_commit,
+-};
+-
+-/* TMD DPI encoder funcs */
+-static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
+-	.destroy = drm_encoder_cleanup,
+-};
+-
+-void tmd_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+-{
+-	if (!dev || !p_funcs) {
+-		dev_err(dev->dev, "Invalid parameters\n");
+-		return;
+-	}
+-
+-	p_funcs->encoder_funcs = &mdfld_tpo_dpi_encoder_funcs;
+-	p_funcs->encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs;
+-	p_funcs->get_config_mode = &tmd_vid_get_config_mode;
+-	p_funcs->update_fb = NULL;
+-	p_funcs->get_panel_info = tmd_vid_get_panel_info;
+-	p_funcs->reset = mdfld_dsi_panel_reset;
+-	p_funcs->drv_ic_init = mdfld_dsi_tmd_drv_ic_init;
+-}
+diff --git a/drivers/staging/gma500/mdfld_tpo_cmd.c b/drivers/staging/gma500/mdfld_tpo_cmd.c
+deleted file mode 100644
+index c7f7c9c..0000000
+--- a/drivers/staging/gma500/mdfld_tpo_cmd.c
++++ /dev/null
+@@ -1,509 +0,0 @@
+-/*
+- * Copyright (c)  2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * Thomas Eaton <thomas.g.eaton at intel.com>
+- * Scott Rowe <scott.m.rowe at intel.com>
+- */
+-
+-#include "mdfld_dsi_dbi.h"
+-#include "mdfld_dsi_dpi.h"
+-#include "mdfld_dsi_output.h"
+-#include "mdfld_output.h"
+-#include "mdfld_dsi_dbi_dpu.h"
+-#include "mdfld_dsi_pkg_sender.h"
+-
+-#include "displays/tpo_cmd.h"
+-
+-static struct drm_display_mode *tpo_cmd_get_config_mode(struct drm_device *dev)
+-{
+-	struct drm_display_mode *mode;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
+-	bool use_gct = false;
+-
+-	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+-	if (!mode)
+-		return NULL;
+-
+-	if (use_gct) {
+-		dev_dbg(dev->dev, "gct find MIPI panel.\n");
+-
+-		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+-		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+-		mode->hsync_start = mode->hdisplay + \
+-				((ti->hsync_offset_hi << 8) | \
+-				ti->hsync_offset_lo);
+-		mode->hsync_end = mode->hsync_start + \
+-				((ti->hsync_pulse_width_hi << 8) | \
+-				ti->hsync_pulse_width_lo);
+-		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
+-								ti->hblank_lo);
+-		mode->vsync_start = \
+-			mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
+-						ti->vsync_offset_lo);
+-		mode->vsync_end = \
+-			mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
+-						ti->vsync_pulse_width_lo);
+-		mode->vtotal = mode->vdisplay + \
+-				((ti->vblank_hi << 8) | ti->vblank_lo);
+-		mode->clock = ti->pixel_clock * 10;
+-
+-		dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
+-		dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
+-		dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
+-		dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
+-		dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
+-		dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
+-		dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
+-		dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
+-		dev_dbg(dev->dev, "clock is %d\n", mode->clock);
+-	} else {
+-		mode->hdisplay = 864;
+-		mode->vdisplay = 480;
+-		mode->hsync_start = 872;
+-		mode->hsync_end = 876;
+-		mode->htotal = 884;
+-		mode->vsync_start = 482;
+-		mode->vsync_end = 494;
+-		mode->vtotal = 486;
+-		mode->clock = 25777;
+-	}
+-
+-	drm_mode_set_name(mode);
+-	drm_mode_set_crtcinfo(mode, 0);
+-
+-	mode->type |= DRM_MODE_TYPE_PREFERRED;
+-
+-	return mode;
+-}
+-
+-static bool mdfld_dsi_dbi_mode_fixup(struct drm_encoder *encoder,
+-				     struct drm_display_mode *mode,
+-				     struct drm_display_mode *adjusted_mode)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct drm_display_mode *fixed_mode = tpo_cmd_get_config_mode(dev);
+-
+-	if (fixed_mode) {
+-		adjusted_mode->hdisplay = fixed_mode->hdisplay;
+-		adjusted_mode->hsync_start = fixed_mode->hsync_start;
+-		adjusted_mode->hsync_end = fixed_mode->hsync_end;
+-		adjusted_mode->htotal = fixed_mode->htotal;
+-		adjusted_mode->vdisplay = fixed_mode->vdisplay;
+-		adjusted_mode->vsync_start = fixed_mode->vsync_start;
+-		adjusted_mode->vsync_end = fixed_mode->vsync_end;
+-		adjusted_mode->vtotal = fixed_mode->vtotal;
+-		adjusted_mode->clock = fixed_mode->clock;
+-		drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+-		kfree(fixed_mode);
+-	}
+-	return true;
+-}
+-
+-static void mdfld_dsi_dbi_set_power(struct drm_encoder *encoder, bool on)
+-{
+-	int ret = 0;
+-	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+-	struct mdfld_dsi_dbi_output *dbi_output =
+-				MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+-	struct mdfld_dsi_config *dsi_config =
+-		mdfld_dsi_encoder_get_config(dsi_encoder);
+-	struct mdfld_dsi_pkg_sender *sender =
+-		mdfld_dsi_encoder_get_pkg_sender(dsi_encoder);
+-	struct drm_device *dev = encoder->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 reg_offset = 0;
+-	int pipe = (dbi_output->channel_num == 0) ? 0 : 2;
+-	u32 data = 0;
+-
+-	dev_dbg(dev->dev, "pipe %d : %s, panel on: %s\n",
+-			pipe, on ? "On" : "Off",
+-			dbi_output->dbi_panel_on ? "True" : "False");
+-
+-	if (pipe == 2) {
+-		if (on)
+-			dev_priv->dual_mipi = true;
+-		else
+-			dev_priv->dual_mipi = false;
+-		reg_offset = MIPIC_REG_OFFSET;
+-	} else {
+-		if (!on)
+-			dev_priv->dual_mipi = false;
+-	}
+-
+-	if (!gma_power_begin(dev, true)) {
+-		dev_err(dev->dev, "hw begin failed\n");
+-		return;
+-	}
+-
+-	if (on) {
+-		if (dbi_output->dbi_panel_on)
+-			goto out_err;
+-
+-		ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_ON);
+-		if (ret) {
+-			dev_err(dev->dev, "power on error\n");
+-			goto out_err;
+-		}
+-
+-		dbi_output->dbi_panel_on = true;
+-
+-		if (pipe == 2)
+-			dev_priv->dbi_panel_on2 = true;
+-		else
+-			dev_priv->dbi_panel_on = true;
+-		mdfld_enable_te(dev, pipe);
+-	} else {
+-		if (!dbi_output->dbi_panel_on && !dbi_output->first_boot)
+-			goto out_err;
+-
+-		dbi_output->dbi_panel_on = false;
+-		dbi_output->first_boot = false;
+-
+-		if (pipe == 2)
+-			dev_priv->dbi_panel_on2 = false;
+-		else
+-			dev_priv->dbi_panel_on = false;
+-
+-		mdfld_disable_te(dev, pipe);
+-
+-		ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_OFF);
+-		if (ret) {
+-			dev_err(dev->dev, "power on error\n");
+-			goto out_err;
+-		}
+-	}
+-
+-	/*
+-	 * FIXME: this is a WA for TPO panel crash on DPMS on & off around
+-	 * 83 times. the root cause of this issue is that Booster in
+-	 * drvIC crashed. Add this WA so that we can resume the driver IC
+-	 * once we found that booster has a fault
+-	 */
+-	mdfld_dsi_get_power_mode(dsi_config,
+-				&data,
+-				MDFLD_DSI_HS_TRANSMISSION);
+-
+-	if (on && data && !(data & (1 << 7))) {
+-		/* Soft reset */
+-		mdfld_dsi_send_dcs(sender,
+-				   DCS_SOFT_RESET,
+-				   NULL,
+-				   0,
+-				   CMD_DATA_SRC_PIPE,
+-				   MDFLD_DSI_SEND_PACKAGE);
+-
+-		/* Init drvIC */
+-		if (dbi_output->p_funcs->drv_ic_init)
+-			dbi_output->p_funcs->drv_ic_init(dsi_config,
+-							 pipe);
+-	}
+- 
+-out_err:
+-	gma_power_end(dev);
+-	if (ret)
+-		dev_err(dev->dev, "failed\n");
+-}
+-
+-
+-static void mdfld_dsi_dbi_mode_set(struct drm_encoder *encoder,
+-				   struct drm_display_mode *mode,
+-				   struct drm_display_mode *adjusted_mode)
+-{
+-	int ret = 0;
+-	struct drm_device *dev = encoder->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+-	struct mdfld_dsi_dbi_output *dsi_output =
+-					MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+-	struct mdfld_dsi_config *dsi_config =
+-				mdfld_dsi_encoder_get_config(dsi_encoder);
+-	struct mdfld_dsi_connector *dsi_connector = dsi_config->connector;
+-	int pipe = dsi_connector->pipe;
+-	u8 param = 0;
+-
+-	/* Regs */
+-	u32 mipi_reg = MIPI;
+-	u32 dspcntr_reg = DSPACNTR;
+-	u32 pipeconf_reg = PIPEACONF;
+-	u32 reg_offset = 0;
+-
+-	/* Values */
+-	u32 dspcntr_val = dev_priv->dspcntr;
+-	u32 pipeconf_val = dev_priv->pipeconf;
+-	u32 h_active_area = mode->hdisplay;
+-	u32 v_active_area = mode->vdisplay;
+-	u32 mipi_val;
+-
+-	mipi_val = (PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX |
+-						TE_TRIGGER_GPIO_PIN);
+-
+-	dev_dbg(dev->dev, "mipi_val =0x%x\n", mipi_val);
+-
+-	dev_dbg(dev->dev, "type %s\n", (pipe == 2) ? "MIPI2" : "MIPI");
+-	dev_dbg(dev->dev, "h %d v %d\n", mode->hdisplay, mode->vdisplay);
+-
+-	if (pipe == 2) {
+-		mipi_reg = MIPI_C;
+-		dspcntr_reg = DSPCCNTR;
+-		pipeconf_reg = PIPECCONF;
+-
+-		reg_offset = MIPIC_REG_OFFSET;
+-
+-		dspcntr_val = dev_priv->dspcntr2;
+-		pipeconf_val = dev_priv->pipeconf2;
+-	} else {
+-		mipi_val |= 0x2; /*two lanes for port A and C respectively*/
+-	}
+-
+-	if (!gma_power_begin(dev, true)) {
+-		dev_err(dev->dev, "hw begin failed\n");
+-		return;
+-	}
+-
+-	REG_WRITE(dspcntr_reg, dspcntr_val);
+-	REG_READ(dspcntr_reg);
+-
+-	/* 20ms delay before sending exit_sleep_mode */
+-	msleep(20);
+-
+-	/* Send exit_sleep_mode DCS */
+-	ret = mdfld_dsi_dbi_send_dcs(dsi_output, DCS_EXIT_SLEEP_MODE,
+-					NULL, 0, CMD_DATA_SRC_SYSTEM_MEM);
+-	if (ret) {
+-		dev_err(dev->dev, "sent exit_sleep_mode faild\n");
+-		goto out_err;
+-	}
+-
+-	/* Send set_tear_on DCS */
+-	ret = mdfld_dsi_dbi_send_dcs(dsi_output, DCS_SET_TEAR_ON,
+-					&param, 1, CMD_DATA_SRC_SYSTEM_MEM);
+-	if (ret) {
+-		dev_err(dev->dev, "%s - sent set_tear_on faild\n", __func__);
+-		goto out_err;
+-	}
+-
+-	/* Do some init stuff */
+-	REG_WRITE(pipeconf_reg, pipeconf_val | PIPEACONF_DSR);
+-	REG_READ(pipeconf_reg);
+-
+-	/* TODO: this looks ugly, try to move it to CRTC mode setting*/
+-	if (pipe == 2)
+-		dev_priv->pipeconf2 |= PIPEACONF_DSR;
+-	else
+-		dev_priv->pipeconf |= PIPEACONF_DSR;
+-
+-	dev_dbg(dev->dev, "pipeconf %x\n",  REG_READ(pipeconf_reg));
+-
+-	ret = mdfld_dsi_dbi_update_area(dsi_output, 0, 0,
+-				h_active_area - 1, v_active_area - 1);
+-	if (ret) {
+-		dev_err(dev->dev, "update area failed\n");
+-		goto out_err;
+-	}
+-
+-out_err:
+-	gma_power_end(dev);
+-
+-	if (ret)
+-		dev_err(dev->dev, "mode set failed\n");
+-}
+-
+-static void mdfld_dsi_dbi_prepare(struct drm_encoder *encoder)
+-{
+-	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+-	struct mdfld_dsi_dbi_output *dbi_output
+-				= MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+-
+-	dbi_output->mode_flags |= MODE_SETTING_IN_ENCODER;
+-	dbi_output->mode_flags &= ~MODE_SETTING_ENCODER_DONE;
+-
+-	mdfld_dsi_dbi_set_power(encoder, false);
+-}
+-
+-static void mdfld_dsi_dbi_commit(struct drm_encoder *encoder)
+-{
+-	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+-	struct mdfld_dsi_dbi_output *dbi_output =
+-					MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+-	struct drm_device *dev = dbi_output->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct psb_drm_dpu_rect rect;
+-
+-	mdfld_dsi_dbi_set_power(encoder, true);
+-	dbi_output->mode_flags &= ~MODE_SETTING_IN_ENCODER;
+-
+-	rect.x = rect.y = 0;
+-	rect.width = 864;
+-	rect.height = 480;
+-
+-	if (dbi_output->channel_num == 1) {
+-		dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_2;
+-		/*if dpu enabled report a fullscreen damage*/
+-		mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, &rect);
+-	} else {
+-		dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_0;
+-		mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, &rect);
+-	}
+-	dbi_output->mode_flags |= MODE_SETTING_ENCODER_DONE;
+-}
+-
+-static void mdfld_dsi_dbi_dpms(struct drm_encoder *encoder, int mode)
+-{
+-	struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
+-	struct mdfld_dsi_dbi_output *dbi_output
+-				= MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
+-	struct drm_device *dev = dbi_output->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	static bool bdispoff;
+-
+-	dev_dbg(dev->dev, "%s\n", (mode == DRM_MODE_DPMS_ON ? "on" : "off"));
+-
+-	if (mode == DRM_MODE_DPMS_ON) {
+-		/*
+-		 * FIXME: in case I am wrong!
+-		 * we don't need to exit dsr here to wake up plane/pipe/pll
+-		 * if everything goes right, hw_begin will resume them all
+-		 * during set_power.
+-		 */
+-		if (bdispoff /* FIXME && gbgfxsuspended */) {
+-			mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_2D_3D);
+-			bdispoff = false;
+-			dev_priv->dispstatus = true;
+-		}
+-
+-		mdfld_dsi_dbi_set_power(encoder, true);
+-		/* FIXME if (gbgfxsuspended)
+-			gbgfxsuspended = false; */
+-	} else {
+-		/*
+-		 * I am not sure whether this is the perfect place to
+-		 * turn rpm on since we still have a lot of CRTC turnning
+-		 * on work to do.
+-		 */
+-		bdispoff = true;
+-		dev_priv->dispstatus = false;
+-		mdfld_dsi_dbi_set_power(encoder, false);
+-	}
+-}
+-
+-
+-/*
+- * Update the DBI MIPI Panel Frame Buffer.
+- */
+-static void mdfld_dsi_dbi_update_fb(struct mdfld_dsi_dbi_output *dbi_output,
+-								int pipe)
+-{
+-	struct mdfld_dsi_pkg_sender *sender =
+-		mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
+-	struct drm_device *dev = dbi_output->dev;
+-	struct drm_crtc *crtc = dbi_output->base.base.crtc;
+-	struct psb_intel_crtc *psb_crtc = (crtc) ?
+-					to_psb_intel_crtc(crtc) : NULL;
+-	u32 dpll_reg = MRST_DPLL_A;
+-	u32 dspcntr_reg = DSPACNTR;
+-	u32 pipeconf_reg = PIPEACONF;
+-	u32 dsplinoff_reg = DSPALINOFF;
+-	u32 dspsurf_reg = DSPASURF;
+-	u32 reg_offset = 0;
+-
+-	/* If mode setting on-going, back off */
+-	if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
+-		(psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING) ||
+-		!(dbi_output->mode_flags & MODE_SETTING_ENCODER_DONE))
+-		return;
+-
+-	if (pipe == 2) {
+-		dspcntr_reg = DSPCCNTR;
+-		pipeconf_reg = PIPECCONF;
+-		dsplinoff_reg = DSPCLINOFF;
+-		dspsurf_reg = DSPCSURF;
+-		reg_offset = MIPIC_REG_OFFSET;
+-	}
+-
+-	if (!gma_power_begin(dev, true)) {
+-		dev_err(dev->dev, "hw begin failed\n");
+-		return;
+-	}
+-
+-	/* Check DBI FIFO status */
+-	if (!(REG_READ(dpll_reg) & DPLL_VCO_ENABLE) ||
+-	   !(REG_READ(dspcntr_reg) & DISPLAY_PLANE_ENABLE) ||
+-	   !(REG_READ(pipeconf_reg) & DISPLAY_PLANE_ENABLE))
+-		goto update_fb_out0;
+-
+-	/* Refresh plane changes */
+-	REG_WRITE(dsplinoff_reg, REG_READ(dsplinoff_reg));
+-	REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
+-	REG_READ(dspsurf_reg);
+-
+-	mdfld_dsi_send_dcs(sender,
+-			   DCS_WRITE_MEM_START,
+-			   NULL,
+-			   0,
+-			   CMD_DATA_SRC_PIPE,
+-			   MDFLD_DSI_SEND_PACKAGE);
+-
+-	dbi_output->dsr_fb_update_done = true;
+-update_fb_out0:
+-	gma_power_end(dev);
+-}
+-
+-static int tpo_cmd_get_panel_info(struct drm_device *dev,
+-				int pipe,
+-				struct panel_info *pi)
+-{
+-	if (!dev || !pi)
+-		return -EINVAL;
+-
+-	pi->width_mm = TPO_PANEL_WIDTH;
+-	pi->height_mm = TPO_PANEL_HEIGHT;
+-
+-	return 0;
+-}
+-
+-
+-/* TPO DBI encoder helper funcs */
+-static const struct drm_encoder_helper_funcs mdfld_dsi_dbi_helper_funcs = {
+-	.dpms = mdfld_dsi_dbi_dpms,
+-	.mode_fixup = mdfld_dsi_dbi_mode_fixup,
+-	.prepare = mdfld_dsi_dbi_prepare,
+-	.mode_set = mdfld_dsi_dbi_mode_set,
+-	.commit = mdfld_dsi_dbi_commit,
+-};
+-
+-/* TPO DBI encoder funcs */
+-static const struct drm_encoder_funcs mdfld_dsi_dbi_encoder_funcs = {
+-	.destroy = drm_encoder_cleanup,
+-};
+-
+-void tpo_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+-{
+-	p_funcs->encoder_funcs = &mdfld_dsi_dbi_encoder_funcs;
+-	p_funcs->encoder_helper_funcs = &mdfld_dsi_dbi_helper_funcs;
+-	p_funcs->get_config_mode = &tpo_cmd_get_config_mode;
+-	p_funcs->update_fb = mdfld_dsi_dbi_update_fb;
+-	p_funcs->get_panel_info = tpo_cmd_get_panel_info;
+-	p_funcs->reset = mdfld_dsi_panel_reset;
+-	p_funcs->drv_ic_init = mdfld_dsi_brightness_init;
+-}
+diff --git a/drivers/staging/gma500/mdfld_tpo_vid.c b/drivers/staging/gma500/mdfld_tpo_vid.c
+deleted file mode 100644
+index 9549017..0000000
+--- a/drivers/staging/gma500/mdfld_tpo_vid.c
++++ /dev/null
+@@ -1,140 +0,0 @@
+-/*
+- * Copyright © 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- * jim liu <jim.liu at intel.com>
+- * Jackie Li<yaodong.li at intel.com>
+- */
+-
+-#include "mdfld_dsi_dbi.h"
+-#include "mdfld_dsi_dpi.h"
+-#include "mdfld_dsi_output.h"
+-#include "mdfld_output.h"
+-
+-#include "mdfld_dsi_pkg_sender.h"
+-
+-#include "displays/tpo_vid.h"
+-
+-static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev)
+-{
+-	struct drm_display_mode *mode;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
+-	bool use_gct = false;
+-
+-	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+-	if (!mode) {
+-		dev_err(dev->dev, "out of memory\n");
+-		return NULL;
+-	}
+-
+-	if (use_gct) {
+-		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+-		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+-		mode->hsync_start = mode->hdisplay + \
+-				((ti->hsync_offset_hi << 8) | \
+-				ti->hsync_offset_lo);
+-		mode->hsync_end = mode->hsync_start + \
+-				((ti->hsync_pulse_width_hi << 8) | \
+-				ti->hsync_pulse_width_lo);
+-		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
+-								ti->hblank_lo);
+-		mode->vsync_start = \
+-			mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
+-						ti->vsync_offset_lo);
+-		mode->vsync_end = \
+-			mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
+-						ti->vsync_pulse_width_lo);
+-		mode->vtotal = mode->vdisplay + \
+-				((ti->vblank_hi << 8) | ti->vblank_lo);
+-		mode->clock = ti->pixel_clock * 10;
+-
+-		dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
+-		dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
+-		dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
+-		dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
+-		dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
+-		dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
+-		dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
+-		dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
+-		dev_dbg(dev->dev, "clock is %d\n", mode->clock);
+-	} else {
+-		mode->hdisplay = 864;
+-		mode->vdisplay = 480;
+-		mode->hsync_start = 873;
+-		mode->hsync_end = 876;
+-		mode->htotal = 887;
+-		mode->vsync_start = 487;
+-		mode->vsync_end = 490;
+-		mode->vtotal = 499;
+-		mode->clock = 33264;
+-	}
+-
+-	drm_mode_set_name(mode);
+-	drm_mode_set_crtcinfo(mode, 0);
+-
+-	mode->type |= DRM_MODE_TYPE_PREFERRED;
+-
+-	return mode;
+-}
+-
+-static int tpo_vid_get_panel_info(struct drm_device *dev,
+-				int pipe,
+-				struct panel_info *pi)
+-{
+-	if (!dev || !pi)
+-		return -EINVAL;
+-
+-	pi->width_mm = TPO_PANEL_WIDTH;
+-	pi->height_mm = TPO_PANEL_HEIGHT;
+-
+-	return 0;
+-}
+-
+-/*TPO DPI encoder helper funcs*/
+-static const struct drm_encoder_helper_funcs
+-					mdfld_tpo_dpi_encoder_helper_funcs = {
+-	.dpms = mdfld_dsi_dpi_dpms,
+-	.mode_fixup = mdfld_dsi_dpi_mode_fixup,
+-	.prepare = mdfld_dsi_dpi_prepare,
+-	.mode_set = mdfld_dsi_dpi_mode_set,
+-	.commit = mdfld_dsi_dpi_commit,
+-};
+-
+-/*TPO DPI encoder funcs*/
+-static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
+-	.destroy = drm_encoder_cleanup,
+-};
+-
+-void tpo_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs)
+-{
+-	if (!dev || !p_funcs) {
+-		dev_err(dev->dev, "tpo_vid_init: Invalid parameters\n");
+-		return;
+-	}
+-
+-	p_funcs->encoder_funcs = &mdfld_tpo_dpi_encoder_funcs;
+-	p_funcs->encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs;
+-	p_funcs->get_config_mode = &tpo_vid_get_config_mode;
+-	p_funcs->update_fb = NULL;
+-	p_funcs->get_panel_info = tpo_vid_get_panel_info;
+-}
+diff --git a/drivers/staging/gma500/medfield.h b/drivers/staging/gma500/medfield.h
+deleted file mode 100644
+index 09e9687..0000000
+--- a/drivers/staging/gma500/medfield.h
++++ /dev/null
+@@ -1,268 +0,0 @@
+-/*
+- * Copyright © 2011 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- */
+-
+-/* Medfield DSI controller registers */
+-
+-#define MIPIA_DEVICE_READY_REG				0xb000
+-#define MIPIA_INTR_STAT_REG				0xb004
+-#define MIPIA_INTR_EN_REG				0xb008
+-#define MIPIA_DSI_FUNC_PRG_REG				0xb00c
+-#define MIPIA_HS_TX_TIMEOUT_REG				0xb010
+-#define MIPIA_LP_RX_TIMEOUT_REG				0xb014
+-#define MIPIA_TURN_AROUND_TIMEOUT_REG			0xb018
+-#define MIPIA_DEVICE_RESET_TIMER_REG			0xb01c
+-#define MIPIA_DPI_RESOLUTION_REG			0xb020
+-#define MIPIA_DBI_FIFO_THROTTLE_REG			0xb024
+-#define MIPIA_HSYNC_COUNT_REG				0xb028
+-#define MIPIA_HBP_COUNT_REG				0xb02c
+-#define MIPIA_HFP_COUNT_REG				0xb030
+-#define MIPIA_HACTIVE_COUNT_REG				0xb034
+-#define MIPIA_VSYNC_COUNT_REG				0xb038
+-#define MIPIA_VBP_COUNT_REG				0xb03c
+-#define MIPIA_VFP_COUNT_REG				0xb040
+-#define MIPIA_HIGH_LOW_SWITCH_COUNT_REG			0xb044
+-#define MIPIA_DPI_CONTROL_REG				0xb048
+-#define MIPIA_DPI_DATA_REG				0xb04c
+-#define MIPIA_INIT_COUNT_REG				0xb050
+-#define MIPIA_MAX_RETURN_PACK_SIZE_REG			0xb054
+-#define MIPIA_VIDEO_MODE_FORMAT_REG			0xb058
+-#define MIPIA_EOT_DISABLE_REG				0xb05c
+-#define MIPIA_LP_BYTECLK_REG				0xb060
+-#define MIPIA_LP_GEN_DATA_REG				0xb064
+-#define MIPIA_HS_GEN_DATA_REG				0xb068
+-#define MIPIA_LP_GEN_CTRL_REG				0xb06c
+-#define MIPIA_HS_GEN_CTRL_REG				0xb070
+-#define MIPIA_GEN_FIFO_STAT_REG				0xb074
+-#define MIPIA_HS_LS_DBI_ENABLE_REG			0xb078
+-#define MIPIA_DPHY_PARAM_REG				0xb080
+-#define MIPIA_DBI_BW_CTRL_REG				0xb084
+-#define MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG		0xb088
+-
+-#define DSI_DEVICE_READY				(0x1)
+-#define DSI_POWER_STATE_ULPS_ENTER			(0x2 << 1)
+-#define DSI_POWER_STATE_ULPS_EXIT			(0x1 << 1)
+-#define DSI_POWER_STATE_ULPS_OFFSET			(0x1)
+-
+-
+-#define DSI_ONE_DATA_LANE				(0x1)
+-#define DSI_TWO_DATA_LANE				(0x2)
+-#define DSI_THREE_DATA_LANE				(0X3)
+-#define DSI_FOUR_DATA_LANE				(0x4)
+-#define DSI_DPI_VIRT_CHANNEL_OFFSET			(0x3)
+-#define DSI_DBI_VIRT_CHANNEL_OFFSET			(0x5)
+-#define DSI_DPI_COLOR_FORMAT_RGB565			(0x01 << 7)
+-#define DSI_DPI_COLOR_FORMAT_RGB666			(0x02 << 7)
+-#define DSI_DPI_COLOR_FORMAT_RGB666_UNPACK		(0x03 << 7)
+-#define DSI_DPI_COLOR_FORMAT_RGB888			(0x04 << 7)
+-#define DSI_DBI_COLOR_FORMAT_OPTION2			(0x05 << 13)
+-
+-#define DSI_INTR_STATE_RXSOTERROR			1
+-
+-#define DSI_INTR_STATE_SPL_PKG_SENT			(1 << 30)
+-#define DSI_INTR_STATE_TE				(1 << 31)
+-
+-#define DSI_HS_TX_TIMEOUT_MASK				(0xffffff)
+-
+-#define DSI_LP_RX_TIMEOUT_MASK				(0xffffff)
+-
+-#define DSI_TURN_AROUND_TIMEOUT_MASK			(0x3f)
+-
+-#define DSI_RESET_TIMER_MASK				(0xffff)
+-
+-#define DSI_DBI_FIFO_WM_HALF				(0x0)
+-#define DSI_DBI_FIFO_WM_QUARTER				(0x1)
+-#define DSI_DBI_FIFO_WM_LOW				(0x2)
+-
+-#define DSI_DPI_TIMING_MASK				(0xffff)
+-
+-#define DSI_INIT_TIMER_MASK				(0xffff)
+-
+-#define DSI_DBI_RETURN_PACK_SIZE_MASK			(0x3ff)
+-
+-#define DSI_LP_BYTECLK_MASK				(0x0ffff)
+-
+-#define DSI_HS_CTRL_GEN_SHORT_W0			(0x03)
+-#define DSI_HS_CTRL_GEN_SHORT_W1			(0x13)
+-#define DSI_HS_CTRL_GEN_SHORT_W2			(0x23)
+-#define DSI_HS_CTRL_GEN_R0				(0x04)
+-#define DSI_HS_CTRL_GEN_R1				(0x14)
+-#define DSI_HS_CTRL_GEN_R2				(0x24)
+-#define DSI_HS_CTRL_GEN_LONG_W				(0x29)
+-#define DSI_HS_CTRL_MCS_SHORT_W0			(0x05)
+-#define DSI_HS_CTRL_MCS_SHORT_W1			(0x15)
+-#define DSI_HS_CTRL_MCS_R0				(0x06)
+-#define DSI_HS_CTRL_MCS_LONG_W				(0x39)
+-#define DSI_HS_CTRL_VC_OFFSET				(0x06)
+-#define DSI_HS_CTRL_WC_OFFSET				(0x08)
+-
+-#define	DSI_FIFO_GEN_HS_DATA_FULL			(1 << 0)
+-#define DSI_FIFO_GEN_HS_DATA_HALF_EMPTY			(1 << 1)
+-#define DSI_FIFO_GEN_HS_DATA_EMPTY			(1 << 2)
+-#define DSI_FIFO_GEN_LP_DATA_FULL			(1 << 8)
+-#define DSI_FIFO_GEN_LP_DATA_HALF_EMPTY			(1 << 9)
+-#define DSI_FIFO_GEN_LP_DATA_EMPTY			(1 << 10)
+-#define DSI_FIFO_GEN_HS_CTRL_FULL			(1 << 16)
+-#define DSI_FIFO_GEN_HS_CTRL_HALF_EMPTY			(1 << 17)
+-#define DSI_FIFO_GEN_HS_CTRL_EMPTY			(1 << 18)
+-#define DSI_FIFO_GEN_LP_CTRL_FULL			(1 << 24)
+-#define DSI_FIFO_GEN_LP_CTRL_HALF_EMPTY			(1 << 25)
+-#define DSI_FIFO_GEN_LP_CTRL_EMPTY			(1 << 26)
+-#define DSI_FIFO_DBI_EMPTY				(1 << 27)
+-#define DSI_FIFO_DPI_EMPTY				(1 << 28)
+-
+-#define DSI_DBI_HS_LP_SWITCH_MASK			(0x1)
+-
+-#define DSI_HS_LP_SWITCH_COUNTER_OFFSET			(0x0)
+-#define DSI_LP_HS_SWITCH_COUNTER_OFFSET			(0x16)
+-
+-#define DSI_DPI_CTRL_HS_SHUTDOWN			(0x00000001)
+-#define DSI_DPI_CTRL_HS_TURN_ON				(0x00000002)
+-
+-/* Medfield DSI adapter registers */
+-#define MIPIA_CONTROL_REG				0xb104
+-#define MIPIA_DATA_ADD_REG				0xb108
+-#define MIPIA_DATA_LEN_REG				0xb10c
+-#define MIPIA_CMD_ADD_REG				0xb110
+-#define MIPIA_CMD_LEN_REG				0xb114
+-
+-/*dsi power modes*/
+-#define DSI_POWER_MODE_DISPLAY_ON	(1 << 2)
+-#define DSI_POWER_MODE_NORMAL_ON	(1 << 3)
+-#define DSI_POWER_MODE_SLEEP_OUT	(1 << 4)
+-#define DSI_POWER_MODE_PARTIAL_ON	(1 << 5)
+-#define DSI_POWER_MODE_IDLE_ON		(1 << 6)
+-
+-enum {
+-	MDFLD_DSI_ENCODER_DBI = 0,
+-	MDFLD_DSI_ENCODER_DPI,
+-};
+-
+-enum {
+-	MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE = 1,
+-	MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS = 2,
+-	MDFLD_DSI_VIDEO_BURST_MODE = 3,
+-};
+-
+-#define DSI_DPI_COMPLETE_LAST_LINE			(1 << 2)
+-#define DSI_DPI_DISABLE_BTA				(1 << 3)
+-/* Panel types */
+-enum {
+-	TPO_CMD,
+-	TPO_VID,
+-	TMD_CMD,
+-	TMD_VID,
+-	PYR_CMD,
+-	PYR_VID,
+-	TPO,
+-	TMD,
+-	PYR,
+-	HDMI,
+-	GCT_DETECT
+-};
+-
+-/* Junk that belongs elsewhere */
+-#define TPO_PANEL_WIDTH		84
+-#define TPO_PANEL_HEIGHT	46
+-#define TMD_PANEL_WIDTH		39
+-#define TMD_PANEL_HEIGHT	71
+-#define PYR_PANEL_WIDTH		53
+-#define PYR_PANEL_HEIGHT	95
+-
+-/* Panel interface */
+-struct panel_info {
+-	u32 width_mm;
+-	u32 height_mm;
+-};
+-
+-struct mdfld_dsi_dbi_output;
+-
+-struct mdfld_dsi_connector_state {
+-	u32 mipi_ctrl_reg;
+-};
+-
+-struct mdfld_dsi_encoder_state {
+-
+-};
+-
+-struct mdfld_dsi_connector {
+-	/*
+-	 * This is ugly, but I have to use connector in it! :-(
+-	 * FIXME: use drm_connector instead.
+-	 */
+-	struct psb_intel_output base;
+-
+-	int pipe;
+-	void *private;
+-	void *pkg_sender;
+-
+-	/* Connection status */
+-	enum drm_connector_status status;
+-};
+-
+-struct mdfld_dsi_encoder {
+-	struct drm_encoder base;
+-	void *private;
+-};
+-
+-/*
+- * DSI config, consists of one DSI connector, two DSI encoders.
+- * DRM will pick up on DSI encoder basing on differents configs.
+- */
+-struct mdfld_dsi_config {
+-	struct drm_device *dev;
+-	struct drm_display_mode *fixed_mode;
+-	struct drm_display_mode *mode;
+-
+-	struct mdfld_dsi_connector *connector;
+-	struct mdfld_dsi_encoder *encoders[DRM_CONNECTOR_MAX_ENCODER];
+-	struct mdfld_dsi_encoder *encoder;
+-
+-	int changed;
+-
+-	int bpp;
+-	int type;
+-	int lane_count;
+-	/*Virtual channel number for this encoder*/
+-	int channel_num;
+-	/*video mode configure*/
+-	int video_mode;
+-
+-	int dvr_ic_inited;
+-};
+-
+-#define MDFLD_DSI_CONNECTOR(psb_output) \
+-		(container_of(psb_output, struct mdfld_dsi_connector, base))
+-
+-#define MDFLD_DSI_ENCODER(encoder) \
+-		(container_of(encoder, struct mdfld_dsi_encoder, base))
+-
+-struct panel_funcs {
+-	const struct drm_encoder_funcs *encoder_funcs;
+-	const struct drm_encoder_helper_funcs *encoder_helper_funcs;
+-	struct drm_display_mode *(*get_config_mode) (struct drm_device *);
+-	void (*update_fb) (struct mdfld_dsi_dbi_output *, int);
+-	int (*get_panel_info) (struct drm_device *, int, struct panel_info *);
+-	int (*reset)(int pipe);
+-	void (*drv_ic_init)(struct mdfld_dsi_config *dsi_config, int pipe);
+-};
+-
+diff --git a/drivers/staging/gma500/mid_bios.c b/drivers/staging/gma500/mid_bios.c
+deleted file mode 100644
+index ee3c036..0000000
+--- a/drivers/staging/gma500/mid_bios.c
++++ /dev/null
+@@ -1,270 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2011, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- **************************************************************************/
+-
+-/* TODO
+- * - Split functions by vbt type
+- * - Make them all take drm_device
+- * - Check ioremap failures
+- */
+-
+-#include <linux/moduleparam.h>
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-#include "psb_drm.h"
+-#include "psb_drv.h"
+-#include "mid_bios.h"
+-#include "mdfld_output.h"
+-
+-static int panel_id = GCT_DETECT;
+-module_param_named(panel_id, panel_id, int, 0600);
+-MODULE_PARM_DESC(panel_id, "Panel Identifier");
+-
+-
+-static void mid_get_fuse_settings(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+-	uint32_t fuse_value = 0;
+-	uint32_t fuse_value_tmp = 0;
+-
+-#define FB_REG06 0xD0810600
+-#define FB_MIPI_DISABLE  (1 << 11)
+-#define FB_REG09 0xD0810900
+-#define FB_REG09 0xD0810900
+-#define FB_SKU_MASK  0x7000
+-#define FB_SKU_SHIFT 12
+-#define FB_SKU_100 0
+-#define FB_SKU_100L 1
+-#define FB_SKU_83 2
+-	pci_write_config_dword(pci_root, 0xD0, FB_REG06);
+-	pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+-
+-	/* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
+-	if (IS_MRST(dev))
+-		dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
+-
+-	DRM_INFO("internal display is %s\n",
+-		 dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
+-
+-	 /* Prevent runtime suspend at start*/
+-	 if (dev_priv->iLVDS_enable) {
+-		dev_priv->is_lvds_on = true;
+-		dev_priv->is_mipi_on = false;
+-	} else {
+-		dev_priv->is_mipi_on = true;
+-		dev_priv->is_lvds_on = false;
+-	}
+-
+-	dev_priv->video_device_fuse = fuse_value;
+-
+-	pci_write_config_dword(pci_root, 0xD0, FB_REG09);
+-	pci_read_config_dword(pci_root, 0xD4, &fuse_value);
+-
+-	dev_dbg(dev->dev, "SKU values is 0x%x.\n", fuse_value);
+-	fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
+-
+-	dev_priv->fuse_reg_value = fuse_value;
+-
+-	switch (fuse_value_tmp) {
+-	case FB_SKU_100:
+-		dev_priv->core_freq = 200;
+-		break;
+-	case FB_SKU_100L:
+-		dev_priv->core_freq = 100;
+-		break;
+-	case FB_SKU_83:
+-		dev_priv->core_freq = 166;
+-		break;
+-	default:
+-		dev_warn(dev->dev, "Invalid SKU values, SKU value = 0x%08x\n",
+-								fuse_value_tmp);
+-		dev_priv->core_freq = 0;
+-	}
+-	dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
+-	pci_dev_put(pci_root);
+-}
+-
+-/*
+- *	Get the revison ID, B0:D2:F0;0x08
+- */
+-static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
+-{
+-	uint32_t platform_rev_id = 0;
+-	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+-
+-	pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
+-	dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
+-	pci_dev_put(pci_gfx_root);
+-	dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
+-					dev_priv->platform_rev_id);
+-}
+-
+-static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
+-{
+-	struct drm_device *dev = dev_priv->dev;
+-	struct mrst_vbt *vbt = &dev_priv->vbt_data;
+-	u32 addr;
+-	u16 new_size;
+-	u8 *vbt_virtual;
+-	u8 bpi;
+-	u8 number_desc = 0;
+-	struct mrst_timing_info *dp_ti = &dev_priv->gct_data.DTD;
+-	struct gct_r10_timing_info ti;
+-	void *pGCT;
+-	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+-
+-	/* Get the address of the platform config vbt, B0:D2:F0;0xFC */
+-	pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
+-	pci_dev_put(pci_gfx_root);
+-
+-	dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
+-
+-	/* check for platform config address == 0. */
+-	/* this means fw doesn't support vbt */
+-
+-	if (addr == 0) {
+-		vbt->size = 0;
+-		return;
+-	}
+-
+-	/* get the virtual address of the vbt */
+-	vbt_virtual = ioremap(addr, sizeof(*vbt));
+-
+-	memcpy(vbt, vbt_virtual, sizeof(*vbt));
+-	iounmap(vbt_virtual); /* Free virtual address space */
+-
+-	dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);
+-
+-	switch (vbt->revision) {
+-	case 0:
+-		vbt->mrst_gct = ioremap(addr + sizeof(*vbt) - 4,
+-					vbt->size - sizeof(*vbt) + 4);
+-		pGCT = vbt->mrst_gct;
+-		bpi = ((struct mrst_gct_v1 *)pGCT)->PD.BootPanelIndex;
+-		dev_priv->gct_data.bpi = bpi;
+-		dev_priv->gct_data.pt =
+-			((struct mrst_gct_v1 *)pGCT)->PD.PanelType;
+-		memcpy(&dev_priv->gct_data.DTD,
+-			&((struct mrst_gct_v1 *)pGCT)->panel[bpi].DTD,
+-				sizeof(struct mrst_timing_info));
+-		dev_priv->gct_data.Panel_Port_Control =
+-		  ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
+-		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+-			((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+-		break;
+-	case 1:
+-		vbt->mrst_gct = ioremap(addr + sizeof(*vbt) - 4,
+-					vbt->size - sizeof(*vbt) + 4);
+-		pGCT = vbt->mrst_gct;
+-		bpi = ((struct mrst_gct_v2 *)pGCT)->PD.BootPanelIndex;
+-		dev_priv->gct_data.bpi = bpi;
+-		dev_priv->gct_data.pt =
+-			((struct mrst_gct_v2 *)pGCT)->PD.PanelType;
+-		memcpy(&dev_priv->gct_data.DTD,
+-			&((struct mrst_gct_v2 *)pGCT)->panel[bpi].DTD,
+-				sizeof(struct mrst_timing_info));
+-		dev_priv->gct_data.Panel_Port_Control =
+-		  ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
+-		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+-			((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+-		break;
+-	case 0x10:
+-		/*header definition changed from rev 01 (v2) to rev 10h. */
+-		/*so, some values have changed location*/
+-		new_size = vbt->checksum; /*checksum contains lo size byte*/
+-		/*LSB of mrst_gct contains hi size byte*/
+-		new_size |= ((0xff & (unsigned int)vbt->mrst_gct)) << 8;
+-
+-		vbt->checksum = vbt->size; /*size contains the checksum*/
+-		if (new_size > 0xff)
+-			vbt->size = 0xff; /*restrict size to 255*/
+-		else
+-			vbt->size = new_size;
+-
+-		/* number of descriptors defined in the GCT */
+-		number_desc = ((0xff00 & (unsigned int)vbt->mrst_gct)) >> 8;
+-		bpi = ((0xff0000 & (unsigned int)vbt->mrst_gct)) >> 16;
+-		vbt->mrst_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
+-				GCT_R10_DISPLAY_DESC_SIZE * number_desc);
+-		pGCT = vbt->mrst_gct;
+-		pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
+-		dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
+-
+-		/*copy the GCT display timings into a temp structure*/
+-		memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
+-
+-		/*now copy the temp struct into the dev_priv->gct_data*/
+-		dp_ti->pixel_clock = ti.pixel_clock;
+-		dp_ti->hactive_hi = ti.hactive_hi;
+-		dp_ti->hactive_lo = ti.hactive_lo;
+-		dp_ti->hblank_hi = ti.hblank_hi;
+-		dp_ti->hblank_lo = ti.hblank_lo;
+-		dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
+-		dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
+-		dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
+-		dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
+-		dp_ti->vactive_hi = ti.vactive_hi;
+-		dp_ti->vactive_lo = ti.vactive_lo;
+-		dp_ti->vblank_hi = ti.vblank_hi;
+-		dp_ti->vblank_lo = ti.vblank_lo;
+-		dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
+-		dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
+-		dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
+-		dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
+-
+-		/* Move the MIPI_Display_Descriptor data from GCT to dev priv */
+-		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
+-							*((u8 *)pGCT + 0x0d);
+-		dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
+-						(*((u8 *)pGCT + 0x0e)) << 8;
+-		break;
+-	default:
+-		dev_err(dev->dev, "Unknown revision of GCT!\n");
+-		vbt->size = 0;
+-	}
+-	if (IS_MFLD(dev_priv->dev)) {
+-		if (panel_id == GCT_DETECT) {
+-			if (dev_priv->gct_data.bpi == 2) {
+-				dev_info(dev->dev, "[GFX] PYR Panel Detected\n");
+-				dev_priv->panel_id = PYR_CMD;
+-				panel_id = PYR_CMD;
+-			} else if (dev_priv->gct_data.bpi == 0) {
+-				dev_info(dev->dev, "[GFX] TMD Panel Detected.\n");
+-				dev_priv->panel_id = TMD_VID;
+-				panel_id = TMD_VID;
+-			} else {
+-				dev_info(dev->dev, "[GFX] Default Panel (TPO)\n");
+-				dev_priv->panel_id = TPO_CMD;
+-				panel_id = TPO_CMD;
+-			}
+-		} else {
+-			dev_info(dev->dev, "[GFX] Panel Parameter Passed in through cmd line\n");
+-			dev_priv->panel_id = panel_id;
+-		}
+-	}
+-}
+-
+-int mid_chip_setup(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	mid_get_fuse_settings(dev);
+-	mid_get_vbt_data(dev_priv);
+-	mid_get_pci_revID(dev_priv);
+-	return 0;
+-}
+diff --git a/drivers/staging/gma500/mid_bios.h b/drivers/staging/gma500/mid_bios.h
+deleted file mode 100644
+index 00e7d56..0000000
+--- a/drivers/staging/gma500/mid_bios.h
++++ /dev/null
+@@ -1,21 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2011, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- **************************************************************************/
+-
+-extern int mid_chip_setup(struct drm_device *dev);
+-
+diff --git a/drivers/staging/gma500/mmu.c b/drivers/staging/gma500/mmu.c
+deleted file mode 100644
+index c904d73..0000000
+--- a/drivers/staging/gma500/mmu.c
++++ /dev/null
+@@ -1,858 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2007, Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- **************************************************************************/
+-#include <drm/drmP.h>
+-#include "psb_drv.h"
+-#include "psb_reg.h"
+-
+-/*
+- * Code for the SGX MMU:
+- */
+-
+-/*
+- * clflush on one processor only:
+- * clflush should apparently flush the cache line on all processors in an
+- * SMP system.
+- */
+-
+-/*
+- * kmap atomic:
+- * The usage of the slots must be completely encapsulated within a spinlock, and
+- * no other functions that may be using the locks for other purposed may be
+- * called from within the locked region.
+- * Since the slots are per processor, this will guarantee that we are the only
+- * user.
+- */
+-
+-/*
+- * TODO: Inserting ptes from an interrupt handler:
+- * This may be desirable for some SGX functionality where the GPU can fault in
+- * needed pages. For that, we need to make an atomic insert_pages function, that
+- * may fail.
+- * If it fails, the caller need to insert the page using a workqueue function,
+- * but on average it should be fast.
+- */
+-
+-struct psb_mmu_driver {
+-	/* protects driver- and pd structures. Always take in read mode
+-	 * before taking the page table spinlock.
+-	 */
+-	struct rw_semaphore sem;
+-
+-	/* protects page tables, directory tables and pt tables.
+-	 * and pt structures.
+-	 */
+-	spinlock_t lock;
+-
+-	atomic_t needs_tlbflush;
+-
+-	uint8_t __iomem *register_map;
+-	struct psb_mmu_pd *default_pd;
+-	/*uint32_t bif_ctrl;*/
+-	int has_clflush;
+-	int clflush_add;
+-	unsigned long clflush_mask;
+-
+-	struct drm_psb_private *dev_priv;
+-};
+-
+-struct psb_mmu_pd;
+-
+-struct psb_mmu_pt {
+-	struct psb_mmu_pd *pd;
+-	uint32_t index;
+-	uint32_t count;
+-	struct page *p;
+-	uint32_t *v;
+-};
+-
+-struct psb_mmu_pd {
+-	struct psb_mmu_driver *driver;
+-	int hw_context;
+-	struct psb_mmu_pt **tables;
+-	struct page *p;
+-	struct page *dummy_pt;
+-	struct page *dummy_page;
+-	uint32_t pd_mask;
+-	uint32_t invalid_pde;
+-	uint32_t invalid_pte;
+-};
+-
+-static inline uint32_t psb_mmu_pt_index(uint32_t offset)
+-{
+-	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
+-}
+-
+-static inline uint32_t psb_mmu_pd_index(uint32_t offset)
+-{
+-	return offset >> PSB_PDE_SHIFT;
+-}
+-
+-static inline void psb_clflush(void *addr)
+-{
+-	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
+-}
+-
+-static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
+-				   void *addr)
+-{
+-	if (!driver->has_clflush)
+-		return;
+-
+-	mb();
+-	psb_clflush(addr);
+-	mb();
+-}
+-
+-static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
+-{
+-	uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
+-	uint32_t clflush_count = PAGE_SIZE / clflush_add;
+-	int i;
+-	uint8_t *clf;
+-
+-	clf = kmap_atomic(page, KM_USER0);
+-	mb();
+-	for (i = 0; i < clflush_count; ++i) {
+-		psb_clflush(clf);
+-		clf += clflush_add;
+-	}
+-	mb();
+-	kunmap_atomic(clf, KM_USER0);
+-}
+-
+-static void psb_pages_clflush(struct psb_mmu_driver *driver,
+-				struct page *page[], unsigned long num_pages)
+-{
+-	int i;
+-
+-	if (!driver->has_clflush)
+-		return ;
+-
+-	for (i = 0; i < num_pages; i++)
+-		psb_page_clflush(driver, *page++);
+-}
+-
+-static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
+-				    int force)
+-{
+-	atomic_set(&driver->needs_tlbflush, 0);
+-}
+-
+-static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
+-{
+-	down_write(&driver->sem);
+-	psb_mmu_flush_pd_locked(driver, force);
+-	up_write(&driver->sem);
+-}
+-
+-void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
+-{
+-	if (rc_prot)
+-		down_write(&driver->sem);
+-	if (rc_prot)
+-		up_write(&driver->sem);
+-}
+-
+-void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
+-{
+-	/*ttm_tt_cache_flush(&pd->p, 1);*/
+-	psb_pages_clflush(pd->driver, &pd->p, 1);
+-	down_write(&pd->driver->sem);
+-	wmb();
+-	psb_mmu_flush_pd_locked(pd->driver, 1);
+-	pd->hw_context = hw_context;
+-	up_write(&pd->driver->sem);
+-
+-}
+-
+-static inline unsigned long psb_pd_addr_end(unsigned long addr,
+-					    unsigned long end)
+-{
+-
+-	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
+-	return (addr < end) ? addr : end;
+-}
+-
+-static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
+-{
+-	uint32_t mask = PSB_PTE_VALID;
+-
+-	if (type & PSB_MMU_CACHED_MEMORY)
+-		mask |= PSB_PTE_CACHED;
+-	if (type & PSB_MMU_RO_MEMORY)
+-		mask |= PSB_PTE_RO;
+-	if (type & PSB_MMU_WO_MEMORY)
+-		mask |= PSB_PTE_WO;
+-
+-	return (pfn << PAGE_SHIFT) | mask;
+-}
+-
+-struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+-				    int trap_pagefaults, int invalid_type)
+-{
+-	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+-	uint32_t *v;
+-	int i;
+-
+-	if (!pd)
+-		return NULL;
+-
+-	pd->p = alloc_page(GFP_DMA32);
+-	if (!pd->p)
+-		goto out_err1;
+-	pd->dummy_pt = alloc_page(GFP_DMA32);
+-	if (!pd->dummy_pt)
+-		goto out_err2;
+-	pd->dummy_page = alloc_page(GFP_DMA32);
+-	if (!pd->dummy_page)
+-		goto out_err3;
+-
+-	if (!trap_pagefaults) {
+-		pd->invalid_pde =
+-		    psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
+-				     invalid_type);
+-		pd->invalid_pte =
+-		    psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
+-				     invalid_type);
+-	} else {
+-		pd->invalid_pde = 0;
+-		pd->invalid_pte = 0;
+-	}
+-
+-	v = kmap(pd->dummy_pt);
+-	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+-		v[i] = pd->invalid_pte;
+-
+-	kunmap(pd->dummy_pt);
+-
+-	v = kmap(pd->p);
+-	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+-		v[i] = pd->invalid_pde;
+-
+-	kunmap(pd->p);
+-
+-	clear_page(kmap(pd->dummy_page));
+-	kunmap(pd->dummy_page);
+-
+-	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
+-	if (!pd->tables)
+-		goto out_err4;
+-
+-	pd->hw_context = -1;
+-	pd->pd_mask = PSB_PTE_VALID;
+-	pd->driver = driver;
+-
+-	return pd;
+-
+-out_err4:
+-	__free_page(pd->dummy_page);
+-out_err3:
+-	__free_page(pd->dummy_pt);
+-out_err2:
+-	__free_page(pd->p);
+-out_err1:
+-	kfree(pd);
+-	return NULL;
+-}
+-
+-void psb_mmu_free_pt(struct psb_mmu_pt *pt)
+-{
+-	__free_page(pt->p);
+-	kfree(pt);
+-}
+-
+-void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
+-{
+-	struct psb_mmu_driver *driver = pd->driver;
+-	struct psb_mmu_pt *pt;
+-	int i;
+-
+-	down_write(&driver->sem);
+-	if (pd->hw_context != -1)
+-		psb_mmu_flush_pd_locked(driver, 1);
+-
+-	/* Should take the spinlock here, but we don't need to do that
+-	   since we have the semaphore in write mode. */
+-
+-	for (i = 0; i < 1024; ++i) {
+-		pt = pd->tables[i];
+-		if (pt)
+-			psb_mmu_free_pt(pt);
+-	}
+-
+-	vfree(pd->tables);
+-	__free_page(pd->dummy_page);
+-	__free_page(pd->dummy_pt);
+-	__free_page(pd->p);
+-	kfree(pd);
+-	up_write(&driver->sem);
+-}
+-
+-static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
+-{
+-	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
+-	void *v;
+-	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
+-	uint32_t clflush_count = PAGE_SIZE / clflush_add;
+-	spinlock_t *lock = &pd->driver->lock;
+-	uint8_t *clf;
+-	uint32_t *ptes;
+-	int i;
+-
+-	if (!pt)
+-		return NULL;
+-
+-	pt->p = alloc_page(GFP_DMA32);
+-	if (!pt->p) {
+-		kfree(pt);
+-		return NULL;
+-	}
+-
+-	spin_lock(lock);
+-
+-	v = kmap_atomic(pt->p, KM_USER0);
+-	clf = (uint8_t *) v;
+-	ptes = (uint32_t *) v;
+-	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
+-		*ptes++ = pd->invalid_pte;
+-
+-
+-	if (pd->driver->has_clflush && pd->hw_context != -1) {
+-		mb();
+-		for (i = 0; i < clflush_count; ++i) {
+-			psb_clflush(clf);
+-			clf += clflush_add;
+-		}
+-		mb();
+-	}
+-
+-	kunmap_atomic(v, KM_USER0);
+-	spin_unlock(lock);
+-
+-	pt->count = 0;
+-	pt->pd = pd;
+-	pt->index = 0;
+-
+-	return pt;
+-}
+-
+-struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
+-					     unsigned long addr)
+-{
+-	uint32_t index = psb_mmu_pd_index(addr);
+-	struct psb_mmu_pt *pt;
+-	uint32_t *v;
+-	spinlock_t *lock = &pd->driver->lock;
+-
+-	spin_lock(lock);
+-	pt = pd->tables[index];
+-	while (!pt) {
+-		spin_unlock(lock);
+-		pt = psb_mmu_alloc_pt(pd);
+-		if (!pt)
+-			return NULL;
+-		spin_lock(lock);
+-
+-		if (pd->tables[index]) {
+-			spin_unlock(lock);
+-			psb_mmu_free_pt(pt);
+-			spin_lock(lock);
+-			pt = pd->tables[index];
+-			continue;
+-		}
+-
+-		v = kmap_atomic(pd->p, KM_USER0);
+-		pd->tables[index] = pt;
+-		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
+-		pt->index = index;
+-		kunmap_atomic((void *) v, KM_USER0);
+-
+-		if (pd->hw_context != -1) {
+-			psb_mmu_clflush(pd->driver, (void *) &v[index]);
+-			atomic_set(&pd->driver->needs_tlbflush, 1);
+-		}
+-	}
+-	pt->v = kmap_atomic(pt->p, KM_USER0);
+-	return pt;
+-}
+-
+-static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
+-					      unsigned long addr)
+-{
+-	uint32_t index = psb_mmu_pd_index(addr);
+-	struct psb_mmu_pt *pt;
+-	spinlock_t *lock = &pd->driver->lock;
+-
+-	spin_lock(lock);
+-	pt = pd->tables[index];
+-	if (!pt) {
+-		spin_unlock(lock);
+-		return NULL;
+-	}
+-	pt->v = kmap_atomic(pt->p, KM_USER0);
+-	return pt;
+-}
+-
+-static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
+-{
+-	struct psb_mmu_pd *pd = pt->pd;
+-	uint32_t *v;
+-
+-	kunmap_atomic(pt->v, KM_USER0);
+-	if (pt->count == 0) {
+-		v = kmap_atomic(pd->p, KM_USER0);
+-		v[pt->index] = pd->invalid_pde;
+-		pd->tables[pt->index] = NULL;
+-
+-		if (pd->hw_context != -1) {
+-			psb_mmu_clflush(pd->driver,
+-					(void *) &v[pt->index]);
+-			atomic_set(&pd->driver->needs_tlbflush, 1);
+-		}
+-		kunmap_atomic(pt->v, KM_USER0);
+-		spin_unlock(&pd->driver->lock);
+-		psb_mmu_free_pt(pt);
+-		return;
+-	}
+-	spin_unlock(&pd->driver->lock);
+-}
+-
+-static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
+-				   unsigned long addr, uint32_t pte)
+-{
+-	pt->v[psb_mmu_pt_index(addr)] = pte;
+-}
+-
+-static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
+-					  unsigned long addr)
+-{
+-	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
+-}
+-
+-
+-void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
+-			uint32_t mmu_offset, uint32_t gtt_start,
+-			uint32_t gtt_pages)
+-{
+-	uint32_t *v;
+-	uint32_t start = psb_mmu_pd_index(mmu_offset);
+-	struct psb_mmu_driver *driver = pd->driver;
+-	int num_pages = gtt_pages;
+-
+-	down_read(&driver->sem);
+-	spin_lock(&driver->lock);
+-
+-	v = kmap_atomic(pd->p, KM_USER0);
+-	v += start;
+-
+-	while (gtt_pages--) {
+-		*v++ = gtt_start | pd->pd_mask;
+-		gtt_start += PAGE_SIZE;
+-	}
+-
+-	/*ttm_tt_cache_flush(&pd->p, num_pages);*/
+-	psb_pages_clflush(pd->driver, &pd->p, num_pages);
+-	kunmap_atomic(v, KM_USER0);
+-	spin_unlock(&driver->lock);
+-
+-	if (pd->hw_context != -1)
+-		atomic_set(&pd->driver->needs_tlbflush, 1);
+-
+-	up_read(&pd->driver->sem);
+-	psb_mmu_flush_pd(pd->driver, 0);
+-}
+-
+-struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
+-{
+-	struct psb_mmu_pd *pd;
+-
+-	/* down_read(&driver->sem); */
+-	pd = driver->default_pd;
+-	/* up_read(&driver->sem); */
+-
+-	return pd;
+-}
+-
+-/* Returns the physical address of the PD shared by sgx/msvdx */
+-uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver)
+-{
+-	struct psb_mmu_pd *pd;
+-
+-	pd = psb_mmu_get_default_pd(driver);
+-	return page_to_pfn(pd->p) << PAGE_SHIFT;
+-}
+-
+-void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
+-{
+-	psb_mmu_free_pagedir(driver->default_pd);
+-	kfree(driver);
+-}
+-
+-struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+-					int trap_pagefaults,
+-					int invalid_type,
+-					struct drm_psb_private *dev_priv)
+-{
+-	struct psb_mmu_driver *driver;
+-
+-	driver = kmalloc(sizeof(*driver), GFP_KERNEL);
+-
+-	if (!driver)
+-		return NULL;
+-	driver->dev_priv = dev_priv;
+-
+-	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
+-					      invalid_type);
+-	if (!driver->default_pd)
+-		goto out_err1;
+-
+-	spin_lock_init(&driver->lock);
+-	init_rwsem(&driver->sem);
+-	down_write(&driver->sem);
+-	driver->register_map = registers;
+-	atomic_set(&driver->needs_tlbflush, 1);
+-
+-	driver->has_clflush = 0;
+-
+-	if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
+-		uint32_t tfms, misc, cap0, cap4, clflush_size;
+-
+-		/*
+-		 * clflush size is determined at kernel setup for x86_64
+-		 *  but not for i386. We have to do it here.
+-		 */
+-
+-		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
+-		clflush_size = ((misc >> 8) & 0xff) * 8;
+-		driver->has_clflush = 1;
+-		driver->clflush_add =
+-		    PAGE_SIZE * clflush_size / sizeof(uint32_t);
+-		driver->clflush_mask = driver->clflush_add - 1;
+-		driver->clflush_mask = ~driver->clflush_mask;
+-	}
+-
+-	up_write(&driver->sem);
+-	return driver;
+-
+-out_err1:
+-	kfree(driver);
+-	return NULL;
+-}
+-
+-static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
+-			       unsigned long address, uint32_t num_pages,
+-			       uint32_t desired_tile_stride,
+-			       uint32_t hw_tile_stride)
+-{
+-	struct psb_mmu_pt *pt;
+-	uint32_t rows = 1;
+-	uint32_t i;
+-	unsigned long addr;
+-	unsigned long end;
+-	unsigned long next;
+-	unsigned long add;
+-	unsigned long row_add;
+-	unsigned long clflush_add = pd->driver->clflush_add;
+-	unsigned long clflush_mask = pd->driver->clflush_mask;
+-
+-	if (!pd->driver->has_clflush) {
+-		/*ttm_tt_cache_flush(&pd->p, num_pages);*/
+-		psb_pages_clflush(pd->driver, &pd->p, num_pages);
+-		return;
+-	}
+-
+-	if (hw_tile_stride)
+-		rows = num_pages / desired_tile_stride;
+-	else
+-		desired_tile_stride = num_pages;
+-
+-	add = desired_tile_stride << PAGE_SHIFT;
+-	row_add = hw_tile_stride << PAGE_SHIFT;
+-	mb();
+-	for (i = 0; i < rows; ++i) {
+-
+-		addr = address;
+-		end = addr + add;
+-
+-		do {
+-			next = psb_pd_addr_end(addr, end);
+-			pt = psb_mmu_pt_map_lock(pd, addr);
+-			if (!pt)
+-				continue;
+-			do {
+-				psb_clflush(&pt->v
+-					    [psb_mmu_pt_index(addr)]);
+-			} while (addr +=
+-				 clflush_add,
+-				 (addr & clflush_mask) < next);
+-
+-			psb_mmu_pt_unmap_unlock(pt);
+-		} while (addr = next, next != end);
+-		address += row_add;
+-	}
+-	mb();
+-}
+-
+-void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+-				 unsigned long address, uint32_t num_pages)
+-{
+-	struct psb_mmu_pt *pt;
+-	unsigned long addr;
+-	unsigned long end;
+-	unsigned long next;
+-	unsigned long f_address = address;
+-
+-	down_read(&pd->driver->sem);
+-
+-	addr = address;
+-	end = addr + (num_pages << PAGE_SHIFT);
+-
+-	do {
+-		next = psb_pd_addr_end(addr, end);
+-		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+-		if (!pt)
+-			goto out;
+-		do {
+-			psb_mmu_invalidate_pte(pt, addr);
+-			--pt->count;
+-		} while (addr += PAGE_SIZE, addr < next);
+-		psb_mmu_pt_unmap_unlock(pt);
+-
+-	} while (addr = next, next != end);
+-
+-out:
+-	if (pd->hw_context != -1)
+-		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+-
+-	up_read(&pd->driver->sem);
+-
+-	if (pd->hw_context != -1)
+-		psb_mmu_flush(pd->driver, 0);
+-
+-	return;
+-}
+-
+-void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
+-			  uint32_t num_pages, uint32_t desired_tile_stride,
+-			  uint32_t hw_tile_stride)
+-{
+-	struct psb_mmu_pt *pt;
+-	uint32_t rows = 1;
+-	uint32_t i;
+-	unsigned long addr;
+-	unsigned long end;
+-	unsigned long next;
+-	unsigned long add;
+-	unsigned long row_add;
+-	unsigned long f_address = address;
+-
+-	if (hw_tile_stride)
+-		rows = num_pages / desired_tile_stride;
+-	else
+-		desired_tile_stride = num_pages;
+-
+-	add = desired_tile_stride << PAGE_SHIFT;
+-	row_add = hw_tile_stride << PAGE_SHIFT;
+-
+-	/* down_read(&pd->driver->sem); */
+-
+-	/* Make sure we only need to flush this processor's cache */
+-
+-	for (i = 0; i < rows; ++i) {
+-
+-		addr = address;
+-		end = addr + add;
+-
+-		do {
+-			next = psb_pd_addr_end(addr, end);
+-			pt = psb_mmu_pt_map_lock(pd, addr);
+-			if (!pt)
+-				continue;
+-			do {
+-				psb_mmu_invalidate_pte(pt, addr);
+-				--pt->count;
+-
+-			} while (addr += PAGE_SIZE, addr < next);
+-			psb_mmu_pt_unmap_unlock(pt);
+-
+-		} while (addr = next, next != end);
+-		address += row_add;
+-	}
+-	if (pd->hw_context != -1)
+-		psb_mmu_flush_ptes(pd, f_address, num_pages,
+-				   desired_tile_stride, hw_tile_stride);
+-
+-	/* up_read(&pd->driver->sem); */
+-
+-	if (pd->hw_context != -1)
+-		psb_mmu_flush(pd->driver, 0);
+-}
+-
+-int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
+-				unsigned long address, uint32_t num_pages,
+-				int type)
+-{
+-	struct psb_mmu_pt *pt;
+-	uint32_t pte;
+-	unsigned long addr;
+-	unsigned long end;
+-	unsigned long next;
+-	unsigned long f_address = address;
+-	int ret = 0;
+-
+-	down_read(&pd->driver->sem);
+-
+-	addr = address;
+-	end = addr + (num_pages << PAGE_SHIFT);
+-
+-	do {
+-		next = psb_pd_addr_end(addr, end);
+-		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+-		if (!pt) {
+-			ret = -ENOMEM;
+-			goto out;
+-		}
+-		do {
+-			pte = psb_mmu_mask_pte(start_pfn++, type);
+-			psb_mmu_set_pte(pt, addr, pte);
+-			pt->count++;
+-		} while (addr += PAGE_SIZE, addr < next);
+-		psb_mmu_pt_unmap_unlock(pt);
+-
+-	} while (addr = next, next != end);
+-
+-out:
+-	if (pd->hw_context != -1)
+-		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
+-
+-	up_read(&pd->driver->sem);
+-
+-	if (pd->hw_context != -1)
+-		psb_mmu_flush(pd->driver, 1);
+-
+-	return ret;
+-}
+-
+-int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+-			 unsigned long address, uint32_t num_pages,
+-			 uint32_t desired_tile_stride,
+-			 uint32_t hw_tile_stride, int type)
+-{
+-	struct psb_mmu_pt *pt;
+-	uint32_t rows = 1;
+-	uint32_t i;
+-	uint32_t pte;
+-	unsigned long addr;
+-	unsigned long end;
+-	unsigned long next;
+-	unsigned long add;
+-	unsigned long row_add;
+-	unsigned long f_address = address;
+-	int ret = 0;
+-
+-	if (hw_tile_stride) {
+-		if (num_pages % desired_tile_stride != 0)
+-			return -EINVAL;
+-		rows = num_pages / desired_tile_stride;
+-	} else {
+-		desired_tile_stride = num_pages;
+-	}
+-
+-	add = desired_tile_stride << PAGE_SHIFT;
+-	row_add = hw_tile_stride << PAGE_SHIFT;
+-
+-	down_read(&pd->driver->sem);
+-
+-	for (i = 0; i < rows; ++i) {
+-
+-		addr = address;
+-		end = addr + add;
+-
+-		do {
+-			next = psb_pd_addr_end(addr, end);
+-			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
+-			if (!pt) {
+-				ret = -ENOMEM;
+-				goto out;
+-			}
+-			do {
+-				pte =
+-				    psb_mmu_mask_pte(page_to_pfn(*pages++),
+-						     type);
+-				psb_mmu_set_pte(pt, addr, pte);
+-				pt->count++;
+-			} while (addr += PAGE_SIZE, addr < next);
+-			psb_mmu_pt_unmap_unlock(pt);
+-
+-		} while (addr = next, next != end);
+-
+-		address += row_add;
+-	}
+-out:
+-	if (pd->hw_context != -1)
+-		psb_mmu_flush_ptes(pd, f_address, num_pages,
+-				   desired_tile_stride, hw_tile_stride);
+-
+-	up_read(&pd->driver->sem);
+-
+-	if (pd->hw_context != -1)
+-		psb_mmu_flush(pd->driver, 1);
+-
+-	return ret;
+-}
+-
+-int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+-			   unsigned long *pfn)
+-{
+-	int ret;
+-	struct psb_mmu_pt *pt;
+-	uint32_t tmp;
+-	spinlock_t *lock = &pd->driver->lock;
+-
+-	down_read(&pd->driver->sem);
+-	pt = psb_mmu_pt_map_lock(pd, virtual);
+-	if (!pt) {
+-		uint32_t *v;
+-
+-		spin_lock(lock);
+-		v = kmap_atomic(pd->p, KM_USER0);
+-		tmp = v[psb_mmu_pd_index(virtual)];
+-		kunmap_atomic(v, KM_USER0);
+-		spin_unlock(lock);
+-
+-		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
+-		    !(pd->invalid_pte & PSB_PTE_VALID)) {
+-			ret = -EINVAL;
+-			goto out;
+-		}
+-		ret = 0;
+-		*pfn = pd->invalid_pte >> PAGE_SHIFT;
+-		goto out;
+-	}
+-	tmp = pt->v[psb_mmu_pt_index(virtual)];
+-	if (!(tmp & PSB_PTE_VALID)) {
+-		ret = -EINVAL;
+-	} else {
+-		ret = 0;
+-		*pfn = tmp >> PAGE_SHIFT;
+-	}
+-	psb_mmu_pt_unmap_unlock(pt);
+-out:
+-	up_read(&pd->driver->sem);
+-	return ret;
+-}
+diff --git a/drivers/staging/gma500/mrst.h b/drivers/staging/gma500/mrst.h
+deleted file mode 100644
+index b563dbc..0000000
+--- a/drivers/staging/gma500/mrst.h
++++ /dev/null
+@@ -1,252 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2007-2011, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- **************************************************************************/
+-
+-/* MID device specific descriptors */
+-
+-struct mrst_vbt {
+-	s8 signature[4];	/*4 bytes,"$GCT" */
+-	u8 revision;
+-	u8 size;
+-	u8 checksum;
+-	void *mrst_gct;
+-} __packed;
+-
+-struct mrst_timing_info {
+-	u16 pixel_clock;
+-	u8 hactive_lo;
+-	u8 hblank_lo;
+-	u8 hblank_hi:4;
+-	u8 hactive_hi:4;
+-	u8 vactive_lo;
+-	u8 vblank_lo;
+-	u8 vblank_hi:4;
+-	u8 vactive_hi:4;
+-	u8 hsync_offset_lo;
+-	u8 hsync_pulse_width_lo;
+-	u8 vsync_pulse_width_lo:4;
+-	u8 vsync_offset_lo:4;
+-	u8 vsync_pulse_width_hi:2;
+-	u8 vsync_offset_hi:2;
+-	u8 hsync_pulse_width_hi:2;
+-	u8 hsync_offset_hi:2;
+-	u8 width_mm_lo;
+-	u8 height_mm_lo;
+-	u8 height_mm_hi:4;
+-	u8 width_mm_hi:4;
+-	u8 hborder;
+-	u8 vborder;
+-	u8 unknown0:1;
+-	u8 hsync_positive:1;
+-	u8 vsync_positive:1;
+-	u8 separate_sync:2;
+-	u8 stereo:1;
+-	u8 unknown6:1;
+-	u8 interlaced:1;
+-} __packed;
+-
+-struct gct_r10_timing_info {
+-	u16 pixel_clock;
+-	u32 hactive_lo:8;
+-	u32 hactive_hi:4;
+-	u32 hblank_lo:8;
+-	u32 hblank_hi:4;
+-	u32 hsync_offset_lo:8;
+-	u16 hsync_offset_hi:2;
+-	u16 hsync_pulse_width_lo:8;
+-	u16 hsync_pulse_width_hi:2;
+-	u16 hsync_positive:1;
+-	u16 rsvd_1:3;
+-	u8  vactive_lo:8;
+-	u16 vactive_hi:4;
+-	u16 vblank_lo:8;
+-	u16 vblank_hi:4;
+-	u16 vsync_offset_lo:4;
+-	u16 vsync_offset_hi:2;
+-	u16 vsync_pulse_width_lo:4;
+-	u16 vsync_pulse_width_hi:2;
+-	u16 vsync_positive:1;
+-	u16 rsvd_2:3;
+-} __packed;
+-
+-struct mrst_panel_descriptor_v1 {
+-	u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+-				/* 0x61190 if MIPI */
+-	u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+-	u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+-	u32 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
+-						/* Register 0x61210 */
+-	struct mrst_timing_info DTD;/*18 bytes, Standard definition */
+-	u16 Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
+-				/* Bit 0, Frequency, 15 bits,0 - 32767Hz */
+-			/* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
+-	u16 Panel_MIPI_Display_Descriptor;
+-			/*16 bits, Defined as follows: */
+-			/* if MIPI, 0x0000 if LVDS */
+-			/* Bit 0, Type, 2 bits, */
+-			/* 0: Type-1, */
+-			/* 1: Type-2, */
+-			/* 2: Type-3, */
+-			/* 3: Type-4 */
+-			/* Bit 2, Pixel Format, 4 bits */
+-			/* Bit0: 16bpp (not supported in LNC), */
+-			/* Bit1: 18bpp loosely packed, */
+-			/* Bit2: 18bpp packed, */
+-			/* Bit3: 24bpp */
+-			/* Bit 6, Reserved, 2 bits, 00b */
+-			/* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+-			/* Bit 14, Reserved, 2 bits, 00b */
+-} __packed;
+-
+-struct mrst_panel_descriptor_v2 {
+-	u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
+-				/* 0x61190 if MIPI */
+-	u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
+-	u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+-	u8 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
+-						/* Register 0x61210 */
+-	struct mrst_timing_info DTD;/*18 bytes, Standard definition */
+-	u16 Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
+-				/*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
+-	u8 Panel_Initial_Brightness;/* [7:0] 0 - 100% */
+-			/*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
+-	u16 Panel_MIPI_Display_Descriptor;
+-			/*16 bits, Defined as follows: */
+-			/* if MIPI, 0x0000 if LVDS */
+-			/* Bit 0, Type, 2 bits, */
+-			/* 0: Type-1, */
+-			/* 1: Type-2, */
+-			/* 2: Type-3, */
+-			/* 3: Type-4 */
+-			/* Bit 2, Pixel Format, 4 bits */
+-			/* Bit0: 16bpp (not supported in LNC), */
+-			/* Bit1: 18bpp loosely packed, */
+-			/* Bit2: 18bpp packed, */
+-			/* Bit3: 24bpp */
+-			/* Bit 6, Reserved, 2 bits, 00b */
+-			/* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
+-			/* Bit 14, Reserved, 2 bits, 00b */
+-} __packed;
+-
+-union mrst_panel_rx {
+-	struct {
+-		u16 NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
+-			/* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
+-		u16 MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
+-		/*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
+-		u16 SupportedVideoTransferMode:2; /*0: Non-burst only */
+-					/* 1: Burst and non-burst */
+-					/* 2/3: Reserved */
+-		u16 HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
+-		u16 DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
+-		u16 ECC_ChecksumCapabilities:1;/*1 bit,0: No, 1: Yes*/
+-		u16 BidirectionalCommunication:1;/*1 bit,0: No, 1: Yes */
+-		u16 Rsvd:5;/*5 bits,00000b */
+-	} panelrx;
+-	u16 panel_receiver;
+-} __packed;
+-
+-struct mrst_gct_v1 {
+-	union { /*8 bits,Defined as follows: */
+-		struct {
+-			u8 PanelType:4; /*4 bits, Bit field for panels*/
+-					/* 0 - 3: 0 = LVDS, 1 = MIPI*/
+-					/*2 bits,Specifies which of the*/
+-			u8 BootPanelIndex:2;
+-					/* 4 panels to use by default*/
+-			u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
+-					/* the 4 MIPI DSI receivers to use*/
+-		} PD;
+-		u8 PanelDescriptor;
+-	};
+-	struct mrst_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
+-	union mrst_panel_rx panelrx[4]; /* panel receivers*/
+-} __packed;
+-
+-struct mrst_gct_v2 {
+-	union { /*8 bits,Defined as follows: */
+-		struct {
+-			u8 PanelType:4; /*4 bits, Bit field for panels*/
+-					/* 0 - 3: 0 = LVDS, 1 = MIPI*/
+-					/*2 bits,Specifies which of the*/
+-			u8 BootPanelIndex:2;
+-					/* 4 panels to use by default*/
+-			u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
+-					/* the 4 MIPI DSI receivers to use*/
+-		} PD;
+-		u8 PanelDescriptor;
+-	};
+-	struct mrst_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
+-	union mrst_panel_rx panelrx[4]; /* panel receivers*/
+-} __packed;
+-
+-struct mrst_gct_data {
+-	u8 bpi; /* boot panel index, number of panel used during boot */
+-	u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
+-	struct mrst_timing_info DTD; /* timing info for the selected panel */
+-	u32 Panel_Port_Control;
+-	u32 PP_On_Sequencing;/*1 dword,Register 0x61208,*/
+-	u32 PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
+-	u32 PP_Cycle_Delay;
+-	u16 Panel_Backlight_Inverter_Descriptor;
+-	u16 Panel_MIPI_Display_Descriptor;
+-} __packed;
+-
+-#define MODE_SETTING_IN_CRTC		0x1
+-#define MODE_SETTING_IN_ENCODER		0x2
+-#define MODE_SETTING_ON_GOING		0x3
+-#define MODE_SETTING_IN_DSR		0x4
+-#define MODE_SETTING_ENCODER_DONE	0x8
+-
+-#define GCT_R10_HEADER_SIZE		16
+-#define GCT_R10_DISPLAY_DESC_SIZE	28
+-
+-/*
+- *	Moorestown HDMI interfaces
+- */
+-
+-struct mrst_hdmi_dev {
+-	struct pci_dev *dev;
+-	void __iomem *regs;
+-	unsigned int mmio, mmio_len;
+-	int dpms_mode;
+-	struct hdmi_i2c_dev *i2c_dev;
+-
+-	/* register state */
+-	u32 saveDPLL_CTRL;
+-	u32 saveDPLL_DIV_CTRL;
+-	u32 saveDPLL_ADJUST;
+-	u32 saveDPLL_UPDATE;
+-	u32 saveDPLL_CLK_ENABLE;
+-	u32 savePCH_HTOTAL_B;
+-	u32 savePCH_HBLANK_B;
+-	u32 savePCH_HSYNC_B;
+-	u32 savePCH_VTOTAL_B;
+-	u32 savePCH_VBLANK_B;
+-	u32 savePCH_VSYNC_B;
+-	u32 savePCH_PIPEBCONF;
+-	u32 savePCH_PIPEBSRC;
+-};
+-
+-extern void mrst_hdmi_setup(struct drm_device *dev);
+-extern void mrst_hdmi_teardown(struct drm_device *dev);
+-extern int  mrst_hdmi_i2c_init(struct pci_dev *dev);
+-extern void mrst_hdmi_i2c_exit(struct pci_dev *dev);
+-extern void mrst_hdmi_save(struct drm_device *dev);
+-extern void mrst_hdmi_restore(struct drm_device *dev);
+-extern void mrst_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
+diff --git a/drivers/staging/gma500/mrst_crtc.c b/drivers/staging/gma500/mrst_crtc.c
+deleted file mode 100644
+index c9311a5..0000000
+--- a/drivers/staging/gma500/mrst_crtc.c
++++ /dev/null
+@@ -1,604 +0,0 @@
+-/*
+- * Copyright © 2009 Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- */
+-
+-#include <linux/i2c.h>
+-#include <linux/pm_runtime.h>
+-
+-#include <drm/drmP.h>
+-#include "framebuffer.h"
+-#include "psb_drv.h"
+-#include "psb_intel_drv.h"
+-#include "psb_intel_reg.h"
+-#include "psb_intel_display.h"
+-#include "power.h"
+-
+-struct psb_intel_range_t {
+-	int min, max;
+-};
+-
+-struct mrst_limit_t {
+-	struct psb_intel_range_t dot, m, p1;
+-};
+-
+-struct mrst_clock_t {
+-	/* derived values */
+-	int dot;
+-	int m;
+-	int p1;
+-};
+-
+-#define MRST_LIMIT_LVDS_100L	    0
+-#define MRST_LIMIT_LVDS_83	    1
+-#define MRST_LIMIT_LVDS_100	    2
+-
+-#define MRST_DOT_MIN		  19750
+-#define MRST_DOT_MAX		  120000
+-#define MRST_M_MIN_100L		    20
+-#define MRST_M_MIN_100		    10
+-#define MRST_M_MIN_83		    12
+-#define MRST_M_MAX_100L		    34
+-#define MRST_M_MAX_100		    17
+-#define MRST_M_MAX_83		    20
+-#define MRST_P1_MIN		    2
+-#define MRST_P1_MAX_0		    7
+-#define MRST_P1_MAX_1		    8
+-
+-static const struct mrst_limit_t mrst_limits[] = {
+-	{			/* MRST_LIMIT_LVDS_100L */
+-	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+-	 .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
+-	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+-	 },
+-	{			/* MRST_LIMIT_LVDS_83L */
+-	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+-	 .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
+-	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
+-	 },
+-	{			/* MRST_LIMIT_LVDS_100 */
+-	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
+-	 .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
+-	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
+-	 },
+-};
+-
+-#define MRST_M_MIN	    10
+-static const u32 mrst_m_converts[] = {
+-	0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
+-	0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
+-	0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
+-};
+-
+-static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
+-{
+-	const struct mrst_limit_t *limit = NULL;
+-	struct drm_device *dev = crtc->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+-	    || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
+-		switch (dev_priv->core_freq) {
+-		case 100:
+-			limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
+-			break;
+-		case 166:
+-			limit = &mrst_limits[MRST_LIMIT_LVDS_83];
+-			break;
+-		case 200:
+-			limit = &mrst_limits[MRST_LIMIT_LVDS_100];
+-			break;
+-		}
+-	} else {
+-		limit = NULL;
+-		dev_err(dev->dev, "mrst_limit Wrong display type.\n");
+-	}
+-
+-	return limit;
+-}
+-
+-/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+-static void mrst_clock(int refclk, struct mrst_clock_t *clock)
+-{
+-	clock->dot = (refclk * clock->m) / (14 * clock->p1);
+-}
+-
+-void mrstPrintPll(char *prefix, struct mrst_clock_t *clock)
+-{
+-	pr_debug("%s: dotclock = %d,  m = %d, p1 = %d.\n",
+-	     prefix, clock->dot, clock->m, clock->p1);
+-}
+-
+-/**
+- * Returns a set of divisors for the desired target clock with the given refclk,
+- * or FALSE.  Divisor values are the actual divisors for
+- */
+-static bool
+-mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
+-		struct mrst_clock_t *best_clock)
+-{
+-	struct mrst_clock_t clock;
+-	const struct mrst_limit_t *limit = mrst_limit(crtc);
+-	int err = target;
+-
+-	memset(best_clock, 0, sizeof(*best_clock));
+-
+-	for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
+-		for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
+-		     clock.p1++) {
+-			int this_err;
+-
+-			mrst_clock(refclk, &clock);
+-
+-			this_err = abs(clock.dot - target);
+-			if (this_err < err) {
+-				*best_clock = clock;
+-				err = this_err;
+-			}
+-		}
+-	}
+-	dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
+-	return err != target;
+-}
+-
+-/**
+- * Sets the power management mode of the pipe and plane.
+- *
+- * This code should probably grow support for turning the cursor off and back
+- * on appropriately at the same time as we're turning the pipe off/on.
+- */
+-static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
+-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+-	int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
+-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+-	u32 temp;
+-	bool enabled;
+-
+-	if (!gma_power_begin(dev, true))
+-		return;
+-
+-	/* XXX: When our outputs are all unaware of DPMS modes other than off
+-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+-	 */
+-	switch (mode) {
+-	case DRM_MODE_DPMS_ON:
+-	case DRM_MODE_DPMS_STANDBY:
+-	case DRM_MODE_DPMS_SUSPEND:
+-		/* Enable the DPLL */
+-		temp = REG_READ(dpll_reg);
+-		if ((temp & DPLL_VCO_ENABLE) == 0) {
+-			REG_WRITE(dpll_reg, temp);
+-			REG_READ(dpll_reg);
+-			/* Wait for the clocks to stabilize. */
+-			udelay(150);
+-			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+-			REG_READ(dpll_reg);
+-			/* Wait for the clocks to stabilize. */
+-			udelay(150);
+-			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+-			REG_READ(dpll_reg);
+-			/* Wait for the clocks to stabilize. */
+-			udelay(150);
+-		}
+-		/* Enable the pipe */
+-		temp = REG_READ(pipeconf_reg);
+-		if ((temp & PIPEACONF_ENABLE) == 0)
+-			REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+-		/* Enable the plane */
+-		temp = REG_READ(dspcntr_reg);
+-		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+-			REG_WRITE(dspcntr_reg,
+-				  temp | DISPLAY_PLANE_ENABLE);
+-			/* Flush the plane changes */
+-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+-		}
+-
+-		psb_intel_crtc_load_lut(crtc);
+-
+-		/* Give the overlay scaler a chance to enable
+-		   if it's on this pipe */
+-		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+-		break;
+-	case DRM_MODE_DPMS_OFF:
+-		/* Give the overlay scaler a chance to disable
+-		 * if it's on this pipe */
+-		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+-
+-		/* Disable the VGA plane that we never use */
+-		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+-		/* Disable display plane */
+-		temp = REG_READ(dspcntr_reg);
+-		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+-			REG_WRITE(dspcntr_reg,
+-				  temp & ~DISPLAY_PLANE_ENABLE);
+-			/* Flush the plane changes */
+-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+-			REG_READ(dspbase_reg);
+-		}
+-
+-		/* Next, disable display pipes */
+-		temp = REG_READ(pipeconf_reg);
+-		if ((temp & PIPEACONF_ENABLE) != 0) {
+-			REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+-			REG_READ(pipeconf_reg);
+-		}
+-		/* Wait for for the pipe disable to take effect. */
+-		psb_intel_wait_for_vblank(dev);
+-
+-		temp = REG_READ(dpll_reg);
+-		if ((temp & DPLL_VCO_ENABLE) != 0) {
+-			REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+-			REG_READ(dpll_reg);
+-		}
+-
+-		/* Wait for the clocks to turn off. */
+-		udelay(150);
+-		break;
+-	}
+-
+-	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+-
+-	/*Set FIFO Watermarks*/
+-	REG_WRITE(DSPARB, 0x3FFF);
+-	REG_WRITE(DSPFW1, 0x3F88080A);
+-	REG_WRITE(DSPFW2, 0x0b060808);
+-	REG_WRITE(DSPFW3, 0x0);
+-	REG_WRITE(DSPFW4, 0x08030404);
+-	REG_WRITE(DSPFW5, 0x04040404);
+-	REG_WRITE(DSPFW6, 0x78);
+-	REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
+-	/* Must write Bit 14 of the Chicken Bit Register */
+-
+-	gma_power_end(dev);
+-}
+-
+-/**
+- * Return the pipe currently connected to the panel fitter,
+- * or -1 if the panel fitter is not present or not in use
+- */
+-static int mrst_panel_fitter_pipe(struct drm_device *dev)
+-{
+-	u32 pfit_control;
+-
+-	pfit_control = REG_READ(PFIT_CONTROL);
+-
+-	/* See if the panel fitter is in use */
+-	if ((pfit_control & PFIT_ENABLE) == 0)
+-		return -1;
+-	return (pfit_control >> 29) & 3;
+-}
+-
+-static int mrst_crtc_mode_set(struct drm_crtc *crtc,
+-			      struct drm_display_mode *mode,
+-			      struct drm_display_mode *adjusted_mode,
+-			      int x, int y,
+-			      struct drm_framebuffer *old_fb)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	int pipe = psb_intel_crtc->pipe;
+-	int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
+-	int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
+-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+-	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+-	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+-	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+-	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+-	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+-	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+-	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+-	int refclk = 0;
+-	struct mrst_clock_t clock;
+-	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+-	bool ok, is_sdvo = false;
+-	bool is_crt = false, is_lvds = false, is_tv = false;
+-	bool is_mipi = false;
+-	struct drm_mode_config *mode_config = &dev->mode_config;
+-	struct psb_intel_output *psb_intel_output = NULL;
+-	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
+-	struct drm_encoder *encoder;
+-
+-	if (!gma_power_begin(dev, true))
+-		return 0;
+-
+-	memcpy(&psb_intel_crtc->saved_mode,
+-		mode,
+-		sizeof(struct drm_display_mode));
+-	memcpy(&psb_intel_crtc->saved_adjusted_mode,
+-		adjusted_mode,
+-		sizeof(struct drm_display_mode));
+-
+-	list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+-
+-		if (encoder->crtc != crtc)
+-			continue;
+-
+-		psb_intel_output = enc_to_psb_intel_output(encoder);
+-		switch (psb_intel_output->type) {
+-		case INTEL_OUTPUT_LVDS:
+-			is_lvds = true;
+-			break;
+-		case INTEL_OUTPUT_SDVO:
+-			is_sdvo = true;
+-			break;
+-		case INTEL_OUTPUT_TVOUT:
+-			is_tv = true;
+-			break;
+-		case INTEL_OUTPUT_ANALOG:
+-			is_crt = true;
+-			break;
+-		case INTEL_OUTPUT_MIPI:
+-			is_mipi = true;
+-			break;
+-		}
+-	}
+-
+-	/* Disable the VGA plane that we never use */
+-	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+-
+-	/* Disable the panel fitter if it was on our pipe */
+-	if (mrst_panel_fitter_pipe(dev) == pipe)
+-		REG_WRITE(PFIT_CONTROL, 0);
+-
+-	REG_WRITE(pipesrc_reg,
+-		  ((mode->crtc_hdisplay - 1) << 16) |
+-		  (mode->crtc_vdisplay - 1));
+-
+-	if (psb_intel_output)
+-		drm_connector_property_get_value(&psb_intel_output->base,
+-			dev->mode_config.scaling_mode_property, &scalingType);
+-
+-	if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
+-		/* Moorestown doesn't have register support for centering so
+-		 * we need to mess with the h/vblank and h/vsync start and
+-		 * ends to get centering */
+-		int offsetX = 0, offsetY = 0;
+-
+-		offsetX = (adjusted_mode->crtc_hdisplay -
+-			   mode->crtc_hdisplay) / 2;
+-		offsetY = (adjusted_mode->crtc_vdisplay -
+-			   mode->crtc_vdisplay) / 2;
+-
+-		REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
+-			((adjusted_mode->crtc_htotal - 1) << 16));
+-		REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
+-			((adjusted_mode->crtc_vtotal - 1) << 16));
+-		REG_WRITE(hblank_reg,
+-			(adjusted_mode->crtc_hblank_start - offsetX - 1) |
+-			((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
+-		REG_WRITE(hsync_reg,
+-			(adjusted_mode->crtc_hsync_start - offsetX - 1) |
+-			((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
+-		REG_WRITE(vblank_reg,
+-			(adjusted_mode->crtc_vblank_start - offsetY - 1) |
+-			((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
+-		REG_WRITE(vsync_reg,
+-			(adjusted_mode->crtc_vsync_start - offsetY - 1) |
+-			((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
+-	} else {
+-		REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+-			((adjusted_mode->crtc_htotal - 1) << 16));
+-		REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+-			((adjusted_mode->crtc_vtotal - 1) << 16));
+-		REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+-			((adjusted_mode->crtc_hblank_end - 1) << 16));
+-		REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+-			((adjusted_mode->crtc_hsync_end - 1) << 16));
+-		REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+-			((adjusted_mode->crtc_vblank_end - 1) << 16));
+-		REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+-			((adjusted_mode->crtc_vsync_end - 1) << 16));
+-	}
+-
+-	/* Flush the plane changes */
+-	{
+-		struct drm_crtc_helper_funcs *crtc_funcs =
+-		    crtc->helper_private;
+-		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+-	}
+-
+-	/* setup pipeconf */
+-	pipeconf = REG_READ(pipeconf_reg);
+-
+-	/* Set up the display plane register */
+-	dspcntr = REG_READ(dspcntr_reg);
+-	dspcntr |= DISPPLANE_GAMMA_ENABLE;
+-
+-	if (pipe == 0)
+-		dspcntr |= DISPPLANE_SEL_PIPE_A;
+-	else
+-		dspcntr |= DISPPLANE_SEL_PIPE_B;
+-
+-	dev_priv->dspcntr = dspcntr |= DISPLAY_PLANE_ENABLE;
+-	dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
+-
+-	if (is_mipi)
+-		goto mrst_crtc_mode_set_exit;
+-
+-	refclk = dev_priv->core_freq * 1000;
+-
+-	dpll = 0;		/*BIT16 = 0 for 100MHz reference */
+-
+-	ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
+-
+-	if (!ok) {
+-		dev_dbg(dev->dev, "mrstFindBestPLL fail in mrst_crtc_mode_set.\n");
+-	} else {
+-		dev_dbg(dev->dev, "mrst_crtc_mode_set pixel clock = %d,"
+-			 "m = %x, p1 = %x.\n", clock.dot, clock.m,
+-			 clock.p1);
+-	}
+-
+-	fp = mrst_m_converts[(clock.m - MRST_M_MIN)] << 8;
+-
+-	dpll |= DPLL_VGA_MODE_DIS;
+-
+-
+-	dpll |= DPLL_VCO_ENABLE;
+-
+-	if (is_lvds)
+-		dpll |= DPLLA_MODE_LVDS;
+-	else
+-		dpll |= DPLLB_MODE_DAC_SERIAL;
+-
+-	if (is_sdvo) {
+-		int sdvo_pixel_multiply =
+-		    adjusted_mode->clock / mode->clock;
+-
+-		dpll |= DPLL_DVO_HIGH_SPEED;
+-		dpll |=
+-		    (sdvo_pixel_multiply -
+-		     1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+-	}
+-
+-
+-	/* compute bitmask from p1 value */
+-	dpll |= (1 << (clock.p1 - 2)) << 17;
+-
+-	dpll |= DPLL_VCO_ENABLE;
+-
+-	mrstPrintPll("chosen", &clock);
+-
+-	if (dpll & DPLL_VCO_ENABLE) {
+-		REG_WRITE(fp_reg, fp);
+-		REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+-		REG_READ(dpll_reg);
+-		/* Check the DPLLA lock bit PIPEACONF[29] */
+-		udelay(150);
+-	}
+-
+-	REG_WRITE(fp_reg, fp);
+-	REG_WRITE(dpll_reg, dpll);
+-	REG_READ(dpll_reg);
+-	/* Wait for the clocks to stabilize. */
+-	udelay(150);
+-
+-	/* write it again -- the BIOS does, after all */
+-	REG_WRITE(dpll_reg, dpll);
+-	REG_READ(dpll_reg);
+-	/* Wait for the clocks to stabilize. */
+-	udelay(150);
+-
+-	REG_WRITE(pipeconf_reg, pipeconf);
+-	REG_READ(pipeconf_reg);
+-	psb_intel_wait_for_vblank(dev);
+-
+-	REG_WRITE(dspcntr_reg, dspcntr);
+-	psb_intel_wait_for_vblank(dev);
+-
+-mrst_crtc_mode_set_exit:
+-	gma_power_end(dev);
+-	return 0;
+-}
+-
+-static bool mrst_crtc_mode_fixup(struct drm_crtc *crtc,
+-				  struct drm_display_mode *mode,
+-				  struct drm_display_mode *adjusted_mode)
+-{
+-	return true;
+-}
+-
+-int mrst_pipe_set_base(struct drm_crtc *crtc,
+-			    int x, int y, struct drm_framebuffer *old_fb)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+-	int pipe = psb_intel_crtc->pipe;
+-	unsigned long start, offset;
+-
+-	int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
+-	int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+-	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+-	u32 dspcntr;
+-	int ret = 0;
+-
+-	/* no fb bound */
+-	if (!crtc->fb) {
+-		dev_dbg(dev->dev, "No FB bound\n");
+-		return 0;
+-	}
+-
+-	if (!gma_power_begin(dev, true))
+-		return 0;
+-
+-	start = psbfb->gtt->offset;
+-	offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+-
+-	REG_WRITE(dspstride, crtc->fb->pitch);
+-
+-	dspcntr = REG_READ(dspcntr_reg);
+-	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+-
+-	switch (crtc->fb->bits_per_pixel) {
+-	case 8:
+-		dspcntr |= DISPPLANE_8BPP;
+-		break;
+-	case 16:
+-		if (crtc->fb->depth == 15)
+-			dspcntr |= DISPPLANE_15_16BPP;
+-		else
+-			dspcntr |= DISPPLANE_16BPP;
+-		break;
+-	case 24:
+-	case 32:
+-		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+-		break;
+-	default:
+-		dev_err(dev->dev, "Unknown color depth\n");
+-		ret = -EINVAL;
+-		goto pipe_set_base_exit;
+-	}
+-	REG_WRITE(dspcntr_reg, dspcntr);
+-
+-	REG_WRITE(dspbase, offset);
+-	REG_READ(dspbase);
+-	REG_WRITE(dspsurf, start);
+-	REG_READ(dspsurf);
+-
+-pipe_set_base_exit:
+-	gma_power_end(dev);
+-	return ret;
+-}
+-
+-static void mrst_crtc_prepare(struct drm_crtc *crtc)
+-{
+-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+-}
+-
+-static void mrst_crtc_commit(struct drm_crtc *crtc)
+-{
+-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+-}
+-
+-const struct drm_crtc_helper_funcs mrst_helper_funcs = {
+-	.dpms = mrst_crtc_dpms,
+-	.mode_fixup = mrst_crtc_mode_fixup,
+-	.mode_set = mrst_crtc_mode_set,
+-	.mode_set_base = mrst_pipe_set_base,
+-	.prepare = mrst_crtc_prepare,
+-	.commit = mrst_crtc_commit,
+-};
+-
+diff --git a/drivers/staging/gma500/mrst_device.c b/drivers/staging/gma500/mrst_device.c
+deleted file mode 100644
+index 6707faf..0000000
+--- a/drivers/staging/gma500/mrst_device.c
++++ /dev/null
+@@ -1,634 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2011, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- **************************************************************************/
+-
+-#include <linux/backlight.h>
+-#include <linux/module.h>
+-#include <linux/dmi.h>
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-#include "psb_drm.h"
+-#include "psb_drv.h"
+-#include "psb_reg.h"
+-#include "psb_intel_reg.h"
+-#include <asm/mrst.h>
+-#include <asm/intel_scu_ipc.h>
+-#include "mid_bios.h"
+-
+-static int devtype;
+-
+-module_param_named(type, devtype, int, 0600);
+-MODULE_PARM_DESC(type, "Moorestown/Oaktrail device type");
+-
+-#define DEVICE_MOORESTOWN		1
+-#define DEVICE_OAKTRAIL			2
+-#define DEVICE_MOORESTOWN_MM		3
+-
+-static int mrst_device_ident(struct drm_device *dev)
+-{
+-	/* User forced */
+-	if (devtype)
+-		return devtype;
+-	if (dmi_match(DMI_PRODUCT_NAME, "OakTrail") ||
+-		dmi_match(DMI_PRODUCT_NAME, "OakTrail platform"))
+-		return DEVICE_OAKTRAIL;
+-#if defined(CONFIG_X86_MRST)
+-	if (dmi_match(DMI_PRODUCT_NAME, "MM") ||
+-		dmi_match(DMI_PRODUCT_NAME, "MM 10"))
+-		return DEVICE_MOORESTOWN_MM;
+-	if (mrst_identify_cpu())
+-		return DEVICE_MOORESTOWN;
+-#endif
+-	return DEVICE_OAKTRAIL;
+-}
+-
+-
+-/* IPC message and command defines used to enable/disable mipi panel voltages */
+-#define IPC_MSG_PANEL_ON_OFF    0xE9
+-#define IPC_CMD_PANEL_ON        1
+-#define IPC_CMD_PANEL_OFF       0
+-
+-static int mrst_output_init(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	if (dev_priv->iLVDS_enable)
+-		mrst_lvds_init(dev, &dev_priv->mode_dev);
+-	else
+-		dev_err(dev->dev, "DSI is not supported\n");
+-	if (dev_priv->hdmi_priv)
+-		mrst_hdmi_init(dev, &dev_priv->mode_dev);
+-	return 0;
+-}
+-
+-/*
+- *	Provide the low level interfaces for the Moorestown backlight
+- */
+-
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-
+-#define MRST_BLC_MAX_PWM_REG_FREQ	    0xFFFF
+-#define BLC_PWM_PRECISION_FACTOR 100	/* 10000000 */
+-#define BLC_PWM_FREQ_CALC_CONSTANT 32
+-#define MHz 1000000
+-#define BLC_ADJUSTMENT_MAX 100
+-
+-static struct backlight_device *mrst_backlight_device;
+-static int mrst_brightness;
+-
+-static int mrst_set_brightness(struct backlight_device *bd)
+-{
+-	struct drm_device *dev = bl_get_data(mrst_backlight_device);
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	int level = bd->props.brightness;
+-	u32 blc_pwm_ctl;
+-	u32 max_pwm_blc;
+-
+-	/* Percentage 1-100% being valid */
+-	if (level < 1)
+-		level = 1;
+-
+-	if (gma_power_begin(dev, 0)) {
+-		/* Calculate and set the brightness value */
+-		max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;
+-		blc_pwm_ctl = level * max_pwm_blc / 100;
+-
+-		/* Adjust the backlight level with the percent in
+-		 * dev_priv->blc_adj1;
+-		 */
+-		blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
+-		blc_pwm_ctl = blc_pwm_ctl / 100;
+-
+-		/* Adjust the backlight level with the percent in
+-		 * dev_priv->blc_adj2;
+-		 */
+-		blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
+-		blc_pwm_ctl = blc_pwm_ctl / 100;
+-
+-		/* force PWM bit on */
+-		REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+-		REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
+-		gma_power_end(dev);
+-	}
+-	mrst_brightness = level;
+-	return 0;
+-}
+-
+-static int mrst_get_brightness(struct backlight_device *bd)
+-{
+-	/* return locally cached var instead of HW read (due to DPST etc.) */
+-	/* FIXME: ideally return actual value in case firmware fiddled with
+-	   it */
+-	return mrst_brightness;
+-}
+-
+-static int device_backlight_init(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	unsigned long core_clock;
+-	u16 bl_max_freq;
+-	uint32_t value;
+-	uint32_t blc_pwm_precision_factor;
+-
+-	dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
+-	dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
+-	bl_max_freq = 256;
+-	/* this needs to be set elsewhere */
+-	blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
+-
+-	core_clock = dev_priv->core_freq;
+-
+-	value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+-	value *= blc_pwm_precision_factor;
+-	value /= bl_max_freq;
+-	value /= blc_pwm_precision_factor;
+-
+-	if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
+-			return -ERANGE;
+-
+-	if (gma_power_begin(dev, false)) {
+-		REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
+-		REG_WRITE(BLC_PWM_CTL, value | (value << 16));
+-		gma_power_end(dev);
+-	}
+-	return 0;
+-}
+-
+-static const struct backlight_ops mrst_ops = {
+-	.get_brightness = mrst_get_brightness,
+-	.update_status  = mrst_set_brightness,
+-};
+-
+-int mrst_backlight_init(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	int ret;
+-	struct backlight_properties props;
+-
+-	memset(&props, 0, sizeof(struct backlight_properties));
+-	props.max_brightness = 100;
+-	props.type = BACKLIGHT_PLATFORM;
+-
+-	mrst_backlight_device = backlight_device_register("mrst-bl",
+-					NULL, (void *)dev, &mrst_ops, &props);
+-
+-	if (IS_ERR(mrst_backlight_device))
+-		return PTR_ERR(mrst_backlight_device);
+-
+-	ret = device_backlight_init(dev);
+-	if (ret < 0) {
+-		backlight_device_unregister(mrst_backlight_device);
+-		return ret;
+-	}
+-	mrst_backlight_device->props.brightness = 100;
+-	mrst_backlight_device->props.max_brightness = 100;
+-	backlight_update_status(mrst_backlight_device);
+-	dev_priv->backlight_device = mrst_backlight_device;
+-	return 0;
+-}
+-
+-#endif
+-
+-/*
+- *	Provide the Moorestown specific chip logic and low level methods
+- *	for power management
+- */
+-
+-static void mrst_init_pm(struct drm_device *dev)
+-{
+-}
+-
+-/**
+- *	mrst_save_display_registers	-	save registers lost on suspend
+- *	@dev: our DRM device
+- *
+- *	Save the state we need in order to be able to restore the interface
+- *	upon resume from suspend
+- */
+-static int mrst_save_display_registers(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	int i;
+-	u32 pp_stat;
+-
+-	/* Display arbitration control + watermarks */
+-	dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
+-	dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+-	dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+-	dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+-	dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+-	dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+-	dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+-	dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+-
+-	/* Pipe & plane A info */
+-	dev_priv->savePIPEACONF = PSB_RVDC32(PIPEACONF);
+-	dev_priv->savePIPEASRC = PSB_RVDC32(PIPEASRC);
+-	dev_priv->saveFPA0 = PSB_RVDC32(MRST_FPA0);
+-	dev_priv->saveFPA1 = PSB_RVDC32(MRST_FPA1);
+-	dev_priv->saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
+-	dev_priv->saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
+-	dev_priv->saveHBLANK_A = PSB_RVDC32(HBLANK_A);
+-	dev_priv->saveHSYNC_A = PSB_RVDC32(HSYNC_A);
+-	dev_priv->saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
+-	dev_priv->saveVBLANK_A = PSB_RVDC32(VBLANK_A);
+-	dev_priv->saveVSYNC_A = PSB_RVDC32(VSYNC_A);
+-	dev_priv->saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
+-	dev_priv->saveDSPACNTR = PSB_RVDC32(DSPACNTR);
+-	dev_priv->saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
+-	dev_priv->saveDSPAADDR = PSB_RVDC32(DSPABASE);
+-	dev_priv->saveDSPASURF = PSB_RVDC32(DSPASURF);
+-	dev_priv->saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
+-	dev_priv->saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
+-
+-	/* Save cursor regs */
+-	dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
+-	dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
+-	dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
+-
+-	/* Save palette (gamma) */
+-	for (i = 0; i < 256; i++)
+-		dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
+-
+-	if (dev_priv->hdmi_priv)
+-		mrst_hdmi_save(dev);
+-
+-	/* Save performance state */
+-	dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
+-
+-	/* LVDS state */
+-	dev_priv->savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
+-	dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
+-	dev_priv->savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
+-	dev_priv->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
+-	dev_priv->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
+-	dev_priv->saveLVDS = PSB_RVDC32(LVDS);
+-	dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
+-	dev_priv->savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
+-	dev_priv->savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
+-	dev_priv->savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
+-
+-	/* HW overlay */
+-	dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
+-	dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
+-	dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
+-	dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
+-	dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
+-	dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
+-	dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
+-
+-	/* DPST registers */
+-	dev_priv->saveHISTOGRAM_INT_CONTROL_REG =
+-					PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+-	dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG =
+-					PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+-	dev_priv->savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC);
+-
+-	if (dev_priv->iLVDS_enable) {
+-		/* Shut down the panel */
+-		PSB_WVDC32(0, PP_CONTROL);
+-
+-		do {
+-			pp_stat = PSB_RVDC32(PP_STATUS);
+-		} while (pp_stat & 0x80000000);
+-
+-		/* Turn off the plane */
+-		PSB_WVDC32(0x58000000, DSPACNTR);
+-		/* Trigger the plane disable */
+-		PSB_WVDC32(0, DSPASURF);
+-
+-		/* Wait ~4 ticks */
+-		msleep(4);
+-
+-		/* Turn off pipe */
+-		PSB_WVDC32(0x0, PIPEACONF);
+-		/* Wait ~8 ticks */
+-		msleep(8);
+-
+-		/* Turn off PLLs */
+-		PSB_WVDC32(0, MRST_DPLL_A);
+-	}
+-	return 0;
+-}
+-
+-/**
+- *	mrst_restore_display_registers	-	restore lost register state
+- *	@dev: our DRM device
+- *
+- *	Restore register state that was lost during suspend and resume.
+- */
+-static int mrst_restore_display_registers(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 pp_stat;
+-	int i;
+-
+-	/* Display arbitration + watermarks */
+-	PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
+-	PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
+-	PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
+-	PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
+-	PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
+-	PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
+-	PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
+-	PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
+-
+-	/* Make sure VGA plane is off. it initializes to on after reset!*/
+-	PSB_WVDC32(0x80000000, VGACNTRL);
+-
+-	/* set the plls */
+-	PSB_WVDC32(dev_priv->saveFPA0, MRST_FPA0);
+-	PSB_WVDC32(dev_priv->saveFPA1, MRST_FPA1);
+-
+-	/* Actually enable it */
+-	PSB_WVDC32(dev_priv->saveDPLL_A, MRST_DPLL_A);
+-	DRM_UDELAY(150);
+-
+-	/* Restore mode */
+-	PSB_WVDC32(dev_priv->saveHTOTAL_A, HTOTAL_A);
+-	PSB_WVDC32(dev_priv->saveHBLANK_A, HBLANK_A);
+-	PSB_WVDC32(dev_priv->saveHSYNC_A, HSYNC_A);
+-	PSB_WVDC32(dev_priv->saveVTOTAL_A, VTOTAL_A);
+-	PSB_WVDC32(dev_priv->saveVBLANK_A, VBLANK_A);
+-	PSB_WVDC32(dev_priv->saveVSYNC_A, VSYNC_A);
+-	PSB_WVDC32(dev_priv->savePIPEASRC, PIPEASRC);
+-	PSB_WVDC32(dev_priv->saveBCLRPAT_A, BCLRPAT_A);
+-
+-	/* Restore performance mode*/
+-	PSB_WVDC32(dev_priv->savePERF_MODE, MRST_PERF_MODE);
+-
+-	/* Enable the pipe*/
+-	if (dev_priv->iLVDS_enable)
+-		PSB_WVDC32(dev_priv->savePIPEACONF, PIPEACONF);
+-
+-	/* Set up the plane*/
+-	PSB_WVDC32(dev_priv->saveDSPALINOFF, DSPALINOFF);
+-	PSB_WVDC32(dev_priv->saveDSPASTRIDE, DSPASTRIDE);
+-	PSB_WVDC32(dev_priv->saveDSPATILEOFF, DSPATILEOFF);
+-
+-	/* Enable the plane */
+-	PSB_WVDC32(dev_priv->saveDSPACNTR, DSPACNTR);
+-	PSB_WVDC32(dev_priv->saveDSPASURF, DSPASURF);
+-
+-	/* Enable Cursor A */
+-	PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
+-	PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
+-	PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
+-
+-	/* Restore palette (gamma) */
+-	for (i = 0; i < 256; i++)
+-		PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i << 2));
+-
+-	if (dev_priv->hdmi_priv)
+-		mrst_hdmi_restore(dev);
+-
+-	if (dev_priv->iLVDS_enable) {
+-		PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
+-		PSB_WVDC32(dev_priv->saveLVDS, LVDS); /*port 61180h*/
+-		PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
+-		PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
+-		PSB_WVDC32(dev_priv->savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
+-		PSB_WVDC32(dev_priv->saveBLC_PWM_CTL, BLC_PWM_CTL);
+-		PSB_WVDC32(dev_priv->savePP_ON_DELAYS, LVDSPP_ON);
+-		PSB_WVDC32(dev_priv->savePP_OFF_DELAYS, LVDSPP_OFF);
+-		PSB_WVDC32(dev_priv->savePP_DIVISOR, PP_CYCLE);
+-		PSB_WVDC32(dev_priv->savePP_CONTROL, PP_CONTROL);
+-	}
+-
+-	/* Wait for cycle delay */
+-	do {
+-		pp_stat = PSB_RVDC32(PP_STATUS);
+-	} while (pp_stat & 0x08000000);
+-
+-	/* Wait for panel power up */
+-	do {
+-		pp_stat = PSB_RVDC32(PP_STATUS);
+-	} while (pp_stat & 0x10000000);
+-
+-	/* Restore HW overlay */
+-	PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
+-	PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
+-
+-	/* DPST registers */
+-	PSB_WVDC32(dev_priv->saveHISTOGRAM_INT_CONTROL_REG,
+-						HISTOGRAM_INT_CONTROL);
+-	PSB_WVDC32(dev_priv->saveHISTOGRAM_LOGIC_CONTROL_REG,
+-						HISTOGRAM_LOGIC_CONTROL);
+-	PSB_WVDC32(dev_priv->savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC);
+-
+-	return 0;
+-}
+-
+-/**
+- *	mrst_power_down	-	power down the display island
+- *	@dev: our DRM device
+- *
+- *	Power down the display interface of our device
+- */
+-static int mrst_power_down(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 pwr_mask ;
+-	u32 pwr_sts;
+-
+-	pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+-	outl(pwr_mask, dev_priv->ospm_base + PSB_PM_SSC);
+-
+-	while (true) {
+-		pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+-		if ((pwr_sts & pwr_mask) == pwr_mask)
+-			break;
+-		else
+-			udelay(10);
+-	}
+-	return 0;
+-}
+-
+-/*
+- * mrst_power_up
+- *
+- * Restore power to the specified island(s) (powergating)
+- */
+-static int mrst_power_up(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
+-	u32 pwr_sts, pwr_cnt;
+-
+-	pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
+-	pwr_cnt &= ~pwr_mask;
+-	outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
+-
+-	while (true) {
+-		pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
+-		if ((pwr_sts & pwr_mask) == 0)
+-			break;
+-		else
+-			udelay(10);
+-	}
+-	return 0;
+-}
+-
+-#if defined(CONFIG_X86_MRST)
+-static void mrst_lvds_cache_bl(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	intel_scu_ipc_ioread8(0x28, &(dev_priv->saveBKLTCNT));
+-	intel_scu_ipc_ioread8(0x29, &(dev_priv->saveBKLTREQ));
+-	intel_scu_ipc_ioread8(0x2A, &(dev_priv->saveBKLTBRTL));
+-}
+-
+-static void mrst_mm_bl_power(struct drm_device *dev, bool on)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	if (on) {
+-		intel_scu_ipc_iowrite8(0x2A, dev_priv->saveBKLTBRTL);
+-		intel_scu_ipc_iowrite8(0x28, dev_priv->saveBKLTCNT);
+-		intel_scu_ipc_iowrite8(0x29, dev_priv->saveBKLTREQ);
+-	} else {
+-		intel_scu_ipc_iowrite8(0x2A, 0);
+-		intel_scu_ipc_iowrite8(0x28, 0);
+-		intel_scu_ipc_iowrite8(0x29, 0);
+-	}
+-}
+-
+-static const struct psb_ops mrst_mm_chip_ops = {
+-	.name = "Moorestown MM ",
+-	.accel_2d = 1,
+-	.pipes = 1,
+-	.crtcs = 1,
+-	.sgx_offset = MRST_SGX_OFFSET,
+-
+-	.crtc_helper = &mrst_helper_funcs,
+-	.crtc_funcs = &psb_intel_crtc_funcs,
+-
+-	.output_init = mrst_output_init,
+-
+-	.lvds_bl_power = mrst_mm_bl_power,
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-	.backlight_init = mrst_backlight_init,
+-#endif
+-
+-	.init_pm = mrst_init_pm,
+-	.save_regs = mrst_save_display_registers,
+-	.restore_regs = mrst_restore_display_registers,
+-	.power_down = mrst_power_down,
+-	.power_up = mrst_power_up,
+-
+-	.i2c_bus = 0,
+-};
+-
+-#endif
+-
+-static void oaktrail_teardown(struct drm_device *dev)
+-{
+-	mrst_hdmi_teardown(dev);
+-}
+-
+-static const struct psb_ops oaktrail_chip_ops = {
+-	.name = "Oaktrail",
+-	.accel_2d = 1,
+-	.pipes = 2,
+-	.crtcs = 2,
+-	.sgx_offset = MRST_SGX_OFFSET,
+-
+-	.chip_setup = mid_chip_setup,
+-	.chip_teardown = oaktrail_teardown,
+-	.crtc_helper = &mrst_helper_funcs,
+-	.crtc_funcs = &psb_intel_crtc_funcs,
+-
+-	.output_init = mrst_output_init,
+-
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-	.backlight_init = mrst_backlight_init,
+-#endif
+-
+-	.init_pm = mrst_init_pm,
+-	.save_regs = mrst_save_display_registers,
+-	.restore_regs = mrst_restore_display_registers,
+-	.power_down = mrst_power_down,
+-	.power_up = mrst_power_up,
+-
+-	.i2c_bus = 1,
+-};
+-
+-/**
+- *	mrst_chip_setup		-	perform the initial chip init
+- *	@dev: Our drm_device
+- *
+- *	Figure out which incarnation we are and then scan the firmware for
+- *	tables and information.
+- */
+-static int mrst_chip_setup(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	switch (mrst_device_ident(dev)) {
+-	case DEVICE_OAKTRAIL:
+-		/* Dual CRTC, PC compatible, HDMI, I2C #2 */
+-		dev_priv->ops = &oaktrail_chip_ops;
+-		mrst_hdmi_setup(dev);
+-		return mid_chip_setup(dev);
+-#if defined(CONFIG_X86_MRST)
+-	case DEVICE_MOORESTOWN_MM:
+-		/* Single CRTC, No HDMI, I2C #0, BL control */
+-		mrst_lvds_cache_bl(dev);
+-		dev_priv->ops = &mrst_mm_chip_ops;
+-		return mid_chip_setup(dev);
+-	case DEVICE_MOORESTOWN:
+-		/* Dual CRTC, No HDMI(?), I2C #1 */
+-		return mid_chip_setup(dev);
+-#endif
+-	default:
+-		dev_err(dev->dev, "unsupported device type.\n");
+-		return -ENODEV;
+-	}
+-}
+-
+-const struct psb_ops mrst_chip_ops = {
+-	.name = "Moorestown",
+-	.accel_2d = 1,
+-	.pipes = 2,
+-	.crtcs = 2,
+-	.sgx_offset = MRST_SGX_OFFSET,
+-
+-	.chip_setup = mrst_chip_setup,
+-	.crtc_helper = &mrst_helper_funcs,
+-	.crtc_funcs = &psb_intel_crtc_funcs,
+-
+-	.output_init = mrst_output_init,
+-
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-	.backlight_init = mrst_backlight_init,
+-#endif
+-
+-	.init_pm = mrst_init_pm,
+-	.save_regs = mrst_save_display_registers,
+-	.restore_regs = mrst_restore_display_registers,
+-	.power_down = mrst_power_down,
+-	.power_up = mrst_power_up,
+-
+-	.i2c_bus = 2,
+-};
+-
+diff --git a/drivers/staging/gma500/mrst_hdmi.c b/drivers/staging/gma500/mrst_hdmi.c
+deleted file mode 100644
+index e66607e..0000000
+--- a/drivers/staging/gma500/mrst_hdmi.c
++++ /dev/null
+@@ -1,852 +0,0 @@
+-/*
+- * Copyright © 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- *	Li Peng <peng.li at intel.com>
+- */
+-
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-#include "psb_intel_drv.h"
+-#include "psb_intel_reg.h"
+-#include "psb_drv.h"
+-
+-#define HDMI_READ(reg)		readl(hdmi_dev->regs + (reg))
+-#define HDMI_WRITE(reg, val)	writel(val, hdmi_dev->regs + (reg))
+-
+-#define HDMI_HCR	0x1000
+-#define HCR_ENABLE_HDCP		(1 << 5)
+-#define HCR_ENABLE_AUDIO	(1 << 2)
+-#define HCR_ENABLE_PIXEL	(1 << 1)
+-#define HCR_ENABLE_TMDS		(1 << 0)
+-
+-#define HDMI_HICR	0x1004
+-#define HDMI_HSR	0x1008
+-#define HDMI_HISR	0x100C
+-#define HDMI_DETECT_HDP		(1 << 0)
+-
+-#define HDMI_VIDEO_REG	0x3000
+-#define HDMI_UNIT_EN		(1 << 7)
+-#define HDMI_MODE_OUTPUT	(1 << 0)
+-#define HDMI_HBLANK_A	0x3100
+-
+-#define HDMI_AUDIO_CTRL	0x4000
+-#define HDMI_ENABLE_AUDIO	(1 << 0)
+-
+-#define PCH_HTOTAL_B	0x3100
+-#define PCH_HBLANK_B	0x3104
+-#define PCH_HSYNC_B	0x3108
+-#define PCH_VTOTAL_B	0x310C
+-#define PCH_VBLANK_B	0x3110
+-#define PCH_VSYNC_B	0x3114
+-#define PCH_PIPEBSRC	0x311C
+-
+-#define PCH_PIPEB_DSL	0x3800
+-#define PCH_PIPEB_SLC	0x3804
+-#define PCH_PIPEBCONF	0x3808
+-#define PCH_PIPEBSTAT	0x3824
+-
+-#define CDVO_DFT	0x5000
+-#define CDVO_SLEWRATE	0x5004
+-#define CDVO_STRENGTH	0x5008
+-#define CDVO_RCOMP	0x500C
+-
+-#define DPLL_CTRL       0x6000
+-#define DPLL_PDIV_SHIFT		16
+-#define DPLL_PDIV_MASK		(0xf << 16)
+-#define DPLL_PWRDN		(1 << 4)
+-#define DPLL_RESET		(1 << 3)
+-#define DPLL_FASTEN		(1 << 2)
+-#define DPLL_ENSTAT		(1 << 1)
+-#define DPLL_DITHEN		(1 << 0)
+-
+-#define DPLL_DIV_CTRL   0x6004
+-#define DPLL_CLKF_MASK		0xffffffc0
+-#define DPLL_CLKR_MASK		(0x3f)
+-
+-#define DPLL_CLK_ENABLE 0x6008
+-#define DPLL_EN_DISP		(1 << 31)
+-#define DPLL_SEL_HDMI		(1 << 8)
+-#define DPLL_EN_HDMI		(1 << 1)
+-#define DPLL_EN_VGA		(1 << 0)
+-
+-#define DPLL_ADJUST     0x600C
+-#define DPLL_STATUS     0x6010
+-#define DPLL_UPDATE     0x6014
+-#define DPLL_DFT        0x6020
+-
+-struct intel_range {
+-	int	min, max;
+-};
+-
+-struct mrst_hdmi_limit {
+-	struct intel_range vco, np, nr, nf;
+-};
+-
+-struct mrst_hdmi_clock {
+-	int np;
+-	int nr;
+-	int nf;
+-	int dot;
+-};
+-
+-#define VCO_MIN		320000
+-#define VCO_MAX		1650000
+-#define	NP_MIN		1
+-#define	NP_MAX		15
+-#define	NR_MIN		1
+-#define	NR_MAX		64
+-#define NF_MIN		2
+-#define NF_MAX		4095
+-
+-static const struct mrst_hdmi_limit mrst_hdmi_limit = {
+-	.vco = { .min = VCO_MIN,		.max = VCO_MAX },
+-	.np  = { .min = NP_MIN,			.max = NP_MAX  },
+-	.nr  = { .min = NR_MIN,			.max = NR_MAX  },
+-	.nf  = { .min = NF_MIN,			.max = NF_MAX  },
+-};
+-
+-static void wait_for_vblank(struct drm_device *dev)
+-{
+-	/* FIXME: Can we do this as a sleep ? */
+-	/* Wait for 20ms, i.e. one cycle at 50hz. */
+-	mdelay(20);
+-}
+-
+-static void scu_busy_loop(void *scu_base)
+-{
+-	u32 status = 0;
+-	u32 loop_count = 0;
+-
+-	status = readl(scu_base + 0x04);
+-	while (status & 1) {
+-		udelay(1); /* scu processing time is in few u secods */
+-		status = readl(scu_base + 0x04);
+-		loop_count++;
+-		/* break if scu doesn't reset busy bit after huge retry */
+-		if (loop_count > 1000) {
+-			DRM_DEBUG_KMS("SCU IPC timed out");
+-			return;
+-		}
+-	}
+-}
+-
+-static void mrst_hdmi_reset(struct drm_device *dev)
+-{
+-	void *base;
+-	/* FIXME: at least make these defines */
+-	unsigned int scu_ipc_mmio = 0xff11c000;
+-	int scu_len = 1024;
+-
+-	base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
+-	if (base == NULL) {
+-		DRM_ERROR("failed to map SCU mmio\n");
+-		return;
+-	}
+-
+-	/* scu ipc: assert hdmi controller reset */
+-	writel(0xff11d118, base + 0x0c);
+-	writel(0x7fffffdf, base + 0x80);
+-	writel(0x42005, base + 0x0);
+-	scu_busy_loop(base);
+-
+-	/* scu ipc: de-assert hdmi controller reset */
+-	writel(0xff11d118, base + 0x0c);
+-	writel(0x7fffffff, base + 0x80);
+-	writel(0x42005, base + 0x0);
+-	scu_busy_loop(base);
+-
+-	iounmap(base);
+-}
+-
+-static void mrst_hdmi_audio_enable(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+-
+-	HDMI_WRITE(HDMI_HCR, 0x67);
+-	HDMI_READ(HDMI_HCR);
+-
+-	HDMI_WRITE(0x51a8, 0x10);
+-	HDMI_READ(0x51a8);
+-
+-	HDMI_WRITE(HDMI_AUDIO_CTRL, 0x1);
+-	HDMI_READ(HDMI_AUDIO_CTRL);
+-}
+-
+-static void mrst_hdmi_audio_disable(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+-
+-	HDMI_WRITE(0x51a8, 0x0);
+-	HDMI_READ(0x51a8);
+-
+-	HDMI_WRITE(HDMI_AUDIO_CTRL, 0x0);
+-	HDMI_READ(HDMI_AUDIO_CTRL);
+-
+-	HDMI_WRITE(HDMI_HCR, 0x47);
+-	HDMI_READ(HDMI_HCR);
+-}
+-
+-void mrst_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	u32 temp;
+-
+-	switch (mode) {
+-	case DRM_MODE_DPMS_OFF:
+-		/* Disable VGACNTRL */
+-		REG_WRITE(VGACNTRL, 0x80000000);
+-
+-		/* Disable plane */
+-		temp = REG_READ(DSPBCNTR);
+-		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+-			REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
+-			REG_READ(DSPBCNTR);
+-			/* Flush the plane changes */
+-			REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+-			REG_READ(DSPBSURF);
+-		}
+-
+-		/* Disable pipe B */
+-		temp = REG_READ(PIPEBCONF);
+-		if ((temp & PIPEACONF_ENABLE) != 0) {
+-			REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+-			REG_READ(PIPEBCONF);
+-		}
+-
+-		/* Disable LNW Pipes, etc */
+-		temp = REG_READ(PCH_PIPEBCONF);
+-		if ((temp & PIPEACONF_ENABLE) != 0) {
+-			REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
+-			REG_READ(PCH_PIPEBCONF);
+-		}
+-		/* wait for pipe off */
+-		udelay(150);
+-		/* Disable dpll */
+-		temp = REG_READ(DPLL_CTRL);
+-		if ((temp & DPLL_PWRDN) == 0) {
+-			REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
+-			REG_WRITE(DPLL_STATUS, 0x1);
+-		}
+-		/* wait for dpll off */
+-		udelay(150);
+-		break;
+-	case DRM_MODE_DPMS_ON:
+-	case DRM_MODE_DPMS_STANDBY:
+-	case DRM_MODE_DPMS_SUSPEND:
+-		/* Enable dpll */
+-		temp = REG_READ(DPLL_CTRL);
+-		if ((temp & DPLL_PWRDN) != 0) {
+-			REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
+-			temp = REG_READ(DPLL_CLK_ENABLE);
+-			REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
+-			REG_READ(DPLL_CLK_ENABLE);
+-		}
+-		/* wait for dpll warm up */
+-		udelay(150);
+-
+-		/* Enable pipe B */
+-		temp = REG_READ(PIPEBCONF);
+-		if ((temp & PIPEACONF_ENABLE) == 0) {
+-			REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
+-			REG_READ(PIPEBCONF);
+-		}
+-
+-		/* Enable LNW Pipe B */
+-		temp = REG_READ(PCH_PIPEBCONF);
+-		if ((temp & PIPEACONF_ENABLE) == 0) {
+-			REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
+-			REG_READ(PCH_PIPEBCONF);
+-		}
+-		wait_for_vblank(dev);
+-
+-		/* Enable plane */
+-		temp = REG_READ(DSPBCNTR);
+-		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+-			REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
+-			/* Flush the plane changes */
+-			REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
+-			REG_READ(DSPBSURF);
+-		}
+-		psb_intel_crtc_load_lut(crtc);
+-	}
+-	/* DSPARB */
+-	REG_WRITE(DSPARB, 0x00003fbf);
+-	/* FW1 */
+-	REG_WRITE(0x70034, 0x3f880a0a);
+-	/* FW2 */
+-	REG_WRITE(0x70038, 0x0b060808);
+-	/* FW4 */
+-	REG_WRITE(0x70050, 0x08030404);
+-	/* FW5 */
+-	REG_WRITE(0x70054, 0x04040404);
+-	/* LNC Chicken Bits */
+-	REG_WRITE(0x70400, 0x4000);
+-}
+-
+-
+-static void mrst_hdmi_dpms(struct drm_encoder *encoder, int mode)
+-{
+-	static int dpms_mode = -1;
+-
+-	struct drm_device *dev = encoder->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+-	u32 temp;
+-
+-	if (dpms_mode == mode)
+-		return;
+-
+-	if (mode != DRM_MODE_DPMS_ON)
+-		temp = 0x0;
+-	else
+-		temp = 0x99;
+-
+-	dpms_mode = mode;
+-	HDMI_WRITE(HDMI_VIDEO_REG, temp);
+-}
+-
+-static unsigned int htotal_calculate(struct drm_display_mode *mode)
+-{
+-	u32 htotal, new_crtc_htotal;
+-
+-	htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
+-
+-	/*
+-	 * 1024 x 768  new_crtc_htotal = 0x1024;
+-	 * 1280 x 1024 new_crtc_htotal = 0x0c34;
+-	 */
+-	new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
+-
+-	return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
+-}
+-
+-static void mrst_hdmi_find_dpll(struct drm_crtc *crtc, int target,
+-				int refclk, struct mrst_hdmi_clock *best_clock)
+-{
+-	int np_min, np_max, nr_min, nr_max;
+-	int np, nr, nf;
+-
+-	np_min = DIV_ROUND_UP(mrst_hdmi_limit.vco.min, target * 10);
+-	np_max = mrst_hdmi_limit.vco.max / (target * 10);
+-	if (np_min < mrst_hdmi_limit.np.min)
+-		np_min = mrst_hdmi_limit.np.min;
+-	if (np_max > mrst_hdmi_limit.np.max)
+-		np_max = mrst_hdmi_limit.np.max;
+-
+-	nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
+-	nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
+-	if (nr_min < mrst_hdmi_limit.nr.min)
+-		nr_min = mrst_hdmi_limit.nr.min;
+-	if (nr_max > mrst_hdmi_limit.nr.max)
+-		nr_max = mrst_hdmi_limit.nr.max;
+-
+-	np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
+-	nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
+-	nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
+-	DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
+-
+-	/*
+-	 * 1024 x 768  np = 1; nr = 0x26; nf = 0x0fd8000;
+-	 * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
+-	 */
+-	best_clock->np = np;
+-	best_clock->nr = nr - 1;
+-	best_clock->nf = (nf << 14);
+-}
+-
+-int mrst_crtc_hdmi_mode_set(struct drm_crtc *crtc,
+-			    struct drm_display_mode *mode,
+-			    struct drm_display_mode *adjusted_mode,
+-			    int x, int y,
+-			    struct drm_framebuffer *old_fb)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+-	int pipe = 1;
+-	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+-	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+-	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+-	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+-	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+-	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+-	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+-	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+-	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+-	int refclk;
+-	struct mrst_hdmi_clock clock;
+-	u32 dspcntr, pipeconf, dpll, temp;
+-	int dspcntr_reg = DSPBCNTR;
+-
+-	/* Disable the VGA plane that we never use */
+-	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+-
+-	/* XXX: Disable the panel fitter if it was on our pipe */
+-
+-	/* Disable dpll if necessary */
+-	dpll = REG_READ(DPLL_CTRL);
+-	if ((dpll & DPLL_PWRDN) == 0) {
+-		REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
+-		REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
+-		REG_WRITE(DPLL_STATUS, 0x1);
+-	}
+-	udelay(150);
+-
+-	/* reset controller: FIXME - can we sort out the ioremap mess ? */
+-	iounmap(hdmi_dev->regs);
+-	mrst_hdmi_reset(dev);
+-
+-	/* program and enable dpll */
+-	refclk = 25000;
+-	mrst_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
+-
+-	/* Setting DPLL */
+-	dpll = REG_READ(DPLL_CTRL);
+-	dpll &= ~DPLL_PDIV_MASK;
+-	dpll &= ~(DPLL_PWRDN | DPLL_RESET);
+-	REG_WRITE(DPLL_CTRL, 0x00000008);
+-	REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
+-	REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
+-	REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
+-	REG_WRITE(DPLL_UPDATE, 0x80000000);
+-	REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
+-	udelay(150);
+-
+-	hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
+-	if (hdmi_dev->regs == NULL) {
+-		DRM_ERROR("failed to do hdmi mmio mapping\n");
+-		return -ENOMEM;
+-	}
+-
+-	/* configure HDMI */
+-	HDMI_WRITE(0x1004, 0x1fd);
+-	HDMI_WRITE(0x2000, 0x1);
+-	HDMI_WRITE(0x2008, 0x0);
+-	HDMI_WRITE(0x3130, 0x8);
+-	HDMI_WRITE(0x101c, 0x1800810);
+-
+-	temp = htotal_calculate(adjusted_mode);
+-	REG_WRITE(htot_reg, temp);
+-	REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+-	REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+-	REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+-	REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+-	REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+-	REG_WRITE(pipesrc_reg,
+-		((mode->crtc_hdisplay - 1) << 16) |  (mode->crtc_vdisplay - 1));
+-
+-	REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+-	REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+-	REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+-	REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
+-	REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
+-	REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
+-	REG_WRITE(PCH_PIPEBSRC,
+-		((mode->crtc_hdisplay - 1) << 16) |  (mode->crtc_vdisplay - 1));
+-
+-	temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
+-	HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) |  temp);
+-
+-	REG_WRITE(dspsize_reg,
+-			((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+-	REG_WRITE(dsppos_reg, 0);
+-
+-	/* Flush the plane changes */
+-	{
+-		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+-		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+-	}
+-
+-	/* Set up the display plane register */
+-	dspcntr = REG_READ(dspcntr_reg);
+-	dspcntr |= DISPPLANE_GAMMA_ENABLE;
+-	dspcntr |= DISPPLANE_SEL_PIPE_B;
+-	dspcntr |= DISPLAY_PLANE_ENABLE;
+-
+-	/* setup pipeconf */
+-	pipeconf = REG_READ(pipeconf_reg);
+-	pipeconf |= PIPEACONF_ENABLE;
+-
+-	REG_WRITE(pipeconf_reg, pipeconf);
+-	REG_READ(pipeconf_reg);
+-
+-	REG_WRITE(PCH_PIPEBCONF, pipeconf);
+-	REG_READ(PCH_PIPEBCONF);
+-	wait_for_vblank(dev);
+-
+-	REG_WRITE(dspcntr_reg, dspcntr);
+-	wait_for_vblank(dev);
+-
+-	return 0;
+-}
+-
+-static int mrst_hdmi_mode_valid(struct drm_connector *connector,
+-				struct drm_display_mode *mode)
+-{
+-	if (mode->clock > 165000)
+-		return MODE_CLOCK_HIGH;
+-	if (mode->clock < 20000)
+-		return MODE_CLOCK_LOW;
+-
+-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+-		return MODE_NO_DBLESCAN;
+-
+-	return MODE_OK;
+-}
+-
+-static bool mrst_hdmi_mode_fixup(struct drm_encoder *encoder,
+-				 struct drm_display_mode *mode,
+-				 struct drm_display_mode *adjusted_mode)
+-{
+-	return true;
+-}
+-
+-static enum drm_connector_status
+-mrst_hdmi_detect(struct drm_connector *connector, bool force)
+-{
+-	enum drm_connector_status status;
+-	struct drm_device *dev = connector->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+-	u32 temp;
+-
+-	temp = HDMI_READ(HDMI_HSR);
+-	DRM_DEBUG_KMS("HDMI_HSR %x\n", temp);
+-
+-	if ((temp & HDMI_DETECT_HDP) != 0)
+-		status = connector_status_connected;
+-	else
+-		status = connector_status_disconnected;
+-
+-	return status;
+-}
+-
+-static const unsigned char raw_edid[] = {
+-	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x10, 0xac, 0x2f, 0xa0,
+-	0x53, 0x55, 0x33, 0x30, 0x16, 0x13, 0x01, 0x03, 0x0e, 0x3a, 0x24, 0x78,
+-	0xea, 0xe9, 0xf5, 0xac, 0x51, 0x30, 0xb4, 0x25, 0x11, 0x50, 0x54, 0xa5,
+-	0x4b, 0x00, 0x81, 0x80, 0xa9, 0x40, 0x71, 0x4f, 0xb3, 0x00, 0x01, 0x01,
+-	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
+-	0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x46, 0x6c, 0x21, 0x00, 0x00, 0x1a,
+-	0x00, 0x00, 0x00, 0xff, 0x00, 0x47, 0x4e, 0x37, 0x32, 0x31, 0x39, 0x35,
+-	0x52, 0x30, 0x33, 0x55, 0x53, 0x0a, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x44,
+-	0x45, 0x4c, 0x4c, 0x20, 0x32, 0x37, 0x30, 0x39, 0x57, 0x0a, 0x20, 0x20,
+-	0x00, 0x00, 0x00, 0xfd, 0x00, 0x38, 0x4c, 0x1e, 0x53, 0x11, 0x00, 0x0a,
+-	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x8d
+-};
+-
+-static int mrst_hdmi_get_modes(struct drm_connector *connector)
+-{
+-	struct drm_device *dev = connector->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct i2c_adapter *i2c_adap;
+-	struct edid *edid;
+-	struct drm_display_mode *mode, *t;
+-	int i = 0, ret = 0;
+-
+-	i2c_adap = i2c_get_adapter(3);
+-	if (i2c_adap == NULL) {
+-		DRM_ERROR("No ddc adapter available!\n");
+-		edid = (struct edid *)raw_edid;
+-	} else {
+-		edid = (struct edid *)raw_edid;
+-		/* FIXME ? edid = drm_get_edid(connector, i2c_adap); */
+-	}
+-
+-	if (edid) {
+-		drm_mode_connector_update_edid_property(connector, edid);
+-		ret = drm_add_edid_modes(connector, edid);
+-		connector->display_info.raw_edid = NULL;
+-	}
+-
+-	/*
+-	 * prune modes that require frame buffer bigger than stolen mem
+-	 */
+-	list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
+-		if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
+-			i++;
+-			drm_mode_remove(connector, mode);
+-		}
+-	}
+-	return ret - i;
+-}
+-
+-static void mrst_hdmi_mode_set(struct drm_encoder *encoder,
+-			       struct drm_display_mode *mode,
+-			       struct drm_display_mode *adjusted_mode)
+-{
+-	struct drm_device *dev = encoder->dev;
+-
+-	mrst_hdmi_audio_enable(dev);
+-	return;
+-}
+-
+-static void mrst_hdmi_destroy(struct drm_connector *connector)
+-{
+-	return;
+-}
+-
+-static const struct drm_encoder_helper_funcs mrst_hdmi_helper_funcs = {
+-	.dpms = mrst_hdmi_dpms,
+-	.mode_fixup = mrst_hdmi_mode_fixup,
+-	.prepare = psb_intel_encoder_prepare,
+-	.mode_set = mrst_hdmi_mode_set,
+-	.commit = psb_intel_encoder_commit,
+-};
+-
+-static const struct drm_connector_helper_funcs
+-					mrst_hdmi_connector_helper_funcs = {
+-	.get_modes = mrst_hdmi_get_modes,
+-	.mode_valid = mrst_hdmi_mode_valid,
+-	.best_encoder = psb_intel_best_encoder,
+-};
+-
+-static const struct drm_connector_funcs mrst_hdmi_connector_funcs = {
+-	.dpms = drm_helper_connector_dpms,
+-	.detect = mrst_hdmi_detect,
+-	.fill_modes = drm_helper_probe_single_connector_modes,
+-	.destroy = mrst_hdmi_destroy,
+-};
+-
+-static void mrst_hdmi_enc_destroy(struct drm_encoder *encoder)
+-{
+-	drm_encoder_cleanup(encoder);
+-}
+-
+-static const struct drm_encoder_funcs mrst_hdmi_enc_funcs = {
+-	.destroy = mrst_hdmi_enc_destroy,
+-};
+-
+-void mrst_hdmi_init(struct drm_device *dev,
+-					struct psb_intel_mode_device *mode_dev)
+-{
+-	struct psb_intel_output *psb_intel_output;
+-	struct drm_connector *connector;
+-	struct drm_encoder *encoder;
+-
+-	psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
+-	if (!psb_intel_output)
+-		return;
+-
+-	psb_intel_output->mode_dev = mode_dev;
+-	connector = &psb_intel_output->base;
+-	encoder = &psb_intel_output->enc;
+-	drm_connector_init(dev, &psb_intel_output->base,
+-			   &mrst_hdmi_connector_funcs,
+-			   DRM_MODE_CONNECTOR_DVID);
+-
+-	drm_encoder_init(dev, &psb_intel_output->enc,
+-			 &mrst_hdmi_enc_funcs,
+-			 DRM_MODE_ENCODER_TMDS);
+-
+-	drm_mode_connector_attach_encoder(&psb_intel_output->base,
+-					  &psb_intel_output->enc);
+-
+-	psb_intel_output->type = INTEL_OUTPUT_HDMI;
+-	drm_encoder_helper_add(encoder, &mrst_hdmi_helper_funcs);
+-	drm_connector_helper_add(connector, &mrst_hdmi_connector_helper_funcs);
+-
+-	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+-	connector->interlace_allowed = false;
+-	connector->doublescan_allowed = false;
+-	drm_sysfs_connector_add(connector);
+-
+-	return;
+-}
+-
+-static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
+-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
+-	{}
+-};
+-
+-void mrst_hdmi_setup(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct pci_dev *pdev;
+-	struct mrst_hdmi_dev *hdmi_dev;
+-	int ret;
+-
+-	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x080d, NULL);
+-	if (!pdev)
+-		return;
+-
+-	hdmi_dev = kzalloc(sizeof(struct mrst_hdmi_dev), GFP_KERNEL);
+-	if (!hdmi_dev) {
+-		dev_err(dev->dev, "failed to allocate memory\n");
+-		goto out;
+-	}
+-
+-
+-	ret = pci_enable_device(pdev);
+-	if (ret) {
+-		dev_err(dev->dev, "failed to enable hdmi controller\n");
+-		goto free;
+-	}
+-
+-	hdmi_dev->mmio = pci_resource_start(pdev, 0);
+-	hdmi_dev->mmio_len = pci_resource_len(pdev, 0);
+-	hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
+-	if (!hdmi_dev->regs) {
+-		dev_err(dev->dev, "failed to map hdmi mmio\n");
+-		goto free;
+-	}
+-
+-	hdmi_dev->dev = pdev;
+-	pci_set_drvdata(pdev, hdmi_dev);
+-
+-	/* Initialize i2c controller */
+-	ret = mrst_hdmi_i2c_init(hdmi_dev->dev);
+-	if (ret)
+-		dev_err(dev->dev, "HDMI I2C initialization failed\n");
+-
+-	dev_priv->hdmi_priv = hdmi_dev;
+-	mrst_hdmi_audio_disable(dev);
+-	return;
+-
+-free:
+-	kfree(hdmi_dev);
+-out:
+-	return;
+-}
+-
+-void mrst_hdmi_teardown(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+-	struct pci_dev *pdev;
+-
+-	if (hdmi_dev) {
+-		pdev = hdmi_dev->dev;
+-		pci_set_drvdata(pdev, NULL);
+-		mrst_hdmi_i2c_exit(pdev);
+-		iounmap(hdmi_dev->regs);
+-		kfree(hdmi_dev);
+-		pci_dev_put(pdev);
+-	}
+-}
+-
+-/* save HDMI register state */
+-void mrst_hdmi_save(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+-	int i;
+-
+-	/* dpll */
+-	hdmi_dev->saveDPLL_CTRL = PSB_RVDC32(DPLL_CTRL);
+-	hdmi_dev->saveDPLL_DIV_CTRL = PSB_RVDC32(DPLL_DIV_CTRL);
+-	hdmi_dev->saveDPLL_ADJUST = PSB_RVDC32(DPLL_ADJUST);
+-	hdmi_dev->saveDPLL_UPDATE = PSB_RVDC32(DPLL_UPDATE);
+-	hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
+-
+-	/* pipe B */
+-	dev_priv->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
+-	dev_priv->savePIPEBSRC  = PSB_RVDC32(PIPEBSRC);
+-	dev_priv->saveHTOTAL_B  = PSB_RVDC32(HTOTAL_B);
+-	dev_priv->saveHBLANK_B  = PSB_RVDC32(HBLANK_B);
+-	dev_priv->saveHSYNC_B   = PSB_RVDC32(HSYNC_B);
+-	dev_priv->saveVTOTAL_B  = PSB_RVDC32(VTOTAL_B);
+-	dev_priv->saveVBLANK_B  = PSB_RVDC32(VBLANK_B);
+-	dev_priv->saveVSYNC_B   = PSB_RVDC32(VSYNC_B);
+-
+-	hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
+-	hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
+-	hdmi_dev->savePCH_HTOTAL_B = PSB_RVDC32(PCH_HTOTAL_B);
+-	hdmi_dev->savePCH_HBLANK_B = PSB_RVDC32(PCH_HBLANK_B);
+-	hdmi_dev->savePCH_HSYNC_B  = PSB_RVDC32(PCH_HSYNC_B);
+-	hdmi_dev->savePCH_VTOTAL_B = PSB_RVDC32(PCH_VTOTAL_B);
+-	hdmi_dev->savePCH_VBLANK_B = PSB_RVDC32(PCH_VBLANK_B);
+-	hdmi_dev->savePCH_VSYNC_B  = PSB_RVDC32(PCH_VSYNC_B);
+-
+-	/* plane */
+-	dev_priv->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
+-	dev_priv->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
+-	dev_priv->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
+-	dev_priv->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
+-	dev_priv->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
+-	dev_priv->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
+-
+-	/* cursor B */
+-	dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
+-	dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
+-	dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
+-
+-	/* save palette */
+-	for (i = 0; i < 256; i++)
+-		dev_priv->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
+-}
+-
+-/* restore HDMI register state */
+-void mrst_hdmi_restore(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+-	int i;
+-
+-	/* dpll */
+-	PSB_WVDC32(hdmi_dev->saveDPLL_CTRL, DPLL_CTRL);
+-	PSB_WVDC32(hdmi_dev->saveDPLL_DIV_CTRL, DPLL_DIV_CTRL);
+-	PSB_WVDC32(hdmi_dev->saveDPLL_ADJUST, DPLL_ADJUST);
+-	PSB_WVDC32(hdmi_dev->saveDPLL_UPDATE, DPLL_UPDATE);
+-	PSB_WVDC32(hdmi_dev->saveDPLL_CLK_ENABLE, DPLL_CLK_ENABLE);
+-	DRM_UDELAY(150);
+-
+-	/* pipe */
+-	PSB_WVDC32(dev_priv->savePIPEBSRC, PIPEBSRC);
+-	PSB_WVDC32(dev_priv->saveHTOTAL_B, HTOTAL_B);
+-	PSB_WVDC32(dev_priv->saveHBLANK_B, HBLANK_B);
+-	PSB_WVDC32(dev_priv->saveHSYNC_B,  HSYNC_B);
+-	PSB_WVDC32(dev_priv->saveVTOTAL_B, VTOTAL_B);
+-	PSB_WVDC32(dev_priv->saveVBLANK_B, VBLANK_B);
+-	PSB_WVDC32(dev_priv->saveVSYNC_B,  VSYNC_B);
+-
+-	PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
+-	PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
+-	PSB_WVDC32(hdmi_dev->savePCH_HBLANK_B, PCH_HBLANK_B);
+-	PSB_WVDC32(hdmi_dev->savePCH_HSYNC_B,  PCH_HSYNC_B);
+-	PSB_WVDC32(hdmi_dev->savePCH_VTOTAL_B, PCH_VTOTAL_B);
+-	PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
+-	PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B,  PCH_VSYNC_B);
+-
+-	PSB_WVDC32(dev_priv->savePIPEBCONF, PIPEBCONF);
+-	PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
+-
+-	/* plane */
+-	PSB_WVDC32(dev_priv->saveDSPBLINOFF, DSPBLINOFF);
+-	PSB_WVDC32(dev_priv->saveDSPBSTRIDE, DSPBSTRIDE);
+-	PSB_WVDC32(dev_priv->saveDSPBTILEOFF, DSPBTILEOFF);
+-	PSB_WVDC32(dev_priv->saveDSPBCNTR, DSPBCNTR);
+-	PSB_WVDC32(dev_priv->saveDSPBSURF, DSPBSURF);
+-
+-	/* cursor B */
+-	PSB_WVDC32(dev_priv->saveDSPBCURSOR_CTRL, CURBCNTR);
+-	PSB_WVDC32(dev_priv->saveDSPBCURSOR_POS, CURBPOS);
+-	PSB_WVDC32(dev_priv->saveDSPBCURSOR_BASE, CURBBASE);
+-
+-	/* restore palette */
+-	for (i = 0; i < 256; i++)
+-		PSB_WVDC32(dev_priv->save_palette_b[i], PALETTE_B + (i << 2));
+-}
+diff --git a/drivers/staging/gma500/mrst_hdmi_i2c.c b/drivers/staging/gma500/mrst_hdmi_i2c.c
+deleted file mode 100644
+index 36e7edc..0000000
+--- a/drivers/staging/gma500/mrst_hdmi_i2c.c
++++ /dev/null
+@@ -1,328 +0,0 @@
+-/*
+- * Copyright © 2010 Intel Corporation
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+- * DEALINGS IN THE SOFTWARE.
+- *
+- * Authors:
+- *	Li Peng <peng.li at intel.com>
+- */
+-
+-#include <linux/mutex.h>
+-#include <linux/pci.h>
+-#include <linux/i2c.h>
+-#include <linux/interrupt.h>
+-#include <linux/delay.h>
+-#include <linux/export.h>
+-#include "psb_drv.h"
+-
+-#define HDMI_READ(reg)		readl(hdmi_dev->regs + (reg))
+-#define HDMI_WRITE(reg, val)	writel(val, hdmi_dev->regs + (reg))
+-
+-#define HDMI_HCR	0x1000
+-#define HCR_DETECT_HDP		(1 << 6)
+-#define HCR_ENABLE_HDCP		(1 << 5)
+-#define HCR_ENABLE_AUDIO	(1 << 2)
+-#define HCR_ENABLE_PIXEL	(1 << 1)
+-#define HCR_ENABLE_TMDS		(1 << 0)
+-#define HDMI_HICR	0x1004
+-#define HDMI_INTR_I2C_ERROR	(1 << 4)
+-#define HDMI_INTR_I2C_FULL	(1 << 3)
+-#define HDMI_INTR_I2C_DONE	(1 << 2)
+-#define HDMI_INTR_HPD		(1 << 0)
+-#define HDMI_HSR	0x1008
+-#define HDMI_HISR	0x100C
+-#define HDMI_HI2CRDB0	0x1200
+-#define HDMI_HI2CHCR	0x1240
+-#define HI2C_HDCP_WRITE		(0 << 2)
+-#define HI2C_HDCP_RI_READ	(1 << 2)
+-#define HI2C_HDCP_READ		(2 << 2)
+-#define HI2C_EDID_READ		(3 << 2)
+-#define HI2C_READ_CONTINUE	(1 << 1)
+-#define HI2C_ENABLE_TRANSACTION	(1 << 0)
+-
+-#define HDMI_ICRH	0x1100
+-#define HDMI_HI2CTDR0	0x1244
+-#define HDMI_HI2CTDR1	0x1248
+-
+-#define I2C_STAT_INIT		0
+-#define I2C_READ_DONE		1
+-#define I2C_TRANSACTION_DONE	2
+-
+-struct hdmi_i2c_dev {
+-	struct i2c_adapter *adap;
+-	struct mutex i2c_lock;
+-	struct completion complete;
+-	int status;
+-	struct i2c_msg *msg;
+-	int buf_offset;
+-};
+-
+-static void hdmi_i2c_irq_enable(struct mrst_hdmi_dev *hdmi_dev)
+-{
+-	u32 temp;
+-
+-	temp = HDMI_READ(HDMI_HICR);
+-	temp |= (HDMI_INTR_I2C_ERROR | HDMI_INTR_I2C_FULL | HDMI_INTR_I2C_DONE);
+-	HDMI_WRITE(HDMI_HICR, temp);
+-	HDMI_READ(HDMI_HICR);
+-}
+-
+-static void hdmi_i2c_irq_disable(struct mrst_hdmi_dev *hdmi_dev)
+-{
+-	HDMI_WRITE(HDMI_HICR, 0x0);
+-	HDMI_READ(HDMI_HICR);
+-}
+-
+-static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+-{
+-	struct mrst_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+-	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+-	u32 temp;
+-
+-	i2c_dev->status = I2C_STAT_INIT;
+-	i2c_dev->msg = pmsg;
+-	i2c_dev->buf_offset = 0;
+-	INIT_COMPLETION(i2c_dev->complete);
+-
+-	/* Enable I2C transaction */
+-	temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
+-	HDMI_WRITE(HDMI_HI2CHCR, temp);
+-	HDMI_READ(HDMI_HI2CHCR);
+-
+-	while (i2c_dev->status != I2C_TRANSACTION_DONE)
+-		wait_for_completion_interruptible_timeout(&i2c_dev->complete,
+-								10 * HZ);
+-
+-	return 0;
+-}
+-
+-static int xfer_write(struct i2c_adapter *adap, struct i2c_msg *pmsg)
+-{
+-	/*
+-	 * XXX: i2c write seems isn't useful for EDID probe, don't do anything
+-	 */
+-	return 0;
+-}
+-
+-static int mrst_hdmi_i2c_access(struct i2c_adapter *adap,
+-				struct i2c_msg *pmsg,
+-				int num)
+-{
+-	struct mrst_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+-	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+-	int i, err = 0;
+-
+-	mutex_lock(&i2c_dev->i2c_lock);
+-
+-	/* Enable i2c unit */
+-	HDMI_WRITE(HDMI_ICRH, 0x00008760);
+-
+-	/* Enable irq */
+-	hdmi_i2c_irq_enable(hdmi_dev);
+-	for (i = 0; i < num; i++) {
+-		if (pmsg->len && pmsg->buf) {
+-			if (pmsg->flags & I2C_M_RD)
+-				err = xfer_read(adap, pmsg);
+-			else
+-				err = xfer_write(adap, pmsg);
+-		}
+-		pmsg++;         /* next message */
+-	}
+-
+-	/* Disable irq */
+-	hdmi_i2c_irq_disable(hdmi_dev);
+-
+-	mutex_unlock(&i2c_dev->i2c_lock);
+-
+-	return i;
+-}
+-
+-static u32 mrst_hdmi_i2c_func(struct i2c_adapter *adapter)
+-{
+-	return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
+-}
+-
+-static const struct i2c_algorithm mrst_hdmi_i2c_algorithm = {
+-	.master_xfer	= mrst_hdmi_i2c_access,
+-	.functionality  = mrst_hdmi_i2c_func,
+-};
+-
+-static struct i2c_adapter mrst_hdmi_i2c_adapter = {
+-	.name		= "mrst_hdmi_i2c",
+-	.nr		= 3,
+-	.owner		= THIS_MODULE,
+-	.class		= I2C_CLASS_DDC,
+-	.algo		= &mrst_hdmi_i2c_algorithm,
+-};
+-
+-static void hdmi_i2c_read(struct mrst_hdmi_dev *hdmi_dev)
+-{
+-	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+-	struct i2c_msg *msg = i2c_dev->msg;
+-	u8 *buf = msg->buf;
+-	u32 temp;
+-	int i, offset;
+-
+-	offset = i2c_dev->buf_offset;
+-	for (i = 0; i < 0x10; i++) {
+-		temp = HDMI_READ(HDMI_HI2CRDB0 + (i * 4));
+-		memcpy(buf + (offset + i * 4), &temp, 4);
+-	}
+-	i2c_dev->buf_offset += (0x10 * 4);
+-
+-	/* clearing read buffer full intr */
+-	temp = HDMI_READ(HDMI_HISR);
+-	HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_FULL);
+-	HDMI_READ(HDMI_HISR);
+-
+-	/* continue read transaction */
+-	temp = HDMI_READ(HDMI_HI2CHCR);
+-	HDMI_WRITE(HDMI_HI2CHCR, temp | HI2C_READ_CONTINUE);
+-	HDMI_READ(HDMI_HI2CHCR);
+-
+-	i2c_dev->status = I2C_READ_DONE;
+-	return;
+-}
+-
+-static void hdmi_i2c_transaction_done(struct mrst_hdmi_dev *hdmi_dev)
+-{
+-	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+-	u32 temp;
+-
+-	/* clear transaction done intr */
+-	temp = HDMI_READ(HDMI_HISR);
+-	HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_DONE);
+-	HDMI_READ(HDMI_HISR);
+-
+-
+-	temp = HDMI_READ(HDMI_HI2CHCR);
+-	HDMI_WRITE(HDMI_HI2CHCR, temp & ~HI2C_ENABLE_TRANSACTION);
+-	HDMI_READ(HDMI_HI2CHCR);
+-
+-	i2c_dev->status = I2C_TRANSACTION_DONE;
+-	return;
+-}
+-
+-static irqreturn_t mrst_hdmi_i2c_handler(int this_irq, void *dev)
+-{
+-	struct mrst_hdmi_dev *hdmi_dev = dev;
+-	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
+-	u32 stat;
+-
+-	stat = HDMI_READ(HDMI_HISR);
+-
+-	if (stat & HDMI_INTR_HPD) {
+-		HDMI_WRITE(HDMI_HISR, stat | HDMI_INTR_HPD);
+-		HDMI_READ(HDMI_HISR);
+-	}
+-
+-	if (stat & HDMI_INTR_I2C_FULL)
+-		hdmi_i2c_read(hdmi_dev);
+-
+-	if (stat & HDMI_INTR_I2C_DONE)
+-		hdmi_i2c_transaction_done(hdmi_dev);
+-
+-	complete(&i2c_dev->complete);
+-
+-	return IRQ_HANDLED;
+-}
+-
+-/*
+- * choose alternate function 2 of GPIO pin 52, 53,
+- * which is used by HDMI I2C logic
+- */
+-static void mrst_hdmi_i2c_gpio_fix(void)
+-{
+-	void *base;
+-	unsigned int gpio_base = 0xff12c000;
+-	int gpio_len = 0x1000;
+-	u32 temp;
+-
+-	base = ioremap((resource_size_t)gpio_base, gpio_len);
+-	if (base == NULL) {
+-		DRM_ERROR("gpio ioremap fail\n");
+-		return;
+-	}
+-
+-	temp = readl(base + 0x44);
+-	DRM_DEBUG_DRIVER("old gpio val %x\n", temp);
+-	writel((temp | 0x00000a00), (base +  0x44));
+-	temp = readl(base + 0x44);
+-	DRM_DEBUG_DRIVER("new gpio val %x\n", temp);
+-
+-	iounmap(base);
+-}
+-
+-int mrst_hdmi_i2c_init(struct pci_dev *dev)
+-{
+-	struct mrst_hdmi_dev *hdmi_dev;
+-	struct hdmi_i2c_dev *i2c_dev;
+-	int ret;
+-
+-	hdmi_dev = pci_get_drvdata(dev);
+-
+-	i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
+-	if (i2c_dev == NULL) {
+-		DRM_ERROR("Can't allocate interface\n");
+-		ret = -ENOMEM;
+-		goto exit;
+-	}
+-
+-	i2c_dev->adap = &mrst_hdmi_i2c_adapter;
+-	i2c_dev->status = I2C_STAT_INIT;
+-	init_completion(&i2c_dev->complete);
+-	mutex_init(&i2c_dev->i2c_lock);
+-	i2c_set_adapdata(&mrst_hdmi_i2c_adapter, hdmi_dev);
+-	hdmi_dev->i2c_dev = i2c_dev;
+-
+-	/* Enable HDMI I2C function on gpio */
+-	mrst_hdmi_i2c_gpio_fix();
+-
+-	/* request irq */
+-	ret = request_irq(dev->irq, mrst_hdmi_i2c_handler, IRQF_SHARED,
+-			  mrst_hdmi_i2c_adapter.name, hdmi_dev);
+-	if (ret) {
+-		DRM_ERROR("Failed to request IRQ for I2C controller\n");
+-		goto err;
+-	}
+-
+-	/* Adapter registration */
+-	ret = i2c_add_numbered_adapter(&mrst_hdmi_i2c_adapter);
+-	return ret;
+-
+-err:
+-	kfree(i2c_dev);
+-exit:
+-	return ret;
+-}
+-
+-void mrst_hdmi_i2c_exit(struct pci_dev *dev)
+-{
+-	struct mrst_hdmi_dev *hdmi_dev;
+-	struct hdmi_i2c_dev *i2c_dev;
+-
+-	hdmi_dev = pci_get_drvdata(dev);
+-	if (i2c_del_adapter(&mrst_hdmi_i2c_adapter))
+-		DRM_DEBUG_DRIVER("Failed to delete hdmi-i2c adapter\n");
+-
+-	i2c_dev = hdmi_dev->i2c_dev;
+-	kfree(i2c_dev);
+-	free_irq(dev->irq, hdmi_dev);
+-}
+diff --git a/drivers/staging/gma500/mrst_lvds.c b/drivers/staging/gma500/mrst_lvds.c
+deleted file mode 100644
+index e7999a2..0000000
+--- a/drivers/staging/gma500/mrst_lvds.c
++++ /dev/null
+@@ -1,407 +0,0 @@
+-/*
+- * Copyright © 2006-2009 Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors:
+- *	Eric Anholt <eric at anholt.net>
+- *	Dave Airlie <airlied at linux.ie>
+- *	Jesse Barnes <jesse.barnes at intel.com>
+- */
+-
+-#include <linux/i2c.h>
+-#include <drm/drmP.h>
+-#include <asm/mrst.h>
+-
+-#include "intel_bios.h"
+-#include "psb_drv.h"
+-#include "psb_intel_drv.h"
+-#include "psb_intel_reg.h"
+-#include "power.h"
+-#include <linux/pm_runtime.h>
+-
+-/* The max/min PWM frequency in BPCR[31:17] - */
+-/* The smallest number is 1 (not 0) that can fit in the
+- * 15-bit field of the and then*/
+-/* shifts to the left by one bit to get the actual 16-bit
+- * value that the 15-bits correspond to.*/
+-#define MRST_BLC_MAX_PWM_REG_FREQ	    0xFFFF
+-#define BRIGHTNESS_MAX_LEVEL 100
+-
+-/**
+- * Sets the power state for the panel.
+- */
+-static void mrst_lvds_set_power(struct drm_device *dev,
+-				struct psb_intel_output *output, bool on)
+-{
+-	u32 pp_status;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	if (!gma_power_begin(dev, true))
+-		return;
+-
+-	if (on) {
+-		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+-			  POWER_TARGET_ON);
+-		do {
+-			pp_status = REG_READ(PP_STATUS);
+-		} while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
+-		dev_priv->is_lvds_on = true;
+-		if (dev_priv->ops->lvds_bl_power)
+-			dev_priv->ops->lvds_bl_power(dev, true);
+-	} else {
+-		if (dev_priv->ops->lvds_bl_power)
+-			dev_priv->ops->lvds_bl_power(dev, false);
+-		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+-			  ~POWER_TARGET_ON);
+-		do {
+-			pp_status = REG_READ(PP_STATUS);
+-		} while (pp_status & PP_ON);
+-		dev_priv->is_lvds_on = false;
+-		pm_request_idle(&dev->pdev->dev);
+-	}
+-	gma_power_end(dev);
+-}
+-
+-static void mrst_lvds_dpms(struct drm_encoder *encoder, int mode)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+-
+-	if (mode == DRM_MODE_DPMS_ON)
+-		mrst_lvds_set_power(dev, output, true);
+-	else
+-		mrst_lvds_set_power(dev, output, false);
+-
+-	/* XXX: We never power down the LVDS pairs. */
+-}
+-
+-static void mrst_lvds_mode_set(struct drm_encoder *encoder,
+-			       struct drm_display_mode *mode,
+-			       struct drm_display_mode *adjusted_mode)
+-{
+-	struct psb_intel_mode_device *mode_dev =
+-				enc_to_psb_intel_output(encoder)->mode_dev;
+-	struct drm_device *dev = encoder->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 lvds_port;
+-	uint64_t v = DRM_MODE_SCALE_FULLSCREEN;
+-
+-	if (!gma_power_begin(dev, true))
+-		return;
+-
+-	/*
+-	 * The LVDS pin pair will already have been turned on in the
+-	 * psb_intel_crtc_mode_set since it has a large impact on the DPLL
+-	 * settings.
+-	 */
+-	lvds_port = (REG_READ(LVDS) &
+-		    (~LVDS_PIPEB_SELECT)) |
+-		    LVDS_PORT_EN |
+-		    LVDS_BORDER_EN;
+-
+-	/* If the firmware says dither on Moorestown, or the BIOS does
+-	   on Oaktrail then enable dithering */
+-	if (mode_dev->panel_wants_dither || dev_priv->lvds_dither)
+-		lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
+-
+-	REG_WRITE(LVDS, lvds_port);
+-
+-	drm_connector_property_get_value(
+-		&enc_to_psb_intel_output(encoder)->base,
+-		dev->mode_config.scaling_mode_property,
+-		&v);
+-
+-	if (v == DRM_MODE_SCALE_NO_SCALE)
+-		REG_WRITE(PFIT_CONTROL, 0);
+-	else if (v == DRM_MODE_SCALE_ASPECT) {
+-		if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
+-		    (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
+-			if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) ==
+-			    (mode->hdisplay * adjusted_mode->crtc_vdisplay))
+-				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+-			else if ((adjusted_mode->crtc_hdisplay *
+-				mode->vdisplay) > (mode->hdisplay *
+-				adjusted_mode->crtc_vdisplay))
+-				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
+-					  PFIT_SCALING_MODE_PILLARBOX);
+-			else
+-				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
+-					  PFIT_SCALING_MODE_LETTERBOX);
+-		} else
+-			REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+-	} else /*(v == DRM_MODE_SCALE_FULLSCREEN)*/
+-		REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
+-
+-	gma_power_end(dev);
+-}
+-
+-static void mrst_lvds_prepare(struct drm_encoder *encoder)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+-	struct psb_intel_mode_device *mode_dev = output->mode_dev;
+-
+-	if (!gma_power_begin(dev, true))
+-		return;
+-
+-	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+-	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+-					  BACKLIGHT_DUTY_CYCLE_MASK);
+-	mrst_lvds_set_power(dev, output, false);
+-	gma_power_end(dev);
+-}
+-
+-static u32 mrst_lvds_get_max_backlight(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 ret;
+-
+-	if (gma_power_begin(dev, false)) {
+-		ret = ((REG_READ(BLC_PWM_CTL) &
+-			  BACKLIGHT_MODULATION_FREQ_MASK) >>
+-			  BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+-
+-		gma_power_end(dev);
+-	} else
+-		ret = ((dev_priv->saveBLC_PWM_CTL &
+-			  BACKLIGHT_MODULATION_FREQ_MASK) >>
+-			  BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+-
+-	return ret;
+-}
+-
+-static void mrst_lvds_commit(struct drm_encoder *encoder)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+-	struct psb_intel_mode_device *mode_dev = output->mode_dev;
+-
+-	if (mode_dev->backlight_duty_cycle == 0)
+-		mode_dev->backlight_duty_cycle =
+-					mrst_lvds_get_max_backlight(dev);
+-	mrst_lvds_set_power(dev, output, true);
+-}
+-
+-static const struct drm_encoder_helper_funcs mrst_lvds_helper_funcs = {
+-	.dpms = mrst_lvds_dpms,
+-	.mode_fixup = psb_intel_lvds_mode_fixup,
+-	.prepare = mrst_lvds_prepare,
+-	.mode_set = mrst_lvds_mode_set,
+-	.commit = mrst_lvds_commit,
+-};
+-
+-static struct drm_display_mode lvds_configuration_modes[] = {
+-	/* hard coded fixed mode for TPO LTPS LPJ040K001A */
+-	{ DRM_MODE("800x480",  DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
+-		   846, 1056, 0, 480, 489, 491, 525, 0, 0) },
+-	/* hard coded fixed mode for LVDS 800x480 */
+-	{ DRM_MODE("800x480",  DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
+-		   802, 1024, 0, 480, 481, 482, 525, 0, 0) },
+-	/* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600 at 75 */
+-	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
+-		   1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
+-	/* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600 at 75 */
+-	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
+-		   1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
+-	/* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
+-	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
+-		   1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
+-	/* hard coded fixed mode for LVDS 1024x768 */
+-	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+-		   1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
+-	/* hard coded fixed mode for LVDS 1366x768 */
+-	{ DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
+-		   1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
+-};
+-
+-/* Returns the panel fixed mode from configuration. */
+-
+-static struct drm_display_mode *
+-mrst_lvds_get_configuration_mode(struct drm_device *dev)
+-{
+-	struct drm_display_mode *mode = NULL;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
+-
+-	if (dev_priv->vbt_data.size != 0x00) { /*if non-zero, then use vbt*/
+-		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+-		if (!mode)
+-			return NULL;
+-
+-		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
+-		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
+-		mode->hsync_start = mode->hdisplay + \
+-				((ti->hsync_offset_hi << 8) | \
+-				ti->hsync_offset_lo);
+-		mode->hsync_end = mode->hsync_start + \
+-				((ti->hsync_pulse_width_hi << 8) | \
+-				ti->hsync_pulse_width_lo);
+-		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
+-							ti->hblank_lo);
+-		mode->vsync_start = \
+-			mode->vdisplay + ((ti->vsync_offset_hi << 4) | \
+-						ti->vsync_offset_lo);
+-		mode->vsync_end = \
+-			mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \
+-						ti->vsync_pulse_width_lo);
+-		mode->vtotal = mode->vdisplay + \
+-				((ti->vblank_hi << 8) | ti->vblank_lo);
+-		mode->clock = ti->pixel_clock * 10;
+-#if 0
+-		printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
+-		printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
+-		printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
+-		printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
+-		printk(KERN_INFO "htotal is %d\n", mode->htotal);
+-		printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
+-		printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
+-		printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
+-		printk(KERN_INFO "clock is %d\n", mode->clock);
+-#endif
+-	} else
+-		mode = drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
+-
+-	drm_mode_set_name(mode);
+-	drm_mode_set_crtcinfo(mode, 0);
+-
+-	return mode;
+-}
+-
+-/**
+- * mrst_lvds_init - setup LVDS connectors on this device
+- * @dev: drm device
+- *
+- * Create the connector, register the LVDS DDC bus, and try to figure out what
+- * modes we can display on the LVDS panel (if present).
+- */
+-void mrst_lvds_init(struct drm_device *dev,
+-		    struct psb_intel_mode_device *mode_dev)
+-{
+-	struct psb_intel_output *psb_intel_output;
+-	struct drm_connector *connector;
+-	struct drm_encoder *encoder;
+-	struct drm_psb_private *dev_priv =
+-				(struct drm_psb_private *) dev->dev_private;
+-	struct edid *edid;
+-	int ret = 0;
+-	struct i2c_adapter *i2c_adap;
+-	struct drm_display_mode *scan;	/* *modes, *bios_mode; */
+-
+-	psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
+-	if (!psb_intel_output)
+-		return;
+-
+-	psb_intel_output->mode_dev = mode_dev;
+-	connector = &psb_intel_output->base;
+-	encoder = &psb_intel_output->enc;
+-	dev_priv->is_lvds_on = true;
+-	drm_connector_init(dev, &psb_intel_output->base,
+-			   &psb_intel_lvds_connector_funcs,
+-			   DRM_MODE_CONNECTOR_LVDS);
+-
+-	drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
+-			 DRM_MODE_ENCODER_LVDS);
+-
+-	drm_mode_connector_attach_encoder(&psb_intel_output->base,
+-					  &psb_intel_output->enc);
+-	psb_intel_output->type = INTEL_OUTPUT_LVDS;
+-
+-	drm_encoder_helper_add(encoder, &mrst_lvds_helper_funcs);
+-	drm_connector_helper_add(connector,
+-				 &psb_intel_lvds_connector_helper_funcs);
+-	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+-	connector->interlace_allowed = false;
+-	connector->doublescan_allowed = false;
+-
+-	drm_connector_attach_property(connector,
+-					dev->mode_config.scaling_mode_property,
+-					DRM_MODE_SCALE_FULLSCREEN);
+-	drm_connector_attach_property(connector,
+-					dev_priv->backlight_property,
+-					BRIGHTNESS_MAX_LEVEL);
+-
+-	mode_dev->panel_wants_dither = false;
+-	if (dev_priv->vbt_data.size != 0x00)
+-		mode_dev->panel_wants_dither = (dev_priv->gct_data.
+-			Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
+-
+-	/*
+-	 * LVDS discovery:
+-	 * 1) check for EDID on DDC
+-	 * 2) check for VBT data
+-	 * 3) check to see if LVDS is already on
+-	 *    if none of the above, no panel
+-	 * 4) make sure lid is open
+-	 *    if closed, act like it's not there for now
+-	 */
+-
+-	i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
+-
+-	if (i2c_adap == NULL)
+-		dev_err(dev->dev, "No ddc adapter available!\n");
+-	/*
+-	 * Attempt to get the fixed panel mode from DDC.  Assume that the
+-	 * preferred mode is the right one.
+-	 */
+-	if (i2c_adap) {
+-		edid = drm_get_edid(connector, i2c_adap);
+-		if (edid) {
+-			drm_mode_connector_update_edid_property(connector,
+-									edid);
+-			ret = drm_add_edid_modes(connector, edid);
+-			kfree(edid);
+-		}
+-
+-		list_for_each_entry(scan, &connector->probed_modes, head) {
+-			if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+-				mode_dev->panel_fixed_mode =
+-				    drm_mode_duplicate(dev, scan);
+-				goto out;	/* FIXME: check for quirks */
+-			}
+-		}
+-	}
+-	/*
+-	 * If we didn't get EDID, try geting panel timing
+-	 * from configuration data
+-	 */
+-	mode_dev->panel_fixed_mode = mrst_lvds_get_configuration_mode(dev);
+-
+-	if (mode_dev->panel_fixed_mode) {
+-		mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+-		goto out;	/* FIXME: check for quirks */
+-	}
+-
+-	/* If we still don't have a mode after all that, give up. */
+-	if (!mode_dev->panel_fixed_mode) {
+-		dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
+-		goto failed_find;
+-	}
+-
+-out:
+-	drm_sysfs_connector_add(connector);
+-	return;
+-
+-failed_find:
+-	dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
+-	if (psb_intel_output->ddc_bus)
+-		psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+-
+-/* failed_ddc: */
+-
+-	drm_encoder_cleanup(encoder);
+-	drm_connector_cleanup(connector);
+-	kfree(connector);
+-}
+-
+diff --git a/drivers/staging/gma500/power.c b/drivers/staging/gma500/power.c
+deleted file mode 100644
+index 436fe97..0000000
+--- a/drivers/staging/gma500/power.c
++++ /dev/null
+@@ -1,318 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2009-2011, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- *
+- * Authors:
+- *    Benjamin Defnet <benjamin.r.defnet at intel.com>
+- *    Rajesh Poornachandran <rajesh.poornachandran at intel.com>
+- * Massively reworked
+- *    Alan Cox <alan at linux.intel.com>
+- */
+-
+-#include "power.h"
+-#include "psb_drv.h"
+-#include "psb_reg.h"
+-#include "psb_intel_reg.h"
+-#include <linux/mutex.h>
+-#include <linux/pm_runtime.h>
+-
+-static struct mutex power_mutex;	/* Serialize power ops */
+-static spinlock_t power_ctrl_lock;	/* Serialize power claim */
+-
+-/**
+- *	gma_power_init		-	initialise power manager
+- *	@dev: our device
+- *
+- *	Set up for power management tracking of our hardware.
+- */
+-void gma_power_init(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	/* FIXME: Move APM/OSPM base into relevant device code */
+-	dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
+-	dev_priv->ospm_base &= 0xffff;
+-
+-	dev_priv->display_power = true;	/* We start active */
+-	dev_priv->display_count = 0;	/* Currently no users */
+-	dev_priv->suspended = false;	/* And not suspended */
+-	spin_lock_init(&power_ctrl_lock);
+-	mutex_init(&power_mutex);
+-
+-	dev_priv->ops->init_pm(dev);
+-}
+-
+-/**
+- *	gma_power_uninit	-	end power manager
+- *	@dev: device to end for
+- *
+- *	Undo the effects of gma_power_init
+- */
+-void gma_power_uninit(struct drm_device *dev)
+-{
+-	pm_runtime_disable(&dev->pdev->dev);
+-	pm_runtime_set_suspended(&dev->pdev->dev);
+-}
+-
+-/**
+- *	gma_suspend_display	-	suspend the display logic
+- *	@dev: our DRM device
+- *
+- *	Suspend the display logic of the graphics interface
+- */
+-static void gma_suspend_display(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	if (!dev_priv->display_power)
+-		return;
+-	dev_priv->ops->save_regs(dev);
+-	dev_priv->ops->power_down(dev);
+-	dev_priv->display_power = false;
+-}
+-
+-/**
+- *	gma_resume_display	-	resume display side logic
+- *
+- *	Resume the display hardware restoring state and enabling
+- *	as necessary.
+- */
+-static void gma_resume_display(struct pci_dev *pdev)
+-{
+-	struct drm_device *dev = pci_get_drvdata(pdev);
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	if (dev_priv->display_power)
+-		return;
+-
+-	/* turn on the display power island */
+-	dev_priv->ops->power_up(dev);
+-	dev_priv->suspended = false;
+-	dev_priv->display_power = true;
+-
+-	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
+-	pci_write_config_word(pdev, PSB_GMCH_CTRL,
+-			dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
+-	dev_priv->ops->restore_regs(dev);
+-}
+-
+-/**
+- *	gma_suspend_pci		-	suspend PCI side
+- *	@pdev: PCI device
+- *
+- *	Perform the suspend processing on our PCI device state
+- */
+-static void gma_suspend_pci(struct pci_dev *pdev)
+-{
+-	struct drm_device *dev = pci_get_drvdata(pdev);
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	int bsm, vbt;
+-
+-	if (dev_priv->suspended)
+-		return;
+-
+-	pci_save_state(pdev);
+-	pci_read_config_dword(pdev, 0x5C, &bsm);
+-	dev_priv->saveBSM = bsm;
+-	pci_read_config_dword(pdev, 0xFC, &vbt);
+-	dev_priv->saveVBT = vbt;
+-	pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
+-	pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
+-
+-	pci_disable_device(pdev);
+-	pci_set_power_state(pdev, PCI_D3hot);
+-
+-	dev_priv->suspended = true;
+-}
+-
+-/**
+- *	gma_resume_pci		-	resume helper
+- *	@dev: our PCI device
+- *
+- *	Perform the resume processing on our PCI device state - rewrite
+- *	register state and re-enable the PCI device
+- */
+-static bool gma_resume_pci(struct pci_dev *pdev)
+-{
+-	struct drm_device *dev = pci_get_drvdata(pdev);
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	int ret;
+-
+-	if (!dev_priv->suspended)
+-		return true;
+-
+-	pci_set_power_state(pdev, PCI_D0);
+-	pci_restore_state(pdev);
+-	pci_write_config_dword(pdev, 0x5c, dev_priv->saveBSM);
+-	pci_write_config_dword(pdev, 0xFC, dev_priv->saveVBT);
+-	/* restoring MSI address and data in PCIx space */
+-	pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
+-	pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
+-	ret = pci_enable_device(pdev);
+-
+-	if (ret != 0)
+-		dev_err(&pdev->dev, "pci_enable failed: %d\n", ret);
+-	else
+-		dev_priv->suspended = false;
+-	return !dev_priv->suspended;
+-}
+-
+-/**
+- *	gma_power_suspend		-	bus callback for suspend
+- *	@pdev: our PCI device
+- *	@state: suspend type
+- *
+- *	Called back by the PCI layer during a suspend of the system. We
+- *	perform the necessary shut down steps and save enough state that
+- *	we can undo this when resume is called.
+- */
+-int gma_power_suspend(struct device *_dev)
+-{
+-	struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+-	struct drm_device *dev = pci_get_drvdata(pdev);
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	mutex_lock(&power_mutex);
+-	if (!dev_priv->suspended) {
+-		if (dev_priv->display_count) {
+-			mutex_unlock(&power_mutex);
+-			return -EBUSY;
+-		}
+-		psb_irq_uninstall(dev);
+-		gma_suspend_display(dev);
+-		gma_suspend_pci(pdev);
+-	}
+-	mutex_unlock(&power_mutex);
+-	return 0;
+-}
+-
+-/**
+- *	gma_power_resume		-	resume power
+- *	@pdev: PCI device
+- *
+- *	Resume the PCI side of the graphics and then the displays
+- */
+-int gma_power_resume(struct device *_dev)
+-{
+-	struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
+-	struct drm_device *dev = pci_get_drvdata(pdev);
+-
+-	mutex_lock(&power_mutex);
+-	gma_resume_pci(pdev);
+-	gma_resume_display(pdev);
+-	psb_irq_preinstall(dev);
+-	psb_irq_postinstall(dev);
+-	mutex_unlock(&power_mutex);
+-	return 0;
+-}
+-
+-/**
+- *	gma_power_is_on		-	returne true if power is on
+- *	@dev: our DRM device
+- *
+- *	Returns true if the display island power is on at this moment
+- */
+-bool gma_power_is_on(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	return dev_priv->display_power;
+-}
+-
+-/**
+- *	gma_power_begin		-	begin requiring power
+- *	@dev: our DRM device
+- *	@force_on: true to force power on
+- *
+- *	Begin an action that requires the display power island is enabled.
+- *	We refcount the islands.
+- */
+-bool gma_power_begin(struct drm_device *dev, bool force_on)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	int ret;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&power_ctrl_lock, flags);
+-	/* Power already on ? */
+-	if (dev_priv->display_power) {
+-		dev_priv->display_count++;
+-		pm_runtime_get(&dev->pdev->dev);
+-		spin_unlock_irqrestore(&power_ctrl_lock, flags);
+-		return true;
+-	}
+-	if (force_on == false)
+-		goto out_false;
+-
+-	/* Ok power up needed */
+-	ret = gma_resume_pci(dev->pdev);
+-	if (ret == 0) {
+-		/* FIXME: we want to defer this for Medfield/Oaktrail */
+-		gma_resume_display(dev);
+-		psb_irq_preinstall(dev);
+-		psb_irq_postinstall(dev);
+-		pm_runtime_get(&dev->pdev->dev);
+-		dev_priv->display_count++;
+-		spin_unlock_irqrestore(&power_ctrl_lock, flags);
+-		return true;
+-	}
+-out_false:
+-	spin_unlock_irqrestore(&power_ctrl_lock, flags);
+-	return false;
+-}
+-
+-/**
+- *	gma_power_end		-	end use of power
+- *	@dev: Our DRM device
+- *
+- *	Indicate that one of our gma_power_begin() requested periods when
+- *	the diplay island power is needed has completed.
+- */
+-void gma_power_end(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	unsigned long flags;
+-	spin_lock_irqsave(&power_ctrl_lock, flags);
+-	dev_priv->display_count--;
+-	WARN_ON(dev_priv->display_count < 0);
+-	spin_unlock_irqrestore(&power_ctrl_lock, flags);
+-	pm_runtime_put(&dev->pdev->dev);
+-}
+-
+-int psb_runtime_suspend(struct device *dev)
+-{
+-	return gma_power_suspend(dev);
+-}
+-
+-int psb_runtime_resume(struct device *dev)
+-{
+-	return gma_power_resume(dev);;
+-}
+-
+-int psb_runtime_idle(struct device *dev)
+-{
+-	struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
+-	struct drm_psb_private *dev_priv = drmdev->dev_private;
+-	if (dev_priv->display_count)
+-		return 0;
+-	else
+-		return 1;
+-}
+diff --git a/drivers/staging/gma500/power.h b/drivers/staging/gma500/power.h
+deleted file mode 100644
+index 1969d2e..0000000
+--- a/drivers/staging/gma500/power.h
++++ /dev/null
+@@ -1,67 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2009-2011, Intel Corporation.
+- * All Rights Reserved.
+-
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- *
+- * Authors:
+- *    Benjamin Defnet <benjamin.r.defnet at intel.com>
+- *    Rajesh Poornachandran <rajesh.poornachandran at intel.com>
+- * Massively reworked
+- *    Alan Cox <alan at linux.intel.com>
+- */
+-#ifndef _PSB_POWERMGMT_H_
+-#define _PSB_POWERMGMT_H_
+-
+-#include <linux/pci.h>
+-#include <drm/drmP.h>
+-
+-void gma_power_init(struct drm_device *dev);
+-void gma_power_uninit(struct drm_device *dev);
+-
+-/*
+- * The kernel bus power management  will call these functions
+- */
+-int gma_power_suspend(struct device *dev);
+-int gma_power_resume(struct device *dev);
+-
+-/*
+- * These are the functions the driver should use to wrap all hw access
+- * (i.e. register reads and writes)
+- */
+-bool gma_power_begin(struct drm_device *dev, bool force);
+-void gma_power_end(struct drm_device *dev);
+-
+-/*
+- * Use this function to do an instantaneous check for if the hw is on.
+- * Only use this in cases where you know the mutex is already held such
+- * as in irq install/uninstall and you need to
+- * prevent a deadlock situation.  Otherwise use gma_power_begin().
+- */
+-bool gma_power_is_on(struct drm_device *dev);
+-
+-/*
+- * GFX-Runtime PM callbacks
+- */
+-int psb_runtime_suspend(struct device *dev);
+-int psb_runtime_resume(struct device *dev);
+-int psb_runtime_idle(struct device *dev);
+-
+-#endif /*_PSB_POWERMGMT_H_*/
+diff --git a/drivers/staging/gma500/psb_device.c b/drivers/staging/gma500/psb_device.c
+deleted file mode 100644
+index b97aa78..0000000
+--- a/drivers/staging/gma500/psb_device.c
++++ /dev/null
+@@ -1,321 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2011, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- **************************************************************************/
+-
+-#include <linux/backlight.h>
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-#include "psb_drm.h"
+-#include "psb_drv.h"
+-#include "psb_reg.h"
+-#include "psb_intel_reg.h"
+-#include "intel_bios.h"
+-
+-
+-static int psb_output_init(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	psb_intel_lvds_init(dev, &dev_priv->mode_dev);
+-	psb_intel_sdvo_init(dev, SDVOB);
+-	return 0;
+-}
+-
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-
+-/*
+- *	Poulsbo Backlight Interfaces
+- */
+-
+-#define BLC_PWM_PRECISION_FACTOR 100	/* 10000000 */
+-#define BLC_PWM_FREQ_CALC_CONSTANT 32
+-#define MHz 1000000
+-
+-#define PSB_BLC_PWM_PRECISION_FACTOR    10
+-#define PSB_BLC_MAX_PWM_REG_FREQ        0xFFFE
+-#define PSB_BLC_MIN_PWM_REG_FREQ        0x2
+-
+-#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+-#define PSB_BACKLIGHT_PWM_CTL_SHIFT	(16)
+-
+-static int psb_brightness;
+-static struct backlight_device *psb_backlight_device;
+-
+-static int psb_get_brightness(struct backlight_device *bd)
+-{
+-	/* return locally cached var instead of HW read (due to DPST etc.) */
+-	/* FIXME: ideally return actual value in case firmware fiddled with
+-	   it */
+-	return psb_brightness;
+-}
+-
+-
+-static int psb_backlight_setup(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	unsigned long core_clock;
+-	/* u32 bl_max_freq; */
+-	/* unsigned long value; */
+-	u16 bl_max_freq;
+-	uint32_t value;
+-	uint32_t blc_pwm_precision_factor;
+-
+-	/* get bl_max_freq and pol from dev_priv*/
+-	if (!dev_priv->lvds_bl) {
+-		dev_err(dev->dev, "Has no valid LVDS backlight info\n");
+-		return -ENOENT;
+-	}
+-	bl_max_freq = dev_priv->lvds_bl->freq;
+-	blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
+-
+-	core_clock = dev_priv->core_freq;
+-
+-	value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
+-	value *= blc_pwm_precision_factor;
+-	value /= bl_max_freq;
+-	value /= blc_pwm_precision_factor;
+-
+-	if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
+-		 value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
+-				return -ERANGE;
+-	else {
+-		value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+-		REG_WRITE(BLC_PWM_CTL,
+-			(value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
+-	}
+-	return 0;
+-}
+-
+-static int psb_set_brightness(struct backlight_device *bd)
+-{
+-	struct drm_device *dev = bl_get_data(psb_backlight_device);
+-	int level = bd->props.brightness;
+-
+-	/* Percentage 1-100% being valid */
+-	if (level < 1)
+-		level = 1;
+-
+-	psb_intel_lvds_set_brightness(dev, level);
+-	psb_brightness = level;
+-	return 0;
+-}
+-
+-static const struct backlight_ops psb_ops = {
+-	.get_brightness = psb_get_brightness,
+-	.update_status  = psb_set_brightness,
+-};
+-
+-static int psb_backlight_init(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	int ret;
+-	struct backlight_properties props;
+-
+-	memset(&props, 0, sizeof(struct backlight_properties));
+-	props.max_brightness = 100;
+-	props.type = BACKLIGHT_PLATFORM;
+-
+-	psb_backlight_device = backlight_device_register("psb-bl",
+-					NULL, (void *)dev, &psb_ops, &props);
+-	if (IS_ERR(psb_backlight_device))
+-		return PTR_ERR(psb_backlight_device);
+-
+-	ret = psb_backlight_setup(dev);
+-	if (ret < 0) {
+-		backlight_device_unregister(psb_backlight_device);
+-		psb_backlight_device = NULL;
+-		return ret;
+-	}
+-	psb_backlight_device->props.brightness = 100;
+-	psb_backlight_device->props.max_brightness = 100;
+-	backlight_update_status(psb_backlight_device);
+-	dev_priv->backlight_device = psb_backlight_device;
+-	return 0;
+-}
+-
+-#endif
+-
+-/*
+- *	Provide the Poulsbo specific chip logic and low level methods
+- *	for power management
+- */
+-
+-static void psb_init_pm(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
+-	gating &= ~3;	/* Disable 2D clock gating */
+-	gating |= 1;
+-	PSB_WSGX32(gating, PSB_CR_CLKGATECTL);
+-	PSB_RSGX32(PSB_CR_CLKGATECTL);
+-}
+-
+-/**
+- *	psb_save_display_registers	-	save registers lost on suspend
+- *	@dev: our DRM device
+- *
+- *	Save the state we need in order to be able to restore the interface
+- *	upon resume from suspend
+- */
+-static int psb_save_display_registers(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct drm_crtc *crtc;
+-	struct drm_connector *connector;
+-
+-	/* Display arbitration control + watermarks */
+-	dev_priv->saveDSPARB = PSB_RVDC32(DSPARB);
+-	dev_priv->saveDSPFW1 = PSB_RVDC32(DSPFW1);
+-	dev_priv->saveDSPFW2 = PSB_RVDC32(DSPFW2);
+-	dev_priv->saveDSPFW3 = PSB_RVDC32(DSPFW3);
+-	dev_priv->saveDSPFW4 = PSB_RVDC32(DSPFW4);
+-	dev_priv->saveDSPFW5 = PSB_RVDC32(DSPFW5);
+-	dev_priv->saveDSPFW6 = PSB_RVDC32(DSPFW6);
+-	dev_priv->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
+-
+-	/* Save crtc and output state */
+-	mutex_lock(&dev->mode_config.mutex);
+-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+-		if (drm_helper_crtc_in_use(crtc))
+-			crtc->funcs->save(crtc);
+-	}
+-
+-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+-		connector->funcs->save(connector);
+-
+-	mutex_unlock(&dev->mode_config.mutex);
+-	return 0;
+-}
+-
+-/**
+- *	psb_restore_display_registers	-	restore lost register state
+- *	@dev: our DRM device
+- *
+- *	Restore register state that was lost during suspend and resume.
+- */
+-static int psb_restore_display_registers(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct drm_crtc *crtc;
+-	struct drm_connector *connector;
+-
+-	/* Display arbitration + watermarks */
+-	PSB_WVDC32(dev_priv->saveDSPARB, DSPARB);
+-	PSB_WVDC32(dev_priv->saveDSPFW1, DSPFW1);
+-	PSB_WVDC32(dev_priv->saveDSPFW2, DSPFW2);
+-	PSB_WVDC32(dev_priv->saveDSPFW3, DSPFW3);
+-	PSB_WVDC32(dev_priv->saveDSPFW4, DSPFW4);
+-	PSB_WVDC32(dev_priv->saveDSPFW5, DSPFW5);
+-	PSB_WVDC32(dev_priv->saveDSPFW6, DSPFW6);
+-	PSB_WVDC32(dev_priv->saveCHICKENBIT, DSPCHICKENBIT);
+-
+-	/*make sure VGA plane is off. it initializes to on after reset!*/
+-	PSB_WVDC32(0x80000000, VGACNTRL);
+-
+-	mutex_lock(&dev->mode_config.mutex);
+-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+-		if (drm_helper_crtc_in_use(crtc))
+-			crtc->funcs->restore(crtc);
+-
+-	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+-		connector->funcs->restore(connector);
+-
+-	mutex_unlock(&dev->mode_config.mutex);
+-	return 0;
+-}
+-
+-static int psb_power_down(struct drm_device *dev)
+-{
+-	return 0;
+-}
+-
+-static int psb_power_up(struct drm_device *dev)
+-{
+-	return 0;
+-}
+-
+-static void psb_get_core_freq(struct drm_device *dev)
+-{
+-	uint32_t clock;
+-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	/*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
+-	/*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
+-
+-	pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
+-	pci_read_config_dword(pci_root, 0xD4, &clock);
+-	pci_dev_put(pci_root);
+-
+-	switch (clock & 0x07) {
+-	case 0:
+-		dev_priv->core_freq = 100;
+-		break;
+-	case 1:
+-		dev_priv->core_freq = 133;
+-		break;
+-	case 2:
+-		dev_priv->core_freq = 150;
+-		break;
+-	case 3:
+-		dev_priv->core_freq = 178;
+-		break;
+-	case 4:
+-		dev_priv->core_freq = 200;
+-		break;
+-	case 5:
+-	case 6:
+-	case 7:
+-		dev_priv->core_freq = 266;
+-	default:
+-		dev_priv->core_freq = 0;
+-	}
+-}
+-
+-static int psb_chip_setup(struct drm_device *dev)
+-{
+-	psb_get_core_freq(dev);
+-	gma_intel_opregion_init(dev);
+-	psb_intel_init_bios(dev);
+-	return 0;
+-}
+-
+-const struct psb_ops psb_chip_ops = {
+-	.name = "Poulsbo",
+-	.accel_2d = 1,
+-	.pipes = 2,
+-	.crtcs = 2,
+-	.sgx_offset = PSB_SGX_OFFSET,
+-	.chip_setup = psb_chip_setup,
+-
+-	.crtc_helper = &psb_intel_helper_funcs,
+-	.crtc_funcs = &psb_intel_crtc_funcs,
+-
+-	.output_init = psb_output_init,
+-
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-	.backlight_init = psb_backlight_init,
+-#endif
+-
+-	.init_pm = psb_init_pm,
+-	.save_regs = psb_save_display_registers,
+-	.restore_regs = psb_restore_display_registers,
+-	.power_down = psb_power_down,
+-	.power_up = psb_power_up,
+-};
+-
+diff --git a/drivers/staging/gma500/psb_drm.h b/drivers/staging/gma500/psb_drm.h
+deleted file mode 100644
+index 0da8468..0000000
+--- a/drivers/staging/gma500/psb_drm.h
++++ /dev/null
+@@ -1,219 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2007-2011, Intel Corporation.
+- * All Rights Reserved.
+- * Copyright (c) 2008, Tungsten Graphics Inc.  Cedar Park, TX., USA.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- **************************************************************************/
+-
+-#ifndef _PSB_DRM_H_
+-#define _PSB_DRM_H_
+-
+-#define PSB_NUM_PIPE 3
+-
+-#define PSB_GPU_ACCESS_READ         (1ULL << 32)
+-#define PSB_GPU_ACCESS_WRITE        (1ULL << 33)
+-#define PSB_GPU_ACCESS_MASK         (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)
+-
+-#define PSB_BO_FLAG_COMMAND         (1ULL << 52)
+-
+-/*
+- * Feedback components:
+- */
+-
+-struct drm_psb_sizes_arg {
+-	u32 ta_mem_size;
+-	u32 mmu_size;
+-	u32 pds_size;
+-	u32 rastgeom_size;
+-	u32 tt_size;
+-	u32 vram_size;
+-};
+-
+-struct drm_psb_dpst_lut_arg {
+-	uint8_t lut[256];
+-	int output_id;
+-};
+-
+-#define PSB_DC_CRTC_SAVE 0x01
+-#define PSB_DC_CRTC_RESTORE 0x02
+-#define PSB_DC_OUTPUT_SAVE 0x04
+-#define PSB_DC_OUTPUT_RESTORE 0x08
+-#define PSB_DC_CRTC_MASK 0x03
+-#define PSB_DC_OUTPUT_MASK 0x0C
+-
+-struct drm_psb_dc_state_arg {
+-	u32 flags;
+-	u32 obj_id;
+-};
+-
+-struct drm_psb_mode_operation_arg {
+-	u32 obj_id;
+-	u16 operation;
+-	struct drm_mode_modeinfo mode;
+-	void *data;
+-};
+-
+-struct drm_psb_stolen_memory_arg {
+-	u32 base;
+-	u32 size;
+-};
+-
+-/*Display Register Bits*/
+-#define REGRWBITS_PFIT_CONTROLS			(1 << 0)
+-#define REGRWBITS_PFIT_AUTOSCALE_RATIOS		(1 << 1)
+-#define REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS	(1 << 2)
+-#define REGRWBITS_PIPEASRC			(1 << 3)
+-#define REGRWBITS_PIPEBSRC			(1 << 4)
+-#define REGRWBITS_VTOTAL_A			(1 << 5)
+-#define REGRWBITS_VTOTAL_B			(1 << 6)
+-#define REGRWBITS_DSPACNTR	(1 << 8)
+-#define REGRWBITS_DSPBCNTR	(1 << 9)
+-#define REGRWBITS_DSPCCNTR	(1 << 10)
+-
+-/*Overlay Register Bits*/
+-#define OV_REGRWBITS_OVADD			(1 << 0)
+-#define OV_REGRWBITS_OGAM_ALL			(1 << 1)
+-
+-#define OVC_REGRWBITS_OVADD                  (1 << 2)
+-#define OVC_REGRWBITS_OGAM_ALL			(1 << 3)
+-
+-struct drm_psb_register_rw_arg {
+-	u32 b_force_hw_on;
+-
+-	u32 display_read_mask;
+-	u32 display_write_mask;
+-
+-	struct {
+-		u32 pfit_controls;
+-		u32 pfit_autoscale_ratios;
+-		u32 pfit_programmed_scale_ratios;
+-		u32 pipeasrc;
+-		u32 pipebsrc;
+-		u32 vtotal_a;
+-		u32 vtotal_b;
+-	} display;
+-
+-	u32 overlay_read_mask;
+-	u32 overlay_write_mask;
+-
+-	struct {
+-		u32 OVADD;
+-		u32 OGAMC0;
+-		u32 OGAMC1;
+-		u32 OGAMC2;
+-		u32 OGAMC3;
+-		u32 OGAMC4;
+-		u32 OGAMC5;
+-		u32 IEP_ENABLED;
+-		u32 IEP_BLE_MINMAX;
+-		u32 IEP_BSSCC_CONTROL;
+-		u32 b_wait_vblank;
+-	} overlay;
+-
+-	u32 sprite_enable_mask;
+-	u32 sprite_disable_mask;
+-
+-	struct {
+-		u32 dspa_control;
+-		u32 dspa_key_value;
+-		u32 dspa_key_mask;
+-		u32 dspc_control;
+-		u32 dspc_stride;
+-		u32 dspc_position;
+-		u32 dspc_linear_offset;
+-		u32 dspc_size;
+-		u32 dspc_surface;
+-	} sprite;
+-
+-	u32 subpicture_enable_mask;
+-	u32 subpicture_disable_mask;
+-};
+-
+-/* Controlling the kernel modesetting buffers */
+-
+-#define DRM_PSB_SIZES           0x07
+-#define DRM_PSB_FUSE_REG	0x08
+-#define DRM_PSB_DC_STATE	0x0A
+-#define DRM_PSB_ADB		0x0B
+-#define DRM_PSB_MODE_OPERATION	0x0C
+-#define DRM_PSB_STOLEN_MEMORY	0x0D
+-#define DRM_PSB_REGISTER_RW	0x0E
+-
+-/*
+- * NOTE: Add new commands here, but increment
+- * the values below and increment their
+- * corresponding defines where they're
+- * defined elsewhere.
+- */
+-
+-#define DRM_PSB_GEM_CREATE	0x10
+-#define DRM_PSB_2D_OP		0x11
+-#define DRM_PSB_GEM_MMAP	0x12
+-#define DRM_PSB_DPST		0x1B
+-#define DRM_PSB_GAMMA		0x1C
+-#define DRM_PSB_DPST_BL		0x1D
+-#define DRM_PSB_GET_PIPE_FROM_CRTC_ID 0x1F
+-
+-#define PSB_MODE_OPERATION_MODE_VALID	0x01
+-#define PSB_MODE_OPERATION_SET_DC_BASE  0x02
+-
+-struct drm_psb_get_pipe_from_crtc_id_arg {
+-	/** ID of CRTC being requested **/
+-	u32 crtc_id;
+-
+-	/** pipe of requested CRTC **/
+-	u32 pipe;
+-};
+-
+-/* FIXME: move this into a medfield header once we are sure it isn't needed for an
+-   ioctl  */
+-struct psb_drm_dpu_rect {  
+-	int x, y;             
+-	int width, height;    
+-};  
+-
+-struct drm_psb_gem_create {
+-	__u64 size;
+-	__u32 handle;
+-	__u32 flags;
+-#define PSB_GEM_CREATE_STOLEN		1	/* Stolen memory can be used */
+-};
+-
+-#define PSB_2D_OP_BUFLEN		16
+-
+-struct drm_psb_2d_op {
+-	__u32 src;		/* Handles, only src supported right now */
+-	__u32 dst;
+-	__u32 mask;
+-	__u32 pat;
+-	__u32 size;		/* In dwords of command */
+-	__u32 spare;		/* And bumps array to u64 align */
+-	__u32 cmd[PSB_2D_OP_BUFLEN];
+-};
+-
+-struct drm_psb_gem_mmap {
+-	__u32 handle;
+-	__u32 pad;
+-	/**
+-	 * Fake offset to use for subsequent mmap call
+-	 *
+-	 * This is a fixed-size type for 32/64 compatibility.
+-	 */
+-	__u64 offset;
+-};
+-
+-#endif
+diff --git a/drivers/staging/gma500/psb_drv.c b/drivers/staging/gma500/psb_drv.c
+deleted file mode 100644
+index 986a04d..0000000
+--- a/drivers/staging/gma500/psb_drv.c
++++ /dev/null
+@@ -1,1229 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2007-2011, Intel Corporation.
+- * All Rights Reserved.
+- * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- **************************************************************************/
+-
+-#include <drm/drmP.h>
+-#include <drm/drm.h>
+-#include "psb_drm.h"
+-#include "psb_drv.h"
+-#include "framebuffer.h"
+-#include "psb_reg.h"
+-#include "psb_intel_reg.h"
+-#include "intel_bios.h"
+-#include "mid_bios.h"
+-#include "mdfld_dsi_dbi.h"
+-#include <drm/drm_pciids.h>
+-#include "power.h"
+-#include <linux/cpu.h>
+-#include <linux/notifier.h>
+-#include <linux/spinlock.h>
+-#include <linux/pm_runtime.h>
+-#include <linux/module.h>
+-#include <acpi/video.h>
+-
+-static int drm_psb_trap_pagefaults;
+-
+-int drm_psb_no_fb;
+-
+-static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+-
+-MODULE_PARM_DESC(no_fb, "Disable FBdev");
+-MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
+-module_param_named(no_fb, drm_psb_no_fb, int, 0600);
+-module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
+-
+-
+-static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+-	{ 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+-	{ 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+-#if defined(CONFIG_DRM_PSB_MRST)
+-	{ 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+-	{ 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+-	{ 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+-	{ 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+-	{ 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+-	{ 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+-	{ 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+-	{ 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
+-#endif
+-#if defined(CONFIG_DRM_PSB_MFLD)
+-	{ 0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+-	{ 0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+-	{ 0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+-	{ 0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+-	{ 0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+-	{ 0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+-	{ 0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+-	{ 0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
+-#endif
+-#if defined(CONFIG_DRM_PSB_CDV)
+-	{ 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+-	{ 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+-	{ 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+-	{ 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+-	{ 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+-	{ 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+-	{ 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+-	{ 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+-#endif
+-	{ 0, 0, 0}
+-};
+-MODULE_DEVICE_TABLE(pci, pciidlist);
+-
+-/*
+- * Standard IOCTLs.
+- */
+-
+-#define DRM_IOCTL_PSB_SIZES	\
+-		DRM_IOR(DRM_PSB_SIZES + DRM_COMMAND_BASE, \
+-			struct drm_psb_sizes_arg)
+-#define DRM_IOCTL_PSB_FUSE_REG	\
+-		DRM_IOWR(DRM_PSB_FUSE_REG + DRM_COMMAND_BASE, uint32_t)
+-#define DRM_IOCTL_PSB_DC_STATE	\
+-		DRM_IOW(DRM_PSB_DC_STATE + DRM_COMMAND_BASE, \
+-			struct drm_psb_dc_state_arg)
+-#define DRM_IOCTL_PSB_ADB	\
+-		DRM_IOWR(DRM_PSB_ADB + DRM_COMMAND_BASE, uint32_t)
+-#define DRM_IOCTL_PSB_MODE_OPERATION	\
+-		DRM_IOWR(DRM_PSB_MODE_OPERATION + DRM_COMMAND_BASE, \
+-			 struct drm_psb_mode_operation_arg)
+-#define DRM_IOCTL_PSB_STOLEN_MEMORY	\
+-		DRM_IOWR(DRM_PSB_STOLEN_MEMORY + DRM_COMMAND_BASE, \
+-			 struct drm_psb_stolen_memory_arg)
+-#define DRM_IOCTL_PSB_REGISTER_RW	\
+-		DRM_IOWR(DRM_PSB_REGISTER_RW + DRM_COMMAND_BASE, \
+-			 struct drm_psb_register_rw_arg)
+-#define DRM_IOCTL_PSB_DPST	\
+-		DRM_IOWR(DRM_PSB_DPST + DRM_COMMAND_BASE, \
+-			 uint32_t)
+-#define DRM_IOCTL_PSB_GAMMA	\
+-		DRM_IOWR(DRM_PSB_GAMMA + DRM_COMMAND_BASE, \
+-			 struct drm_psb_dpst_lut_arg)
+-#define DRM_IOCTL_PSB_DPST_BL	\
+-		DRM_IOWR(DRM_PSB_DPST_BL + DRM_COMMAND_BASE, \
+-			 uint32_t)
+-#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID	\
+-		DRM_IOWR(DRM_PSB_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
+-			 struct drm_psb_get_pipe_from_crtc_id_arg)
+-#define DRM_IOCTL_PSB_GEM_CREATE	\
+-		DRM_IOWR(DRM_PSB_GEM_CREATE + DRM_COMMAND_BASE, \
+-			 struct drm_psb_gem_create)
+-#define DRM_IOCTL_PSB_2D_OP	\
+-		DRM_IOW(DRM_PSB_2D_OP + DRM_COMMAND_BASE, \
+-			 struct drm_psb_2d_op)
+-#define DRM_IOCTL_PSB_GEM_MMAP	\
+-		DRM_IOWR(DRM_PSB_GEM_MMAP + DRM_COMMAND_BASE, \
+-			 struct drm_psb_gem_mmap)
+-
+-static int psb_sizes_ioctl(struct drm_device *dev, void *data,
+-			   struct drm_file *file_priv);
+-static int psb_dc_state_ioctl(struct drm_device *dev, void * data,
+-			      struct drm_file *file_priv);
+-static int psb_adb_ioctl(struct drm_device *dev, void *data,
+-			 struct drm_file *file_priv);
+-static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+-				    struct drm_file *file_priv);
+-static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+-				   struct drm_file *file_priv);
+-static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
+-				 struct drm_file *file_priv);
+-static int psb_dpst_ioctl(struct drm_device *dev, void *data,
+-			  struct drm_file *file_priv);
+-static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+-			   struct drm_file *file_priv);
+-static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+-			     struct drm_file *file_priv);
+-
+-#define PSB_IOCTL_DEF(ioctl, func, flags) \
+-	[DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+-
+-static struct drm_ioctl_desc psb_ioctls[] = {
+-	PSB_IOCTL_DEF(DRM_IOCTL_PSB_SIZES, psb_sizes_ioctl, DRM_AUTH),
+-	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DC_STATE, psb_dc_state_ioctl, DRM_AUTH),
+-	PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
+-	PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
+-		      DRM_AUTH),
+-	PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
+-		      DRM_AUTH),
+-	PSB_IOCTL_DEF(DRM_IOCTL_PSB_REGISTER_RW, psb_register_rw_ioctl,
+-		      DRM_AUTH),
+-	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST, psb_dpst_ioctl, DRM_AUTH),
+-	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
+-	PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
+-	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID,
+-					psb_intel_get_pipe_from_crtc_id, 0),
+-	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_CREATE, psb_gem_create_ioctl,
+-						DRM_UNLOCKED | DRM_AUTH),
+-	PSB_IOCTL_DEF(DRM_IOCTL_PSB_2D_OP, psb_accel_ioctl,
+-						DRM_UNLOCKED| DRM_AUTH),
+-	PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_MMAP, psb_gem_mmap_ioctl,
+-						DRM_UNLOCKED | DRM_AUTH),
+-};
+-
+-static void psb_lastclose(struct drm_device *dev)
+-{
+-	return;
+-}
+-
+-static void psb_do_takedown(struct drm_device *dev)
+-{
+-}
+-
+-static int psb_do_init(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct psb_gtt *pg = &dev_priv->gtt;
+-
+-	uint32_t stolen_gtt;
+-
+-	int ret = -ENOMEM;
+-
+-	if (pg->mmu_gatt_start & 0x0FFFFFFF) {
+-		dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
+-		ret = -EINVAL;
+-		goto out_err;
+-	}
+-
+-
+-	stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
+-	stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
+-	stolen_gtt =
+-	    (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
+-
+-	dev_priv->gatt_free_offset = pg->mmu_gatt_start +
+-	    (stolen_gtt << PAGE_SHIFT) * 1024;
+-
+-	if (1 || drm_debug) {
+-		uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
+-		uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
+-		DRM_INFO("SGX core id = 0x%08x\n", core_id);
+-		DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
+-			 (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
+-			 _PSB_CC_REVISION_MAJOR_SHIFT,
+-			 (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
+-			 _PSB_CC_REVISION_MINOR_SHIFT);
+-		DRM_INFO
+-		    ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
+-		     (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
+-		     _PSB_CC_REVISION_MAINTENANCE_SHIFT,
+-		     (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
+-		     _PSB_CC_REVISION_DESIGNER_SHIFT);
+-	}
+-
+-
+-	spin_lock_init(&dev_priv->irqmask_lock);
+-	spin_lock_init(&dev_priv->lock_2d);
+-
+-	PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
+-	PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
+-	PSB_RSGX32(PSB_CR_BIF_BANK1);
+-	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
+-							PSB_CR_BIF_CTRL);
+-	psb_spank(dev_priv);
+-
+-	/* mmu_gatt ?? */
+-	PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+-	return 0;
+-out_err:
+-	psb_do_takedown(dev);
+-	return ret;
+-}
+-
+-static int psb_driver_unload(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	/* Kill vblank etc here */
+-
+-	gma_backlight_exit(dev);
+-
+-	if (drm_psb_no_fb == 0)
+-		psb_modeset_cleanup(dev);
+-
+-	if (dev_priv) {
+-		psb_lid_timer_takedown(dev_priv);
+-		gma_intel_opregion_exit(dev);
+-
+-		if (dev_priv->ops->chip_teardown)
+-			dev_priv->ops->chip_teardown(dev);
+-		psb_do_takedown(dev);
+-
+-
+-		if (dev_priv->pf_pd) {
+-			psb_mmu_free_pagedir(dev_priv->pf_pd);
+-			dev_priv->pf_pd = NULL;
+-		}
+-		if (dev_priv->mmu) {
+-			struct psb_gtt *pg = &dev_priv->gtt;
+-
+-			down_read(&pg->sem);
+-			psb_mmu_remove_pfn_sequence(
+-				psb_mmu_get_default_pd
+-				(dev_priv->mmu),
+-				pg->mmu_gatt_start,
+-				dev_priv->vram_stolen_size >> PAGE_SHIFT);
+-			up_read(&pg->sem);
+-			psb_mmu_driver_takedown(dev_priv->mmu);
+-			dev_priv->mmu = NULL;
+-		}
+-		psb_gtt_takedown(dev);
+-		if (dev_priv->scratch_page) {
+-			__free_page(dev_priv->scratch_page);
+-			dev_priv->scratch_page = NULL;
+-		}
+-		if (dev_priv->vdc_reg) {
+-			iounmap(dev_priv->vdc_reg);
+-			dev_priv->vdc_reg = NULL;
+-		}
+-		if (dev_priv->sgx_reg) {
+-			iounmap(dev_priv->sgx_reg);
+-			dev_priv->sgx_reg = NULL;
+-		}
+-
+-		kfree(dev_priv);
+-		dev->dev_private = NULL;
+-
+-		/*destroy VBT data*/
+-		psb_intel_destroy_bios(dev);
+-	}
+-
+-	gma_power_uninit(dev);
+-
+-	return 0;
+-}
+-
+-
+-static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
+-{
+-	struct drm_psb_private *dev_priv;
+-	unsigned long resource_start;
+-	struct psb_gtt *pg;
+-	unsigned long irqflags;
+-	int ret = -ENOMEM;
+-	uint32_t tt_pages;
+-	struct drm_connector *connector;
+-	struct psb_intel_output *psb_intel_output;
+-
+-	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+-	if (dev_priv == NULL)
+-		return -ENOMEM;
+-
+-	dev_priv->ops = (struct psb_ops *)chipset;
+-	dev_priv->dev = dev;
+-	dev->dev_private = (void *) dev_priv;
+-
+-	if (!IS_PSB(dev)) {
+-		if (pci_enable_msi(dev->pdev))
+-			dev_warn(dev->dev, "Enabling MSI failed!\n");
+-	}
+-
+-	dev_priv->num_pipe = dev_priv->ops->pipes;
+-
+-	resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+-
+-	dev_priv->vdc_reg =
+-	    ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
+-	if (!dev_priv->vdc_reg)
+-		goto out_err;
+-
+-	dev_priv->sgx_reg = ioremap(resource_start + dev_priv->ops->sgx_offset,
+-							PSB_SGX_SIZE);
+-	if (!dev_priv->sgx_reg)
+-		goto out_err;
+-
+-	ret = dev_priv->ops->chip_setup(dev);
+-	if (ret)
+-		goto out_err;
+-
+-	/* Init OSPM support */
+-	gma_power_init(dev);
+-
+-	ret = -ENOMEM;
+-
+-	dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
+-	if (!dev_priv->scratch_page)
+-		goto out_err;
+-
+-	set_pages_uc(dev_priv->scratch_page, 1);
+-
+-	ret = psb_gtt_init(dev, 0);
+-	if (ret)
+-		goto out_err;
+-
+-	dev_priv->mmu = psb_mmu_driver_init((void *)0,
+-					drm_psb_trap_pagefaults, 0,
+-					dev_priv);
+-	if (!dev_priv->mmu)
+-		goto out_err;
+-
+-	pg = &dev_priv->gtt;
+-
+-	tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+-		(pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
+-
+-
+-	dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
+-	if (!dev_priv->pf_pd)
+-		goto out_err;
+-
+-	psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+-	psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
+-
+-	ret = psb_do_init(dev);
+-	if (ret)
+-		return ret;
+-
+-	PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
+-	PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
+-
+-/*	igd_opregion_init(&dev_priv->opregion_dev); */
+-	acpi_video_register();
+-	if (dev_priv->lid_state)
+-		psb_lid_timer_init(dev_priv);
+-
+-	ret = drm_vblank_init(dev, dev_priv->num_pipe);
+-	if (ret)
+-		goto out_err;
+-
+-	/*
+-	 * Install interrupt handlers prior to powering off SGX or else we will
+-	 * crash.
+-	 */
+-	dev_priv->vdc_irq_mask = 0;
+-	dev_priv->pipestat[0] = 0;
+-	dev_priv->pipestat[1] = 0;
+-	dev_priv->pipestat[2] = 0;
+-	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+-	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+-	PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+-	PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
+-	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+-	if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
+-		drm_irq_install(dev);
+-
+-	dev->vblank_disable_allowed = 1;
+-
+-	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+-
+-	dev->driver->get_vblank_counter = psb_get_vblank_counter;
+-
+-#if defined(CONFIG_DRM_PSB_MFLD)
+-	/* FIXME: this is not the right place for this stuff ! */
+-	mdfld_output_setup(dev);
+-#endif
+-	if (drm_psb_no_fb == 0) {
+-		psb_modeset_init(dev);
+-		psb_fbdev_init(dev);
+-		drm_kms_helper_poll_init(dev);
+-	}
+-
+-	/* Only add backlight support if we have LVDS output */
+-	list_for_each_entry(connector, &dev->mode_config.connector_list,
+-			    head) {
+-		psb_intel_output = to_psb_intel_output(connector);
+-
+-		switch (psb_intel_output->type) {
+-		case INTEL_OUTPUT_LVDS:
+-		case INTEL_OUTPUT_MIPI:
+-			ret = gma_backlight_init(dev);
+-			break;
+-		}
+-	}
+-
+-	if (ret)
+-		return ret;
+-
+-	/* Enable runtime pm at last */
+-	pm_runtime_set_active(&dev->pdev->dev);
+-	return 0;
+-out_err:
+-	psb_driver_unload(dev);
+-	return ret;
+-}
+-
+-int psb_driver_device_is_agp(struct drm_device *dev)
+-{
+-	return 0;
+-}
+-
+-
+-static int psb_sizes_ioctl(struct drm_device *dev, void *data,
+-			   struct drm_file *file_priv)
+-{
+-	struct drm_psb_private *dev_priv = psb_priv(dev);
+-	struct drm_psb_sizes_arg *arg = data;
+-
+-	*arg = dev_priv->sizes;
+-	return 0;
+-}
+-
+-static int psb_dc_state_ioctl(struct drm_device *dev, void *data,
+-				struct drm_file *file_priv)
+-{
+-	uint32_t flags;
+-	uint32_t obj_id;
+-	struct drm_mode_object *obj;
+-	struct drm_connector *connector;
+-	struct drm_crtc *crtc;
+-	struct drm_psb_dc_state_arg *arg = data;
+-
+-
+-	/* Double check MRST case */
+-	if (IS_MRST(dev) || IS_MFLD(dev))
+-		return -EOPNOTSUPP;
+-
+-	flags = arg->flags;
+-	obj_id = arg->obj_id;
+-
+-	if (flags & PSB_DC_CRTC_MASK) {
+-		obj = drm_mode_object_find(dev, obj_id,
+-				DRM_MODE_OBJECT_CRTC);
+-		if (!obj) {
+-			dev_dbg(dev->dev, "Invalid CRTC object.\n");
+-			return -EINVAL;
+-		}
+-
+-		crtc = obj_to_crtc(obj);
+-
+-		mutex_lock(&dev->mode_config.mutex);
+-		if (drm_helper_crtc_in_use(crtc)) {
+-			if (flags & PSB_DC_CRTC_SAVE)
+-				crtc->funcs->save(crtc);
+-			else
+-				crtc->funcs->restore(crtc);
+-		}
+-		mutex_unlock(&dev->mode_config.mutex);
+-
+-		return 0;
+-	} else if (flags & PSB_DC_OUTPUT_MASK) {
+-		obj = drm_mode_object_find(dev, obj_id,
+-				DRM_MODE_OBJECT_CONNECTOR);
+-		if (!obj) {
+-			dev_dbg(dev->dev, "Invalid connector id.\n");
+-			return -EINVAL;
+-		}
+-
+-		connector = obj_to_connector(obj);
+-		if (flags & PSB_DC_OUTPUT_SAVE)
+-			connector->funcs->save(connector);
+-		else
+-			connector->funcs->restore(connector);
+-
+-		return 0;
+-	}
+-	return -EINVAL;
+-}
+-
+-static inline void get_brightness(struct backlight_device *bd)
+-{
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-	if (bd) {
+-		bd->props.brightness = bd->ops->get_brightness(bd);
+-		backlight_update_status(bd);
+-	}
+-#endif
+-}
+-
+-static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+-		       struct drm_file *file_priv)
+-{
+-	struct drm_psb_private *dev_priv = psb_priv(dev);
+-	uint32_t *arg = data;
+-
+-	dev_priv->blc_adj2 = *arg;
+-	get_brightness(dev_priv->backlight_device);
+-	return 0;
+-}
+-
+-static int psb_adb_ioctl(struct drm_device *dev, void *data,
+-			struct drm_file *file_priv)
+-{
+-	struct drm_psb_private *dev_priv = psb_priv(dev);
+-	uint32_t *arg = data;
+-
+-	dev_priv->blc_adj1 = *arg;
+-	get_brightness(dev_priv->backlight_device);
+-	return 0;
+-}
+-
+-/* return the current mode to the dpst module */
+-static int psb_dpst_ioctl(struct drm_device *dev, void *data,
+-			  struct drm_file *file_priv)
+-{
+-	struct drm_psb_private *dev_priv = psb_priv(dev);
+-	uint32_t *arg = data;
+-	uint32_t x;
+-	uint32_t y;
+-	uint32_t reg;
+-
+-	if (!gma_power_begin(dev, 0))
+-		return -EIO;
+-
+-	reg = PSB_RVDC32(PIPEASRC);
+-
+-	gma_power_end(dev);
+-
+-	/* horizontal is the left 16 bits */
+-	x = reg >> 16;
+-	/* vertical is the right 16 bits */
+-	y = reg & 0x0000ffff;
+-
+-	/* the values are the image size minus one */
+-	x++;
+-	y++;
+-
+-	*arg = (x << 16) | y;
+-
+-	return 0;
+-}
+-static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+-			   struct drm_file *file_priv)
+-{
+-	struct drm_psb_dpst_lut_arg *lut_arg = data;
+-	struct drm_mode_object *obj;
+-	struct drm_crtc *crtc;
+-	struct drm_connector *connector;
+-	struct psb_intel_crtc *psb_intel_crtc;
+-	int i = 0;
+-	int32_t obj_id;
+-
+-	obj_id = lut_arg->output_id;
+-	obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
+-	if (!obj) {
+-		dev_dbg(dev->dev, "Invalid Connector object.\n");
+-		return -EINVAL;
+-	}
+-
+-	connector = obj_to_connector(obj);
+-	crtc = connector->encoder->crtc;
+-	psb_intel_crtc = to_psb_intel_crtc(crtc);
+-
+-	for (i = 0; i < 256; i++)
+-		psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
+-
+-	psb_intel_crtc_load_lut(crtc);
+-
+-	return 0;
+-}
+-
+-static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+-				struct drm_file *file_priv)
+-{
+-	uint32_t obj_id;
+-	uint16_t op;
+-	struct drm_mode_modeinfo *umode;
+-	struct drm_display_mode *mode = NULL;
+-	struct drm_psb_mode_operation_arg *arg;
+-	struct drm_mode_object *obj;
+-	struct drm_connector *connector;
+-	struct drm_framebuffer *drm_fb;
+-	struct psb_framebuffer *psb_fb;
+-	struct drm_connector_helper_funcs *connector_funcs;
+-	int ret = 0;
+-	int resp = MODE_OK;
+-	struct drm_psb_private *dev_priv = psb_priv(dev);
+-
+-	arg = (struct drm_psb_mode_operation_arg *)data;
+-	obj_id = arg->obj_id;
+-	op = arg->operation;
+-
+-	switch (op) {
+-	case PSB_MODE_OPERATION_SET_DC_BASE:
+-		obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_FB);
+-		if (!obj) {
+-			dev_dbg(dev->dev, "Invalid FB id %d\n", obj_id);
+-			return -EINVAL;
+-		}
+-
+-		drm_fb = obj_to_fb(obj);
+-		psb_fb = to_psb_fb(drm_fb);
+-
+-		if (gma_power_begin(dev, 0)) {
+-			REG_WRITE(DSPASURF, psb_fb->gtt->offset);
+-			REG_READ(DSPASURF);
+-			gma_power_end(dev);
+-		} else {
+-			dev_priv->saveDSPASURF = psb_fb->gtt->offset;
+-		}
+-
+-		return 0;
+-	case PSB_MODE_OPERATION_MODE_VALID:
+-		umode = &arg->mode;
+-
+-		mutex_lock(&dev->mode_config.mutex);
+-
+-		obj = drm_mode_object_find(dev, obj_id,
+-					DRM_MODE_OBJECT_CONNECTOR);
+-		if (!obj) {
+-			ret = -EINVAL;
+-			goto mode_op_out;
+-		}
+-
+-		connector = obj_to_connector(obj);
+-
+-		mode = drm_mode_create(dev);
+-		if (!mode) {
+-			ret = -ENOMEM;
+-			goto mode_op_out;
+-		}
+-
+-		/* drm_crtc_convert_umode(mode, umode); */
+-		{
+-			mode->clock = umode->clock;
+-			mode->hdisplay = umode->hdisplay;
+-			mode->hsync_start = umode->hsync_start;
+-			mode->hsync_end = umode->hsync_end;
+-			mode->htotal = umode->htotal;
+-			mode->hskew = umode->hskew;
+-			mode->vdisplay = umode->vdisplay;
+-			mode->vsync_start = umode->vsync_start;
+-			mode->vsync_end = umode->vsync_end;
+-			mode->vtotal = umode->vtotal;
+-			mode->vscan = umode->vscan;
+-			mode->vrefresh = umode->vrefresh;
+-			mode->flags = umode->flags;
+-			mode->type = umode->type;
+-			strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
+-			mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+-		}
+-
+-		connector_funcs = (struct drm_connector_helper_funcs *)
+-				   connector->helper_private;
+-
+-		if (connector_funcs->mode_valid) {
+-			resp = connector_funcs->mode_valid(connector, mode);
+-			arg->data = (void *)resp;
+-		}
+-
+-		/*do some clean up work*/
+-		if (mode)
+-			drm_mode_destroy(dev, mode);
+-mode_op_out:
+-		mutex_unlock(&dev->mode_config.mutex);
+-		return ret;
+-
+-	default:
+-		dev_dbg(dev->dev, "Unsupported psb mode operation\n");
+-		return -EOPNOTSUPP;
+-	}
+-
+-	return 0;
+-}
+-
+-static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+-				   struct drm_file *file_priv)
+-{
+-	struct drm_psb_private *dev_priv = psb_priv(dev);
+-	struct drm_psb_stolen_memory_arg *arg = data;
+-
+-	arg->base = dev_priv->stolen_base;
+-	arg->size = dev_priv->vram_stolen_size;
+-
+-	return 0;
+-}
+-
+-/* FIXME: needs Medfield changes */
+-static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
+-				 struct drm_file *file_priv)
+-{
+-	struct drm_psb_private *dev_priv = psb_priv(dev);
+-	struct drm_psb_register_rw_arg *arg = data;
+-	bool usage = arg->b_force_hw_on ? true : false;
+-
+-	if (arg->display_write_mask != 0) {
+-		if (gma_power_begin(dev, usage)) {
+-			if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
+-				PSB_WVDC32(arg->display.pfit_controls,
+-					   PFIT_CONTROL);
+-			if (arg->display_write_mask &
+-			    REGRWBITS_PFIT_AUTOSCALE_RATIOS)
+-				PSB_WVDC32(arg->display.pfit_autoscale_ratios,
+-					   PFIT_AUTO_RATIOS);
+-			if (arg->display_write_mask &
+-			    REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
+-				PSB_WVDC32(
+-				   arg->display.pfit_programmed_scale_ratios,
+-				   PFIT_PGM_RATIOS);
+-			if (arg->display_write_mask & REGRWBITS_PIPEASRC)
+-				PSB_WVDC32(arg->display.pipeasrc,
+-					   PIPEASRC);
+-			if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
+-				PSB_WVDC32(arg->display.pipebsrc,
+-					   PIPEBSRC);
+-			if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
+-				PSB_WVDC32(arg->display.vtotal_a,
+-					   VTOTAL_A);
+-			if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
+-				PSB_WVDC32(arg->display.vtotal_b,
+-					   VTOTAL_B);
+-			gma_power_end(dev);
+-		} else {
+-			if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
+-				dev_priv->savePFIT_CONTROL =
+-						arg->display.pfit_controls;
+-			if (arg->display_write_mask &
+-			    REGRWBITS_PFIT_AUTOSCALE_RATIOS)
+-				dev_priv->savePFIT_AUTO_RATIOS =
+-					arg->display.pfit_autoscale_ratios;
+-			if (arg->display_write_mask &
+-			    REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
+-				dev_priv->savePFIT_PGM_RATIOS =
+-				   arg->display.pfit_programmed_scale_ratios;
+-			if (arg->display_write_mask & REGRWBITS_PIPEASRC)
+-				dev_priv->savePIPEASRC = arg->display.pipeasrc;
+-			if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
+-				dev_priv->savePIPEBSRC = arg->display.pipebsrc;
+-			if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
+-				dev_priv->saveVTOTAL_A = arg->display.vtotal_a;
+-			if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
+-				dev_priv->saveVTOTAL_B = arg->display.vtotal_b;
+-		}
+-	}
+-
+-	if (arg->display_read_mask != 0) {
+-		if (gma_power_begin(dev, usage)) {
+-			if (arg->display_read_mask &
+-			    REGRWBITS_PFIT_CONTROLS)
+-				arg->display.pfit_controls =
+-						PSB_RVDC32(PFIT_CONTROL);
+-			if (arg->display_read_mask &
+-			    REGRWBITS_PFIT_AUTOSCALE_RATIOS)
+-				arg->display.pfit_autoscale_ratios =
+-						PSB_RVDC32(PFIT_AUTO_RATIOS);
+-			if (arg->display_read_mask &
+-			    REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
+-				arg->display.pfit_programmed_scale_ratios =
+-						PSB_RVDC32(PFIT_PGM_RATIOS);
+-			if (arg->display_read_mask & REGRWBITS_PIPEASRC)
+-				arg->display.pipeasrc = PSB_RVDC32(PIPEASRC);
+-			if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
+-				arg->display.pipebsrc = PSB_RVDC32(PIPEBSRC);
+-			if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
+-				arg->display.vtotal_a = PSB_RVDC32(VTOTAL_A);
+-			if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
+-				arg->display.vtotal_b = PSB_RVDC32(VTOTAL_B);
+-			gma_power_end(dev);
+-		} else {
+-			if (arg->display_read_mask &
+-			    REGRWBITS_PFIT_CONTROLS)
+-				arg->display.pfit_controls =
+-						dev_priv->savePFIT_CONTROL;
+-			if (arg->display_read_mask &
+-			    REGRWBITS_PFIT_AUTOSCALE_RATIOS)
+-				arg->display.pfit_autoscale_ratios =
+-						dev_priv->savePFIT_AUTO_RATIOS;
+-			if (arg->display_read_mask &
+-			    REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
+-				arg->display.pfit_programmed_scale_ratios =
+-						dev_priv->savePFIT_PGM_RATIOS;
+-			if (arg->display_read_mask & REGRWBITS_PIPEASRC)
+-				arg->display.pipeasrc = dev_priv->savePIPEASRC;
+-			if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
+-				arg->display.pipebsrc = dev_priv->savePIPEBSRC;
+-			if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
+-				arg->display.vtotal_a = dev_priv->saveVTOTAL_A;
+-			if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
+-				arg->display.vtotal_b = dev_priv->saveVTOTAL_B;
+-		}
+-	}
+-
+-	if (arg->overlay_write_mask != 0) {
+-		if (gma_power_begin(dev, usage)) {
+-			if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
+-				PSB_WVDC32(arg->overlay.OGAMC5, OV_OGAMC5);
+-				PSB_WVDC32(arg->overlay.OGAMC4, OV_OGAMC4);
+-				PSB_WVDC32(arg->overlay.OGAMC3, OV_OGAMC3);
+-				PSB_WVDC32(arg->overlay.OGAMC2, OV_OGAMC2);
+-				PSB_WVDC32(arg->overlay.OGAMC1, OV_OGAMC1);
+-				PSB_WVDC32(arg->overlay.OGAMC0, OV_OGAMC0);
+-			}
+-			if (arg->overlay_write_mask & OVC_REGRWBITS_OGAM_ALL) {
+-				PSB_WVDC32(arg->overlay.OGAMC5, OVC_OGAMC5);
+-				PSB_WVDC32(arg->overlay.OGAMC4, OVC_OGAMC4);
+-				PSB_WVDC32(arg->overlay.OGAMC3, OVC_OGAMC3);
+-				PSB_WVDC32(arg->overlay.OGAMC2, OVC_OGAMC2);
+-				PSB_WVDC32(arg->overlay.OGAMC1, OVC_OGAMC1);
+-				PSB_WVDC32(arg->overlay.OGAMC0, OVC_OGAMC0);
+-			}
+-
+-			if (arg->overlay_write_mask & OV_REGRWBITS_OVADD) {
+-				PSB_WVDC32(arg->overlay.OVADD, OV_OVADD);
+-
+-				if (arg->overlay.b_wait_vblank) {
+-					/* Wait for 20ms.*/
+-					unsigned long vblank_timeout = jiffies
+-								+ HZ/50;
+-					uint32_t temp;
+-					while (time_before_eq(jiffies,
+-							vblank_timeout)) {
+-						temp = PSB_RVDC32(OV_DOVASTA);
+-						if ((temp & (0x1 << 31)) != 0)
+-							break;
+-						cpu_relax();
+-					}
+-				}
+-			}
+-			if (arg->overlay_write_mask & OVC_REGRWBITS_OVADD) {
+-				PSB_WVDC32(arg->overlay.OVADD, OVC_OVADD);
+-				if (arg->overlay.b_wait_vblank) {
+-					/* Wait for 20ms.*/
+-					unsigned long vblank_timeout =
+-							jiffies + HZ/50;
+-					uint32_t temp;
+-					while (time_before_eq(jiffies,
+-							vblank_timeout)) {
+-						temp = PSB_RVDC32(OVC_DOVCSTA);
+-						if ((temp & (0x1 << 31)) != 0)
+-							break;
+-						cpu_relax();
+-					}
+-				}
+-			}
+-			gma_power_end(dev);
+-		} else {
+-			if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
+-				dev_priv->saveOV_OGAMC5 = arg->overlay.OGAMC5;
+-				dev_priv->saveOV_OGAMC4 = arg->overlay.OGAMC4;
+-				dev_priv->saveOV_OGAMC3 = arg->overlay.OGAMC3;
+-				dev_priv->saveOV_OGAMC2 = arg->overlay.OGAMC2;
+-				dev_priv->saveOV_OGAMC1 = arg->overlay.OGAMC1;
+-				dev_priv->saveOV_OGAMC0 = arg->overlay.OGAMC0;
+-			}
+-			if (arg->overlay_write_mask & OVC_REGRWBITS_OGAM_ALL) {
+-				dev_priv->saveOVC_OGAMC5 = arg->overlay.OGAMC5;
+-				dev_priv->saveOVC_OGAMC4 = arg->overlay.OGAMC4;
+-				dev_priv->saveOVC_OGAMC3 = arg->overlay.OGAMC3;
+-				dev_priv->saveOVC_OGAMC2 = arg->overlay.OGAMC2;
+-				dev_priv->saveOVC_OGAMC1 = arg->overlay.OGAMC1;
+-				dev_priv->saveOVC_OGAMC0 = arg->overlay.OGAMC0;
+-			}
+-			if (arg->overlay_write_mask & OV_REGRWBITS_OVADD)
+-				dev_priv->saveOV_OVADD = arg->overlay.OVADD;
+-			if (arg->overlay_write_mask & OVC_REGRWBITS_OVADD)
+-				dev_priv->saveOVC_OVADD = arg->overlay.OVADD;
+-		}
+-	}
+-
+-	if (arg->overlay_read_mask != 0) {
+-		if (gma_power_begin(dev, usage)) {
+-			if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
+-				arg->overlay.OGAMC5 = PSB_RVDC32(OV_OGAMC5);
+-				arg->overlay.OGAMC4 = PSB_RVDC32(OV_OGAMC4);
+-				arg->overlay.OGAMC3 = PSB_RVDC32(OV_OGAMC3);
+-				arg->overlay.OGAMC2 = PSB_RVDC32(OV_OGAMC2);
+-				arg->overlay.OGAMC1 = PSB_RVDC32(OV_OGAMC1);
+-				arg->overlay.OGAMC0 = PSB_RVDC32(OV_OGAMC0);
+-			}
+-			if (arg->overlay_read_mask & OVC_REGRWBITS_OGAM_ALL) {
+-				arg->overlay.OGAMC5 = PSB_RVDC32(OVC_OGAMC5);
+-				arg->overlay.OGAMC4 = PSB_RVDC32(OVC_OGAMC4);
+-				arg->overlay.OGAMC3 = PSB_RVDC32(OVC_OGAMC3);
+-				arg->overlay.OGAMC2 = PSB_RVDC32(OVC_OGAMC2);
+-				arg->overlay.OGAMC1 = PSB_RVDC32(OVC_OGAMC1);
+-				arg->overlay.OGAMC0 = PSB_RVDC32(OVC_OGAMC0);
+-			}
+-			if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
+-				arg->overlay.OVADD = PSB_RVDC32(OV_OVADD);
+-			if (arg->overlay_read_mask & OVC_REGRWBITS_OVADD)
+-				arg->overlay.OVADD = PSB_RVDC32(OVC_OVADD);
+-			gma_power_end(dev);
+-		} else {
+-			if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
+-				arg->overlay.OGAMC5 = dev_priv->saveOV_OGAMC5;
+-				arg->overlay.OGAMC4 = dev_priv->saveOV_OGAMC4;
+-				arg->overlay.OGAMC3 = dev_priv->saveOV_OGAMC3;
+-				arg->overlay.OGAMC2 = dev_priv->saveOV_OGAMC2;
+-				arg->overlay.OGAMC1 = dev_priv->saveOV_OGAMC1;
+-				arg->overlay.OGAMC0 = dev_priv->saveOV_OGAMC0;
+-			}
+-			if (arg->overlay_read_mask & OVC_REGRWBITS_OGAM_ALL) {
+-				arg->overlay.OGAMC5 = dev_priv->saveOVC_OGAMC5;
+-				arg->overlay.OGAMC4 = dev_priv->saveOVC_OGAMC4;
+-				arg->overlay.OGAMC3 = dev_priv->saveOVC_OGAMC3;
+-				arg->overlay.OGAMC2 = dev_priv->saveOVC_OGAMC2;
+-				arg->overlay.OGAMC1 = dev_priv->saveOVC_OGAMC1;
+-				arg->overlay.OGAMC0 = dev_priv->saveOVC_OGAMC0;
+-			}
+-			if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
+-				arg->overlay.OVADD = dev_priv->saveOV_OVADD;
+-			if (arg->overlay_read_mask & OVC_REGRWBITS_OVADD)
+-				arg->overlay.OVADD = dev_priv->saveOVC_OVADD;
+-		}
+-	}
+-
+-	if (arg->sprite_enable_mask != 0) {
+-		if (gma_power_begin(dev, usage)) {
+-			PSB_WVDC32(0x1F3E, DSPARB);
+-			PSB_WVDC32(arg->sprite.dspa_control
+-					| PSB_RVDC32(DSPACNTR), DSPACNTR);
+-			PSB_WVDC32(arg->sprite.dspa_key_value, DSPAKEYVAL);
+-			PSB_WVDC32(arg->sprite.dspa_key_mask, DSPAKEYMASK);
+-			PSB_WVDC32(PSB_RVDC32(DSPASURF), DSPASURF);
+-			PSB_RVDC32(DSPASURF);
+-			PSB_WVDC32(arg->sprite.dspc_control, DSPCCNTR);
+-			PSB_WVDC32(arg->sprite.dspc_stride, DSPCSTRIDE);
+-			PSB_WVDC32(arg->sprite.dspc_position, DSPCPOS);
+-			PSB_WVDC32(arg->sprite.dspc_linear_offset, DSPCLINOFF);
+-			PSB_WVDC32(arg->sprite.dspc_size, DSPCSIZE);
+-			PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF);
+-			PSB_RVDC32(DSPCSURF);
+-			gma_power_end(dev);
+-		}
+-	}
+-
+-	if (arg->sprite_disable_mask != 0) {
+-		if (gma_power_begin(dev, usage)) {
+-			PSB_WVDC32(0x3F3E, DSPARB);
+-			PSB_WVDC32(0x0, DSPCCNTR);
+-			PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF);
+-			PSB_RVDC32(DSPCSURF);
+-			gma_power_end(dev);
+-		}
+-	}
+-
+-	if (arg->subpicture_enable_mask != 0) {
+-		if (gma_power_begin(dev, usage)) {
+-			uint32_t temp;
+-			if (arg->subpicture_enable_mask & REGRWBITS_DSPACNTR) {
+-				temp =  PSB_RVDC32(DSPACNTR);
+-				temp &= ~DISPPLANE_PIXFORMAT_MASK;
+-				temp &= ~DISPPLANE_BOTTOM;
+-				temp |= DISPPLANE_32BPP;
+-				PSB_WVDC32(temp, DSPACNTR);
+-
+-				temp =  PSB_RVDC32(DSPABASE);
+-				PSB_WVDC32(temp, DSPABASE);
+-				PSB_RVDC32(DSPABASE);
+-				temp =  PSB_RVDC32(DSPASURF);
+-				PSB_WVDC32(temp, DSPASURF);
+-				PSB_RVDC32(DSPASURF);
+-			}
+-			if (arg->subpicture_enable_mask & REGRWBITS_DSPBCNTR) {
+-				temp =  PSB_RVDC32(DSPBCNTR);
+-				temp &= ~DISPPLANE_PIXFORMAT_MASK;
+-				temp &= ~DISPPLANE_BOTTOM;
+-				temp |= DISPPLANE_32BPP;
+-				PSB_WVDC32(temp, DSPBCNTR);
+-
+-				temp =  PSB_RVDC32(DSPBBASE);
+-				PSB_WVDC32(temp, DSPBBASE);
+-				PSB_RVDC32(DSPBBASE);
+-				temp =  PSB_RVDC32(DSPBSURF);
+-				PSB_WVDC32(temp, DSPBSURF);
+-				PSB_RVDC32(DSPBSURF);
+-			}
+-			if (arg->subpicture_enable_mask & REGRWBITS_DSPCCNTR) {
+-				temp =  PSB_RVDC32(DSPCCNTR);
+-				temp &= ~DISPPLANE_PIXFORMAT_MASK;
+-				temp &= ~DISPPLANE_BOTTOM;
+-				temp |= DISPPLANE_32BPP;
+-				PSB_WVDC32(temp, DSPCCNTR);
+-
+-				temp =  PSB_RVDC32(DSPCBASE);
+-				PSB_WVDC32(temp, DSPCBASE);
+-				PSB_RVDC32(DSPCBASE);
+-				temp =  PSB_RVDC32(DSPCSURF);
+-				PSB_WVDC32(temp, DSPCSURF);
+-				PSB_RVDC32(DSPCSURF);
+-			}
+-			gma_power_end(dev);
+-		}
+-	}
+-
+-	if (arg->subpicture_disable_mask != 0) {
+-		if (gma_power_begin(dev, usage)) {
+-			uint32_t temp;
+-			if (arg->subpicture_disable_mask & REGRWBITS_DSPACNTR) {
+-				temp =  PSB_RVDC32(DSPACNTR);
+-				temp &= ~DISPPLANE_PIXFORMAT_MASK;
+-				temp |= DISPPLANE_32BPP_NO_ALPHA;
+-				PSB_WVDC32(temp, DSPACNTR);
+-
+-				temp =  PSB_RVDC32(DSPABASE);
+-				PSB_WVDC32(temp, DSPABASE);
+-				PSB_RVDC32(DSPABASE);
+-				temp =  PSB_RVDC32(DSPASURF);
+-				PSB_WVDC32(temp, DSPASURF);
+-				PSB_RVDC32(DSPASURF);
+-			}
+-			if (arg->subpicture_disable_mask & REGRWBITS_DSPBCNTR) {
+-				temp =  PSB_RVDC32(DSPBCNTR);
+-				temp &= ~DISPPLANE_PIXFORMAT_MASK;
+-				temp |= DISPPLANE_32BPP_NO_ALPHA;
+-				PSB_WVDC32(temp, DSPBCNTR);
+-
+-				temp =  PSB_RVDC32(DSPBBASE);
+-				PSB_WVDC32(temp, DSPBBASE);
+-				PSB_RVDC32(DSPBBASE);
+-				temp =  PSB_RVDC32(DSPBSURF);
+-				PSB_WVDC32(temp, DSPBSURF);
+-				PSB_RVDC32(DSPBSURF);
+-			}
+-			if (arg->subpicture_disable_mask & REGRWBITS_DSPCCNTR) {
+-				temp =  PSB_RVDC32(DSPCCNTR);
+-				temp &= ~DISPPLANE_PIXFORMAT_MASK;
+-				temp |= DISPPLANE_32BPP_NO_ALPHA;
+-				PSB_WVDC32(temp, DSPCCNTR);
+-
+-				temp =  PSB_RVDC32(DSPCBASE);
+-				PSB_WVDC32(temp, DSPCBASE);
+-				PSB_RVDC32(DSPCBASE);
+-				temp =  PSB_RVDC32(DSPCSURF);
+-				PSB_WVDC32(temp, DSPCSURF);
+-				PSB_RVDC32(DSPCSURF);
+-			}
+-			gma_power_end(dev);
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+-static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
+-{
+-	return 0;
+-}
+-
+-static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
+-{
+-}
+-
+-static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
+-			       unsigned long arg)
+-{
+-	struct drm_file *file_priv = filp->private_data;
+-	struct drm_device *dev = file_priv->minor->dev;
+-	int ret;
+-	
+-	pm_runtime_forbid(dev->dev);
+-	ret = drm_ioctl(filp, cmd, arg);
+-	pm_runtime_allow(dev->dev);
+-	return ret;
+-	/* FIXME: do we need to wrap the other side of this */
+-}
+-
+-
+-/* When a client dies:
+- *    - Check for and clean up flipped page state
+- */
+-void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
+-{
+-}
+-
+-static void psb_remove(struct pci_dev *pdev)
+-{
+-	struct drm_device *dev = pci_get_drvdata(pdev);
+-	drm_put_dev(dev);
+-}
+-
+-static const struct dev_pm_ops psb_pm_ops = {
+-	.suspend = gma_power_suspend,
+-	.resume = gma_power_resume,
+-	.freeze = gma_power_suspend,
+-	.thaw = gma_power_resume,
+-	.poweroff = gma_power_suspend,
+-	.restore = gma_power_resume,
+-	.runtime_suspend = psb_runtime_suspend,
+-	.runtime_resume = psb_runtime_resume,
+-	.runtime_idle = psb_runtime_idle,
+-};
+-
+-static struct vm_operations_struct psb_gem_vm_ops = {
+-	.fault = psb_gem_fault,
+-	.open = drm_gem_vm_open,
+-	.close = drm_gem_vm_close,
+-};
+-
+-static struct drm_driver driver = {
+-	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
+-			   DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM ,
+-	.load = psb_driver_load,
+-	.unload = psb_driver_unload,
+-
+-	.ioctls = psb_ioctls,
+-	.num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
+-	.device_is_agp = psb_driver_device_is_agp,
+-	.irq_preinstall = psb_irq_preinstall,
+-	.irq_postinstall = psb_irq_postinstall,
+-	.irq_uninstall = psb_irq_uninstall,
+-	.irq_handler = psb_irq_handler,
+-	.enable_vblank = psb_enable_vblank,
+-	.disable_vblank = psb_disable_vblank,
+-	.get_vblank_counter = psb_get_vblank_counter,
+-	.lastclose = psb_lastclose,
+-	.open = psb_driver_open,
+-	.preclose = psb_driver_preclose,
+-	.postclose = psb_driver_close,
+-	.reclaim_buffers = drm_core_reclaim_buffers,
+-
+-	.gem_init_object = psb_gem_init_object,
+-	.gem_free_object = psb_gem_free_object,
+-	.gem_vm_ops = &psb_gem_vm_ops,
+-	.dumb_create = psb_gem_dumb_create,
+-	.dumb_map_offset = psb_gem_dumb_map_gtt,
+-	.dumb_destroy = psb_gem_dumb_destroy,
+-
+-	.fops = {
+-		 .owner = THIS_MODULE,
+-		 .open = drm_open,
+-		 .release = drm_release,
+-		 .unlocked_ioctl = psb_unlocked_ioctl,
+-		 .mmap = drm_gem_mmap,
+-		 .poll = drm_poll,
+-		 .fasync = drm_fasync,
+-		 .read = drm_read,
+-	 },
+-	.name = DRIVER_NAME,
+-	.desc = DRIVER_DESC,
+-	.date = PSB_DRM_DRIVER_DATE,
+-	.major = PSB_DRM_DRIVER_MAJOR,
+-	.minor = PSB_DRM_DRIVER_MINOR,
+-	.patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
+-};
+-
+-static struct pci_driver psb_pci_driver = {
+-	.name = DRIVER_NAME,
+-	.id_table = pciidlist,
+-	.probe = psb_probe,
+-	.remove = psb_remove,
+-	.driver.pm = &psb_pm_ops,
+-};
+-
+-static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+-{
+-	return drm_get_pci_dev(pdev, ent, &driver);
+-}
+-
+-static int __init psb_init(void)
+-{
+-	return drm_pci_init(&driver, &psb_pci_driver);
+-}
+-
+-static void __exit psb_exit(void)
+-{
+-	drm_pci_exit(&driver, &psb_pci_driver);
+-}
+-
+-late_initcall(psb_init);
+-module_exit(psb_exit);
+-
+-MODULE_AUTHOR("Alan Cox <alan at linux.intel.com> and others");
+-MODULE_DESCRIPTION(DRIVER_DESC);
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/staging/gma500/psb_drv.h b/drivers/staging/gma500/psb_drv.h
+deleted file mode 100644
+index 11d963a..0000000
+--- a/drivers/staging/gma500/psb_drv.h
++++ /dev/null
+@@ -1,952 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2007-2011, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- **************************************************************************/
+-
+-#ifndef _PSB_DRV_H_
+-#define _PSB_DRV_H_
+-
+-#include <linux/kref.h>
+-
+-#include <drm/drmP.h>
+-#include "drm_global.h"
+-#include "gem_glue.h"
+-#include "psb_drm.h"
+-#include "psb_reg.h"
+-#include "psb_intel_drv.h"
+-#include "gtt.h"
+-#include "power.h"
+-#include "mrst.h"
+-#include "medfield.h"
+-
+-/* Append new drm mode definition here, align with libdrm definition */
+-#define DRM_MODE_SCALE_NO_SCALE   	2
+-
+-enum {
+-	CHIP_PSB_8108 = 0,		/* Poulsbo */
+-	CHIP_PSB_8109 = 1,		/* Poulsbo */
+-	CHIP_MRST_4100 = 2,		/* Moorestown/Oaktrail */
+-	CHIP_MFLD_0130 = 3,		/* Medfield */
+-};
+-
+-#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
+-#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
+-#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
+-
+-/*
+- * Driver definitions
+- */
+-
+-#define DRIVER_NAME "gma500"
+-#define DRIVER_DESC "DRM driver for the Intel GMA500"
+-
+-#define PSB_DRM_DRIVER_DATE "2011-06-06"
+-#define PSB_DRM_DRIVER_MAJOR 1
+-#define PSB_DRM_DRIVER_MINOR 0
+-#define PSB_DRM_DRIVER_PATCHLEVEL 0
+-
+-/*
+- *	Hardware offsets
+- */
+-#define PSB_VDC_OFFSET		 0x00000000
+-#define PSB_VDC_SIZE		 0x000080000
+-#define MRST_MMIO_SIZE		 0x0000C0000
+-#define MDFLD_MMIO_SIZE          0x000100000
+-#define PSB_SGX_SIZE		 0x8000
+-#define PSB_SGX_OFFSET		 0x00040000
+-#define MRST_SGX_OFFSET		 0x00080000
+-/*
+- *	PCI resource identifiers
+- */
+-#define PSB_MMIO_RESOURCE	 0
+-#define PSB_GATT_RESOURCE	 2
+-#define PSB_GTT_RESOURCE	 3
+-/*
+- *	PCI configuration
+- */
+-#define PSB_GMCH_CTRL		 0x52
+-#define PSB_BSM			 0x5C
+-#define _PSB_GMCH_ENABLED	 0x4
+-#define PSB_PGETBL_CTL		 0x2020
+-#define _PSB_PGETBL_ENABLED	 0x00000001
+-#define PSB_SGX_2D_SLAVE_PORT	 0x4000
+-
+-/* To get rid of */
+-#define PSB_TT_PRIV0_LIMIT	 (256*1024*1024)
+-#define PSB_TT_PRIV0_PLIMIT	 (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
+-
+-/*
+- *	SGX side MMU definitions (these can probably go)
+- */
+-
+-/*
+- *	Flags for external memory type field.
+- */
+-#define PSB_MMU_CACHED_MEMORY	  0x0001	/* Bind to MMU only */
+-#define PSB_MMU_RO_MEMORY	  0x0002	/* MMU RO memory */
+-#define PSB_MMU_WO_MEMORY	  0x0004	/* MMU WO memory */
+-/*
+- *	PTE's and PDE's
+- */
+-#define PSB_PDE_MASK		  0x003FFFFF
+-#define PSB_PDE_SHIFT		  22
+-#define PSB_PTE_SHIFT		  12
+-/*
+- *	Cache control
+- */
+-#define PSB_PTE_VALID		  0x0001	/* PTE / PDE valid */
+-#define PSB_PTE_WO		  0x0002	/* Write only */
+-#define PSB_PTE_RO		  0x0004	/* Read only */
+-#define PSB_PTE_CACHED		  0x0008	/* CPU cache coherent */
+-
+-/*
+- *	VDC registers and bits
+- */
+-#define PSB_MSVDX_CLOCKGATING	  0x2064
+-#define PSB_TOPAZ_CLOCKGATING	  0x2068
+-#define PSB_HWSTAM		  0x2098
+-#define PSB_INSTPM		  0x20C0
+-#define PSB_INT_IDENTITY_R        0x20A4
+-#define _MDFLD_PIPEC_EVENT_FLAG   (1<<2)
+-#define _MDFLD_PIPEC_VBLANK_FLAG  (1<<3)
+-#define _PSB_DPST_PIPEB_FLAG      (1<<4)
+-#define _MDFLD_PIPEB_EVENT_FLAG   (1<<4)
+-#define _PSB_VSYNC_PIPEB_FLAG	  (1<<5)
+-#define _PSB_DPST_PIPEA_FLAG      (1<<6)
+-#define _PSB_PIPEA_EVENT_FLAG     (1<<6)
+-#define _PSB_VSYNC_PIPEA_FLAG	  (1<<7)
+-#define _MDFLD_MIPIA_FLAG	  (1<<16)
+-#define _MDFLD_MIPIC_FLAG	  (1<<17)
+-#define _PSB_IRQ_SGX_FLAG	  (1<<18)
+-#define _PSB_IRQ_MSVDX_FLAG	  (1<<19)
+-#define _LNC_IRQ_TOPAZ_FLAG	  (1<<20)
+-
+-#define _PSB_PIPE_EVENT_FLAG	(_PSB_VSYNC_PIPEA_FLAG | \
+-				 _PSB_VSYNC_PIPEB_FLAG)
+-
+-/* This flag includes all the display IRQ bits excepts the vblank irqs. */
+-#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
+-				  _MDFLD_PIPEB_EVENT_FLAG | \
+-				  _PSB_PIPEA_EVENT_FLAG | \
+-				  _PSB_VSYNC_PIPEA_FLAG | \
+-				  _MDFLD_MIPIA_FLAG | \
+-				  _MDFLD_MIPIC_FLAG)
+-#define PSB_INT_IDENTITY_R	  0x20A4
+-#define PSB_INT_MASK_R		  0x20A8
+-#define PSB_INT_ENABLE_R	  0x20A0
+-
+-#define _PSB_MMU_ER_MASK      0x0001FF00
+-#define _PSB_MMU_ER_HOST      (1 << 16)
+-#define GPIOA			0x5010
+-#define GPIOB			0x5014
+-#define GPIOC			0x5018
+-#define GPIOD			0x501c
+-#define GPIOE			0x5020
+-#define GPIOF			0x5024
+-#define GPIOG			0x5028
+-#define GPIOH			0x502c
+-#define GPIO_CLOCK_DIR_MASK		(1 << 0)
+-#define GPIO_CLOCK_DIR_IN		(0 << 1)
+-#define GPIO_CLOCK_DIR_OUT		(1 << 1)
+-#define GPIO_CLOCK_VAL_MASK		(1 << 2)
+-#define GPIO_CLOCK_VAL_OUT		(1 << 3)
+-#define GPIO_CLOCK_VAL_IN		(1 << 4)
+-#define GPIO_CLOCK_PULLUP_DISABLE	(1 << 5)
+-#define GPIO_DATA_DIR_MASK		(1 << 8)
+-#define GPIO_DATA_DIR_IN		(0 << 9)
+-#define GPIO_DATA_DIR_OUT		(1 << 9)
+-#define GPIO_DATA_VAL_MASK		(1 << 10)
+-#define GPIO_DATA_VAL_OUT		(1 << 11)
+-#define GPIO_DATA_VAL_IN		(1 << 12)
+-#define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
+-
+-#define VCLK_DIVISOR_VGA0   0x6000
+-#define VCLK_DIVISOR_VGA1   0x6004
+-#define VCLK_POST_DIV	    0x6010
+-
+-#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
+-#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
+-#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
+-#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
+-#define PSB_COMM_USER_IRQ (1024 >> 2)
+-#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
+-#define PSB_COMM_FW (2048 >> 2)
+-
+-#define PSB_UIRQ_VISTEST	       1
+-#define PSB_UIRQ_OOM_REPLY	       2
+-#define PSB_UIRQ_FIRE_TA_REPLY	       3
+-#define PSB_UIRQ_FIRE_RASTER_REPLY     4
+-
+-#define PSB_2D_SIZE (256*1024*1024)
+-#define PSB_MAX_RELOC_PAGES 1024
+-
+-#define PSB_LOW_REG_OFFS 0x0204
+-#define PSB_HIGH_REG_OFFS 0x0600
+-
+-#define PSB_NUM_VBLANKS 2
+-
+-
+-#define PSB_2D_SIZE (256*1024*1024)
+-#define PSB_MAX_RELOC_PAGES 1024
+-
+-#define PSB_LOW_REG_OFFS 0x0204
+-#define PSB_HIGH_REG_OFFS 0x0600
+-
+-#define PSB_NUM_VBLANKS 2
+-#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
+-#define PSB_LID_DELAY (DRM_HZ / 10)
+-
+-#define MDFLD_PNW_B0 0x04
+-#define MDFLD_PNW_C0 0x08
+-
+-#define MDFLD_DSR_2D_3D_0 	(1 << 0)
+-#define MDFLD_DSR_2D_3D_2 	(1 << 1)
+-#define MDFLD_DSR_CURSOR_0 	(1 << 2)
+-#define MDFLD_DSR_CURSOR_2	(1 << 3)
+-#define MDFLD_DSR_OVERLAY_0 	(1 << 4)
+-#define MDFLD_DSR_OVERLAY_2 	(1 << 5)
+-#define MDFLD_DSR_MIPI_CONTROL	(1 << 6)
+-#define MDFLD_DSR_DAMAGE_MASK_0	((1 << 0) | (1 << 2) | (1 << 4))
+-#define MDFLD_DSR_DAMAGE_MASK_2	((1 << 1) | (1 << 3) | (1 << 5))
+-#define MDFLD_DSR_2D_3D 	(MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
+-
+-#define MDFLD_DSR_RR		45
+-#define MDFLD_DPU_ENABLE 	(1 << 31)
+-#define MDFLD_DSR_FULLSCREEN 	(1 << 30)
+-#define MDFLD_DSR_DELAY		(DRM_HZ / MDFLD_DSR_RR)
+-
+-#define PSB_PWR_STATE_ON		1
+-#define PSB_PWR_STATE_OFF		2
+-
+-#define PSB_PMPOLICY_NOPM		0
+-#define PSB_PMPOLICY_CLOCKGATING	1
+-#define PSB_PMPOLICY_POWERDOWN		2
+-
+-#define PSB_PMSTATE_POWERUP		0
+-#define PSB_PMSTATE_CLOCKGATED		1
+-#define PSB_PMSTATE_POWERDOWN		2
+-#define PSB_PCIx_MSI_ADDR_LOC		0x94
+-#define PSB_PCIx_MSI_DATA_LOC		0x98
+-
+-/* Medfield crystal settings */
+-#define KSEL_CRYSTAL_19 1
+-#define KSEL_BYPASS_19 5
+-#define KSEL_BYPASS_25 6
+-#define KSEL_BYPASS_83_100 7
+-
+-struct opregion_header;
+-struct opregion_acpi;
+-struct opregion_swsci;
+-struct opregion_asle;
+-
+-struct psb_intel_opregion {
+-	struct opregion_header *header;
+-	struct opregion_acpi *acpi;
+-	struct opregion_swsci *swsci;
+-	struct opregion_asle *asle;
+-	int enabled;
+-};
+-
+-struct psb_ops;
+-
+-struct drm_psb_private {
+-	struct drm_device *dev;
+-	const struct psb_ops *ops;
+-
+-	struct psb_gtt gtt;
+-
+-	/* GTT Memory manager */
+-	struct psb_gtt_mm *gtt_mm;
+-	struct page *scratch_page;
+-	u32 *gtt_map;
+-	uint32_t stolen_base;
+-	void *vram_addr;
+-	unsigned long vram_stolen_size;
+-	int gtt_initialized;
+-	u16 gmch_ctrl;		/* Saved GTT setup */
+-	u32 pge_ctl;
+-
+-	struct mutex gtt_mutex;
+-	struct resource *gtt_mem;	/* Our PCI resource */
+-
+-	struct psb_mmu_driver *mmu;
+-	struct psb_mmu_pd *pf_pd;
+-
+-	/*
+-	 * Register base
+-	 */
+-
+-	uint8_t *sgx_reg;
+-	uint8_t *vdc_reg;
+-	uint32_t gatt_free_offset;
+-
+-	/*
+-	 * Fencing / irq.
+-	 */
+-
+-	uint32_t vdc_irq_mask;
+-	uint32_t pipestat[PSB_NUM_PIPE];
+-
+-	spinlock_t irqmask_lock;
+-
+-	/*
+-	 * Power
+-	 */
+-
+-	bool suspended;
+-	bool display_power;
+-	int display_count;
+-
+-	/*
+-	 * Modesetting
+-	 */
+-	struct psb_intel_mode_device mode_dev;
+-
+-	struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
+-	struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
+-	uint32_t num_pipe;
+-
+-	/*
+-	 * OSPM info (Power management base) (can go ?)
+-	 */
+-	uint32_t ospm_base;
+-
+-	/*
+-	 * Sizes info
+-	 */
+-
+-	struct drm_psb_sizes_arg sizes;
+-
+-	u32 fuse_reg_value;
+-	u32 video_device_fuse;
+-
+-	/* PCI revision ID for B0:D2:F0 */
+-	uint8_t platform_rev_id;
+-
+-	/*
+-	 * LVDS info
+-	 */
+-	int backlight_duty_cycle;	/* restore backlight to this value */
+-	bool panel_wants_dither;
+-	struct drm_display_mode *panel_fixed_mode;
+-	struct drm_display_mode *lfp_lvds_vbt_mode;
+-	struct drm_display_mode *sdvo_lvds_vbt_mode;
+-
+-	struct bdb_lvds_backlight *lvds_bl; /* LVDS backlight info from VBT */
+-	struct psb_intel_i2c_chan *lvds_i2c_bus;
+-
+-	/* Feature bits from the VBIOS */
+-	unsigned int int_tv_support:1;
+-	unsigned int lvds_dither:1;
+-	unsigned int lvds_vbt:1;
+-	unsigned int int_crt_support:1;
+-	unsigned int lvds_use_ssc:1;
+-	int lvds_ssc_freq;
+-	bool is_lvds_on;
+-	bool is_mipi_on;
+-	u32 mipi_ctrl_display;
+-
+-	unsigned int core_freq;
+-	uint32_t iLVDS_enable;
+-
+-	/* Runtime PM state */
+-	int rpm_enabled;
+-
+-	/* MID specific */
+-	struct mrst_vbt vbt_data;
+-	struct mrst_gct_data gct_data;
+-
+-	/* MIPI Panel type etc */
+-	int panel_id;
+-	bool dual_mipi;		/* dual display - DPI & DBI */
+-	bool dpi_panel_on;	/* The DPI panel power is on */
+-	bool dpi_panel_on2;	/* The DPI panel power is on */
+-	bool dbi_panel_on;	/* The DBI panel power is on */
+-	bool dbi_panel_on2;	/* The DBI panel power is on */
+-	u32 dsr_fb_update;	/* DSR FB update counter */
+-
+-	/* Moorestown HDMI state */
+-	struct mrst_hdmi_dev *hdmi_priv;
+-
+-	/* Moorestown pipe config register value cache */
+-	uint32_t pipeconf;
+-	uint32_t pipeconf1;
+-	uint32_t pipeconf2;
+-
+-	/* Moorestown plane control register value cache */
+-	uint32_t dspcntr;
+-	uint32_t dspcntr1;
+-	uint32_t dspcntr2;
+-
+-	/* Moorestown MM backlight cache */
+-	uint8_t saveBKLTCNT;
+-	uint8_t saveBKLTREQ;
+-	uint8_t saveBKLTBRTL;
+-
+-	/*
+-	 * Register state
+-	 */
+-	uint32_t saveDSPACNTR;
+-	uint32_t saveDSPBCNTR;
+-	uint32_t savePIPEACONF;
+-	uint32_t savePIPEBCONF;
+-	uint32_t savePIPEASRC;
+-	uint32_t savePIPEBSRC;
+-	uint32_t saveFPA0;
+-	uint32_t saveFPA1;
+-	uint32_t saveDPLL_A;
+-	uint32_t saveDPLL_A_MD;
+-	uint32_t saveHTOTAL_A;
+-	uint32_t saveHBLANK_A;
+-	uint32_t saveHSYNC_A;
+-	uint32_t saveVTOTAL_A;
+-	uint32_t saveVBLANK_A;
+-	uint32_t saveVSYNC_A;
+-	uint32_t saveDSPASTRIDE;
+-	uint32_t saveDSPASIZE;
+-	uint32_t saveDSPAPOS;
+-	uint32_t saveDSPABASE;
+-	uint32_t saveDSPASURF;
+-	uint32_t saveDSPASTATUS;
+-	uint32_t saveFPB0;
+-	uint32_t saveFPB1;
+-	uint32_t saveDPLL_B;
+-	uint32_t saveDPLL_B_MD;
+-	uint32_t saveHTOTAL_B;
+-	uint32_t saveHBLANK_B;
+-	uint32_t saveHSYNC_B;
+-	uint32_t saveVTOTAL_B;
+-	uint32_t saveVBLANK_B;
+-	uint32_t saveVSYNC_B;
+-	uint32_t saveDSPBSTRIDE;
+-	uint32_t saveDSPBSIZE;
+-	uint32_t saveDSPBPOS;
+-	uint32_t saveDSPBBASE;
+-	uint32_t saveDSPBSURF;
+-	uint32_t saveDSPBSTATUS;
+-	uint32_t saveVCLK_DIVISOR_VGA0;
+-	uint32_t saveVCLK_DIVISOR_VGA1;
+-	uint32_t saveVCLK_POST_DIV;
+-	uint32_t saveVGACNTRL;
+-	uint32_t saveADPA;
+-	uint32_t saveLVDS;
+-	uint32_t saveDVOA;
+-	uint32_t saveDVOB;
+-	uint32_t saveDVOC;
+-	uint32_t savePP_ON;
+-	uint32_t savePP_OFF;
+-	uint32_t savePP_CONTROL;
+-	uint32_t savePP_CYCLE;
+-	uint32_t savePFIT_CONTROL;
+-	uint32_t savePaletteA[256];
+-	uint32_t savePaletteB[256];
+-	uint32_t saveBLC_PWM_CTL2;
+-	uint32_t saveBLC_PWM_CTL;
+-	uint32_t saveCLOCKGATING;
+-	uint32_t saveDSPARB;
+-	uint32_t saveDSPATILEOFF;
+-	uint32_t saveDSPBTILEOFF;
+-	uint32_t saveDSPAADDR;
+-	uint32_t saveDSPBADDR;
+-	uint32_t savePFIT_AUTO_RATIOS;
+-	uint32_t savePFIT_PGM_RATIOS;
+-	uint32_t savePP_ON_DELAYS;
+-	uint32_t savePP_OFF_DELAYS;
+-	uint32_t savePP_DIVISOR;
+-	uint32_t saveBSM;
+-	uint32_t saveVBT;
+-	uint32_t saveBCLRPAT_A;
+-	uint32_t saveBCLRPAT_B;
+-	uint32_t saveDSPALINOFF;
+-	uint32_t saveDSPBLINOFF;
+-	uint32_t savePERF_MODE;
+-	uint32_t saveDSPFW1;
+-	uint32_t saveDSPFW2;
+-	uint32_t saveDSPFW3;
+-	uint32_t saveDSPFW4;
+-	uint32_t saveDSPFW5;
+-	uint32_t saveDSPFW6;
+-	uint32_t saveCHICKENBIT;
+-	uint32_t saveDSPACURSOR_CTRL;
+-	uint32_t saveDSPBCURSOR_CTRL;
+-	uint32_t saveDSPACURSOR_BASE;
+-	uint32_t saveDSPBCURSOR_BASE;
+-	uint32_t saveDSPACURSOR_POS;
+-	uint32_t saveDSPBCURSOR_POS;
+-	uint32_t save_palette_a[256];
+-	uint32_t save_palette_b[256];
+-	uint32_t saveOV_OVADD;
+-	uint32_t saveOV_OGAMC0;
+-	uint32_t saveOV_OGAMC1;
+-	uint32_t saveOV_OGAMC2;
+-	uint32_t saveOV_OGAMC3;
+-	uint32_t saveOV_OGAMC4;
+-	uint32_t saveOV_OGAMC5;
+-	uint32_t saveOVC_OVADD;
+-	uint32_t saveOVC_OGAMC0;
+-	uint32_t saveOVC_OGAMC1;
+-	uint32_t saveOVC_OGAMC2;
+-	uint32_t saveOVC_OGAMC3;
+-	uint32_t saveOVC_OGAMC4;
+-	uint32_t saveOVC_OGAMC5;
+-
+-	/* MSI reg save */
+-	uint32_t msi_addr;
+-	uint32_t msi_data;
+-
+-	/* Medfield specific register save state */
+-	uint32_t saveHDMIPHYMISCCTL;
+-	uint32_t saveHDMIB_CONTROL;
+-	uint32_t saveDSPCCNTR;
+-	uint32_t savePIPECCONF;
+-	uint32_t savePIPECSRC;
+-	uint32_t saveHTOTAL_C;
+-	uint32_t saveHBLANK_C;
+-	uint32_t saveHSYNC_C;
+-	uint32_t saveVTOTAL_C;
+-	uint32_t saveVBLANK_C;
+-	uint32_t saveVSYNC_C;
+-	uint32_t saveDSPCSTRIDE;
+-	uint32_t saveDSPCSIZE;
+-	uint32_t saveDSPCPOS;
+-	uint32_t saveDSPCSURF;
+-	uint32_t saveDSPCSTATUS;
+-	uint32_t saveDSPCLINOFF;
+-	uint32_t saveDSPCTILEOFF;
+-	uint32_t saveDSPCCURSOR_CTRL;
+-	uint32_t saveDSPCCURSOR_BASE;
+-	uint32_t saveDSPCCURSOR_POS;
+-	uint32_t save_palette_c[256];
+-	uint32_t saveOV_OVADD_C;
+-	uint32_t saveOV_OGAMC0_C;
+-	uint32_t saveOV_OGAMC1_C;
+-	uint32_t saveOV_OGAMC2_C;
+-	uint32_t saveOV_OGAMC3_C;
+-	uint32_t saveOV_OGAMC4_C;
+-	uint32_t saveOV_OGAMC5_C;
+-
+-	/* DSI register save */
+-	uint32_t saveDEVICE_READY_REG;
+-	uint32_t saveINTR_EN_REG;
+-	uint32_t saveDSI_FUNC_PRG_REG;
+-	uint32_t saveHS_TX_TIMEOUT_REG;
+-	uint32_t saveLP_RX_TIMEOUT_REG;
+-	uint32_t saveTURN_AROUND_TIMEOUT_REG;
+-	uint32_t saveDEVICE_RESET_REG;
+-	uint32_t saveDPI_RESOLUTION_REG;
+-	uint32_t saveHORIZ_SYNC_PAD_COUNT_REG;
+-	uint32_t saveHORIZ_BACK_PORCH_COUNT_REG;
+-	uint32_t saveHORIZ_FRONT_PORCH_COUNT_REG;
+-	uint32_t saveHORIZ_ACTIVE_AREA_COUNT_REG;
+-	uint32_t saveVERT_SYNC_PAD_COUNT_REG;
+-	uint32_t saveVERT_BACK_PORCH_COUNT_REG;
+-	uint32_t saveVERT_FRONT_PORCH_COUNT_REG;
+-	uint32_t saveHIGH_LOW_SWITCH_COUNT_REG;
+-	uint32_t saveINIT_COUNT_REG;
+-	uint32_t saveMAX_RET_PAK_REG;
+-	uint32_t saveVIDEO_FMT_REG;
+-	uint32_t saveEOT_DISABLE_REG;
+-	uint32_t saveLP_BYTECLK_REG;
+-	uint32_t saveHS_LS_DBI_ENABLE_REG;
+-	uint32_t saveTXCLKESC_REG;
+-	uint32_t saveDPHY_PARAM_REG;
+-	uint32_t saveMIPI_CONTROL_REG;
+-	uint32_t saveMIPI;
+-	uint32_t saveMIPI_C;
+-
+-	/* DPST register save */
+-	uint32_t saveHISTOGRAM_INT_CONTROL_REG;
+-	uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
+-	uint32_t savePWM_CONTROL_LOGIC;
+-
+-	/*
+-	 * DSI info. 
+-	 */
+-	void * dbi_dsr_info;	
+-	void * dbi_dpu_info;
+-	void * dsi_configs[2];
+-	/*
+-	 * LID-Switch
+-	 */
+-	spinlock_t lid_lock;
+-	struct timer_list lid_timer;
+-	struct psb_intel_opregion opregion;
+-	u32 *lid_state;
+-	u32 lid_last_state;
+-
+-	/*
+-	 * Watchdog
+-	 */
+-
+-	uint32_t apm_reg;
+-	uint16_t apm_base;
+-
+-	/*
+-	 * Used for modifying backlight from
+-	 * xrandr -- consider removing and using HAL instead
+-	 */
+-	struct backlight_device *backlight_device;
+-	struct drm_property *backlight_property;
+-	uint32_t blc_adj1;
+-	uint32_t blc_adj2;
+-
+-	void *fbdev;
+-	/* DPST state */
+-	uint32_t dsr_idle_count;
+-	bool is_in_idle;
+-	bool dsr_enable;
+-	void (*exit_idle)(struct drm_device *dev, u32 update_src);
+-
+-	/* 2D acceleration */
+-	spinlock_t lock_2d;
+-
+-	/* FIXME: Arrays anyone ? */
+-	struct mdfld_dsi_encoder *encoder0;	
+-	struct mdfld_dsi_encoder *encoder2;	
+-	struct mdfld_dsi_dbi_output * dbi_output;
+-	struct mdfld_dsi_dbi_output * dbi_output2;
+-	u32 bpp;
+-	u32 bpp2;
+-	
+-	bool dispstatus;
+-};
+-
+-
+-/*
+- *	Operations for each board type
+- */
+- 
+-struct psb_ops {
+-	const char *name;
+-	unsigned int accel_2d:1;
+-	int pipes;		/* Number of output pipes */
+-	int crtcs;		/* Number of CRTCs */
+-	int sgx_offset;		/* Base offset of SGX device */
+-
+-	/* Sub functions */
+-	struct drm_crtc_helper_funcs const *crtc_helper;
+-	struct drm_crtc_funcs const *crtc_funcs;
+-
+-	/* Setup hooks */
+-	int (*chip_setup)(struct drm_device *dev);
+-	void (*chip_teardown)(struct drm_device *dev);
+-
+-	/* Display management hooks */
+-	int (*output_init)(struct drm_device *dev);
+-	/* Power management hooks */
+-	void (*init_pm)(struct drm_device *dev);
+-	int (*save_regs)(struct drm_device *dev);
+-	int (*restore_regs)(struct drm_device *dev);
+-	int (*power_up)(struct drm_device *dev);
+-	int (*power_down)(struct drm_device *dev);
+-
+-	void (*lvds_bl_power)(struct drm_device *dev, bool on);
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-	/* Backlight */
+-	int (*backlight_init)(struct drm_device *dev);
+-#endif
+-	int i2c_bus;		/* I2C bus identifier for Moorestown */
+-};
+-
+-
+-
+-struct psb_mmu_driver;
+-
+-extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
+-extern int drm_pick_crtcs(struct drm_device *dev);
+-
+-static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
+-{
+-	return (struct drm_psb_private *) dev->dev_private;
+-}
+-
+-/*
+- * MMU stuff.
+- */
+-
+-extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
+-					int trap_pagefaults,
+-					int invalid_type,
+-					struct drm_psb_private *dev_priv);
+-extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
+-extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
+-						 *driver);
+-extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
+-			       uint32_t gtt_start, uint32_t gtt_pages);
+-extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
+-					   int trap_pagefaults,
+-					   int invalid_type);
+-extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
+-extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
+-extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
+-					unsigned long address,
+-					uint32_t num_pages);
+-extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
+-				       uint32_t start_pfn,
+-				       unsigned long address,
+-				       uint32_t num_pages, int type);
+-extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
+-				  unsigned long *pfn);
+-
+-/*
+- * Enable / disable MMU for different requestors.
+- */
+-
+-
+-extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
+-extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
+-				unsigned long address, uint32_t num_pages,
+-				uint32_t desired_tile_stride,
+-				uint32_t hw_tile_stride, int type);
+-extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
+-				 unsigned long address, uint32_t num_pages,
+-				 uint32_t desired_tile_stride,
+-				 uint32_t hw_tile_stride);
+-/*
+- *psb_irq.c
+- */
+-
+-extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+-extern int psb_irq_enable_dpst(struct drm_device *dev);
+-extern int psb_irq_disable_dpst(struct drm_device *dev);
+-extern void psb_irq_preinstall(struct drm_device *dev);
+-extern int psb_irq_postinstall(struct drm_device *dev);
+-extern void psb_irq_uninstall(struct drm_device *dev);
+-extern void psb_irq_turn_on_dpst(struct drm_device *dev);
+-extern void psb_irq_turn_off_dpst(struct drm_device *dev);
+-
+-extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
+-extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+-extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+-extern int psb_enable_vblank(struct drm_device *dev, int crtc);
+-extern void psb_disable_vblank(struct drm_device *dev, int crtc);
+-void
+-psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+-
+-void
+-psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
+-
+-extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
+-
+-extern int mdfld_enable_te(struct drm_device *dev, int pipe);
+-extern void mdfld_disable_te(struct drm_device *dev, int pipe);
+-
+-/*
+- * intel_opregion.c
+- */
+-extern int gma_intel_opregion_init(struct drm_device *dev);
+-extern int gma_intel_opregion_exit(struct drm_device *dev);
+-
+-/*
+- * framebuffer.c
+- */
+-extern int psbfb_probed(struct drm_device *dev);
+-extern int psbfb_remove(struct drm_device *dev,
+-			struct drm_framebuffer *fb);
+-/*
+- * accel_2d.c
+- */
+-extern void psbfb_copyarea(struct fb_info *info,
+-					const struct fb_copyarea *region);
+-extern int psbfb_sync(struct fb_info *info);
+-extern void psb_spank(struct drm_psb_private *dev_priv);
+-extern int psb_accel_ioctl(struct drm_device *dev, void *data,
+-							struct drm_file *file);
+-
+-/*
+- * psb_reset.c
+- */
+-
+-extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
+-extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
+-extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
+-
+-/* modesetting */
+-extern void psb_modeset_init(struct drm_device *dev);
+-extern void psb_modeset_cleanup(struct drm_device *dev);
+-extern int psb_fbdev_init(struct drm_device *dev);
+-
+-/* backlight.c */
+-int gma_backlight_init(struct drm_device *dev);
+-void gma_backlight_exit(struct drm_device *dev);
+-
+-/* mrst_crtc.c */
+-extern const struct drm_crtc_helper_funcs mrst_helper_funcs;
+-
+-/* mrst_lvds.c */
+-extern void mrst_lvds_init(struct drm_device *dev,
+-		    struct psb_intel_mode_device *mode_dev);
+-
+-/* psb_intel_display.c */
+-extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs;
+-extern const struct drm_crtc_funcs psb_intel_crtc_funcs;
+-
+-/* psb_intel_lvds.c */
+-extern const struct drm_connector_helper_funcs
+-					psb_intel_lvds_connector_helper_funcs;
+-extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
+-
+-/* gem.c */
+-extern int psb_gem_init_object(struct drm_gem_object *obj);
+-extern void psb_gem_free_object(struct drm_gem_object *obj);
+-extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
+-			struct drm_file *file);
+-extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+-			struct drm_mode_create_dumb *args);
+-extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+-			uint32_t handle);
+-extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
+-			uint32_t handle, uint64_t *offset);
+-extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+-extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
+-			struct drm_file *file);
+-extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
+-					struct drm_file *file);
+-
+-/* psb_device.c */
+-extern const struct psb_ops psb_chip_ops;
+-
+-/* mrst_device.c */
+-extern const struct psb_ops mrst_chip_ops;
+-
+-/* mdfld_device.c */
+-extern const struct psb_ops mdfld_chip_ops;
+-
+-/* cdv_device.c */
+-extern const struct psb_ops cdv_chip_ops;
+-
+-/*
+- * Debug print bits setting
+- */
+-#define PSB_D_GENERAL (1 << 0)
+-#define PSB_D_INIT    (1 << 1)
+-#define PSB_D_IRQ     (1 << 2)
+-#define PSB_D_ENTRY   (1 << 3)
+-/* debug the get H/V BP/FP count */
+-#define PSB_D_HV      (1 << 4)
+-#define PSB_D_DBI_BF  (1 << 5)
+-#define PSB_D_PM      (1 << 6)
+-#define PSB_D_RENDER  (1 << 7)
+-#define PSB_D_REG     (1 << 8)
+-#define PSB_D_MSVDX   (1 << 9)
+-#define PSB_D_TOPAZ   (1 << 10)
+-
+-extern int drm_psb_no_fb;
+-extern int drm_idle_check_interval;
+-
+-/*
+- *	Utilities
+- */
+-
+-static inline u32 MRST_MSG_READ32(uint port, uint offset)
+-{
+-	int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
+-	uint32_t ret_val = 0;
+-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+-	pci_write_config_dword(pci_root, 0xD0, mcr);
+-	pci_read_config_dword(pci_root, 0xD4, &ret_val);
+-	pci_dev_put(pci_root);
+-	return ret_val;
+-}
+-static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value)
+-{
+-	int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
+-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+-	pci_write_config_dword(pci_root, 0xD4, value);
+-	pci_write_config_dword(pci_root, 0xD0, mcr);
+-	pci_dev_put(pci_root);
+-}
+-static inline u32 MDFLD_MSG_READ32(uint port, uint offset)
+-{
+-	int mcr = (0x10<<24) | (port << 16) | (offset << 8);
+-	uint32_t ret_val = 0;
+-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+-	pci_write_config_dword(pci_root, 0xD0, mcr);
+-	pci_read_config_dword(pci_root, 0xD4, &ret_val);
+-	pci_dev_put(pci_root);
+-	return ret_val;
+-}
+-static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value)
+-{
+-	int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
+-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+-	pci_write_config_dword(pci_root, 0xD4, value);
+-	pci_write_config_dword(pci_root, 0xD0, mcr);
+-	pci_dev_put(pci_root);
+-}
+-
+-static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	return ioread32(dev_priv->vdc_reg + reg);
+-}
+-
+-#define REG_READ(reg)	       REGISTER_READ(dev, (reg))
+-
+-static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
+-				      uint32_t val)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	iowrite32((val), dev_priv->vdc_reg + (reg));
+-}
+-
+-#define REG_WRITE(reg, val)	REGISTER_WRITE(dev, (reg), (val))
+-
+-static inline void REGISTER_WRITE16(struct drm_device *dev,
+-					uint32_t reg, uint32_t val)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	iowrite16((val), dev_priv->vdc_reg + (reg));
+-}
+-
+-#define REG_WRITE16(reg, val)	  REGISTER_WRITE16(dev, (reg), (val))
+-
+-static inline void REGISTER_WRITE8(struct drm_device *dev,
+-				       uint32_t reg, uint32_t val)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	iowrite8((val), dev_priv->vdc_reg + (reg));
+-}
+-
+-#define REG_WRITE8(reg, val)		REGISTER_WRITE8(dev, (reg), (val))
+-
+-#define PSB_WVDC32(_val, _offs)		iowrite32(_val, dev_priv->vdc_reg + (_offs))
+-#define PSB_RVDC32(_offs)		ioread32(dev_priv->vdc_reg + (_offs))
+-
+-/* #define TRAP_SGX_PM_FAULT 1 */
+-#ifdef TRAP_SGX_PM_FAULT
+-#define PSB_RSGX32(_offs)						\
+-({									\
+-	if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) {		\
+-		printk(KERN_ERR						\
+-			"access sgx when it's off!! (READ) %s, %d\n",	\
+-	       __FILE__, __LINE__);					\
+-		melay(1000);						\
+-	}								\
+-	ioread32(dev_priv->sgx_reg + (_offs));				\
+-})
+-#else
+-#define PSB_RSGX32(_offs)		ioread32(dev_priv->sgx_reg + (_offs))
+-#endif
+-#define PSB_WSGX32(_val, _offs)		iowrite32(_val, dev_priv->sgx_reg + (_offs))
+-
+-#define MSVDX_REG_DUMP 0
+-
+-#define PSB_WMSVDX32(_val, _offs)	iowrite32(_val, dev_priv->msvdx_reg + (_offs))
+-#define PSB_RMSVDX32(_offs)		ioread32(dev_priv->msvdx_reg + (_offs))
+-
+-#endif
+diff --git a/drivers/staging/gma500/psb_intel_display.c b/drivers/staging/gma500/psb_intel_display.c
+deleted file mode 100644
+index caa9d86..0000000
+--- a/drivers/staging/gma500/psb_intel_display.c
++++ /dev/null
+@@ -1,1429 +0,0 @@
+-/*
+- * Copyright © 2006-2011 Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors:
+- *	Eric Anholt <eric at anholt.net>
+- */
+-
+-#include <linux/i2c.h>
+-#include <linux/pm_runtime.h>
+-
+-#include <drm/drmP.h>
+-#include "framebuffer.h"
+-#include "psb_drv.h"
+-#include "psb_intel_drv.h"
+-#include "psb_intel_reg.h"
+-#include "psb_intel_display.h"
+-#include "power.h"
+-
+-#include "mdfld_output.h"
+-
+-struct psb_intel_clock_t {
+-	/* given values */
+-	int n;
+-	int m1, m2;
+-	int p1, p2;
+-	/* derived values */
+-	int dot;
+-	int vco;
+-	int m;
+-	int p;
+-};
+-
+-struct psb_intel_range_t {
+-	int min, max;
+-};
+-
+-struct psb_intel_p2_t {
+-	int dot_limit;
+-	int p2_slow, p2_fast;
+-};
+-
+-#define INTEL_P2_NUM		      2
+-
+-struct psb_intel_limit_t {
+-	struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
+-	struct psb_intel_p2_t p2;
+-};
+-
+-#define I8XX_DOT_MIN		  25000
+-#define I8XX_DOT_MAX		 350000
+-#define I8XX_VCO_MIN		 930000
+-#define I8XX_VCO_MAX		1400000
+-#define I8XX_N_MIN		      3
+-#define I8XX_N_MAX		     16
+-#define I8XX_M_MIN		     96
+-#define I8XX_M_MAX		    140
+-#define I8XX_M1_MIN		     18
+-#define I8XX_M1_MAX		     26
+-#define I8XX_M2_MIN		      6
+-#define I8XX_M2_MAX		     16
+-#define I8XX_P_MIN		      4
+-#define I8XX_P_MAX		    128
+-#define I8XX_P1_MIN		      2
+-#define I8XX_P1_MAX		     33
+-#define I8XX_P1_LVDS_MIN	      1
+-#define I8XX_P1_LVDS_MAX	      6
+-#define I8XX_P2_SLOW		      4
+-#define I8XX_P2_FAST		      2
+-#define I8XX_P2_LVDS_SLOW	      14
+-#define I8XX_P2_LVDS_FAST	      14	/* No fast option */
+-#define I8XX_P2_SLOW_LIMIT	 165000
+-
+-#define I9XX_DOT_MIN		  20000
+-#define I9XX_DOT_MAX		 400000
+-#define I9XX_VCO_MIN		1400000
+-#define I9XX_VCO_MAX		2800000
+-#define I9XX_N_MIN		      3
+-#define I9XX_N_MAX		      8
+-#define I9XX_M_MIN		     70
+-#define I9XX_M_MAX		    120
+-#define I9XX_M1_MIN		     10
+-#define I9XX_M1_MAX		     20
+-#define I9XX_M2_MIN		      5
+-#define I9XX_M2_MAX		      9
+-#define I9XX_P_SDVO_DAC_MIN	      5
+-#define I9XX_P_SDVO_DAC_MAX	     80
+-#define I9XX_P_LVDS_MIN		      7
+-#define I9XX_P_LVDS_MAX		     98
+-#define I9XX_P1_MIN		      1
+-#define I9XX_P1_MAX		      8
+-#define I9XX_P2_SDVO_DAC_SLOW		     10
+-#define I9XX_P2_SDVO_DAC_FAST		      5
+-#define I9XX_P2_SDVO_DAC_SLOW_LIMIT	 200000
+-#define I9XX_P2_LVDS_SLOW		     14
+-#define I9XX_P2_LVDS_FAST		      7
+-#define I9XX_P2_LVDS_SLOW_LIMIT		 112000
+-
+-#define INTEL_LIMIT_I8XX_DVO_DAC    0
+-#define INTEL_LIMIT_I8XX_LVDS	    1
+-#define INTEL_LIMIT_I9XX_SDVO_DAC   2
+-#define INTEL_LIMIT_I9XX_LVDS	    3
+-
+-static const struct psb_intel_limit_t psb_intel_limits[] = {
+-	{			/* INTEL_LIMIT_I8XX_DVO_DAC */
+-	 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
+-	 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
+-	 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
+-	 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
+-	 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
+-	 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
+-	 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
+-	 .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
+-	 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
+-		.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
+-	 },
+-	{			/* INTEL_LIMIT_I8XX_LVDS */
+-	 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
+-	 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
+-	 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
+-	 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
+-	 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
+-	 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
+-	 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
+-	 .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
+-	 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
+-		.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
+-	 },
+-	{			/* INTEL_LIMIT_I9XX_SDVO_DAC */
+-	 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
+-	 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
+-	 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
+-	 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
+-	 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
+-	 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
+-	 .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
+-	 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
+-	 .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
+-		.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
+-		I9XX_P2_SDVO_DAC_FAST},
+-	 },
+-	{			/* INTEL_LIMIT_I9XX_LVDS */
+-	 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
+-	 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
+-	 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
+-	 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
+-	 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
+-	 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
+-	 .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
+-	 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
+-	 /* The single-channel range is 25-112Mhz, and dual-channel
+-	  * is 80-224Mhz.  Prefer single channel as much as possible.
+-	  */
+-	 .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
+-		.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
+-	 },
+-};
+-
+-static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
+-{
+-	const struct psb_intel_limit_t *limit;
+-
+-	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+-		limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
+-	else
+-		limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
+-	return limit;
+-}
+-
+-/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
+-
+-static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
+-{
+-	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+-	clock->p = clock->p1 * clock->p2;
+-	clock->vco = refclk * clock->m / (clock->n + 2);
+-	clock->dot = clock->vco / clock->p;
+-}
+-
+-/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
+-
+-static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
+-{
+-	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+-	clock->p = clock->p1 * clock->p2;
+-	clock->vco = refclk * clock->m / (clock->n + 2);
+-	clock->dot = clock->vco / clock->p;
+-}
+-
+-static void psb_intel_clock(struct drm_device *dev, int refclk,
+-			struct psb_intel_clock_t *clock)
+-{
+-	return i9xx_clock(refclk, clock);
+-}
+-
+-/**
+- * Returns whether any output on the specified pipe is of the specified type
+- */
+-bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct drm_mode_config *mode_config = &dev->mode_config;
+-	struct drm_connector *l_entry;
+-
+-	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+-		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+-			struct psb_intel_output *psb_intel_output =
+-			    to_psb_intel_output(l_entry);
+-			if (psb_intel_output->type == type)
+-				return true;
+-		}
+-	}
+-	return false;
+-}
+-
+-#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
+-/**
+- * Returns whether the given set of divisors are valid for a given refclk with
+- * the given connectors.
+- */
+-
+-static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
+-			       struct psb_intel_clock_t *clock)
+-{
+-	const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
+-
+-	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+-		INTELPllInvalid("p1 out of range\n");
+-	if (clock->p < limit->p.min || limit->p.max < clock->p)
+-		INTELPllInvalid("p out of range\n");
+-	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+-		INTELPllInvalid("m2 out of range\n");
+-	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+-		INTELPllInvalid("m1 out of range\n");
+-	if (clock->m1 <= clock->m2)
+-		INTELPllInvalid("m1 <= m2\n");
+-	if (clock->m < limit->m.min || limit->m.max < clock->m)
+-		INTELPllInvalid("m out of range\n");
+-	if (clock->n < limit->n.min || limit->n.max < clock->n)
+-		INTELPllInvalid("n out of range\n");
+-	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+-		INTELPllInvalid("vco out of range\n");
+-	/* XXX: We may need to be checking "Dot clock"
+-	 * depending on the multiplier, connector, etc.,
+-	 * rather than just a single range.
+-	 */
+-	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+-		INTELPllInvalid("dot out of range\n");
+-
+-	return true;
+-}
+-
+-/**
+- * Returns a set of divisors for the desired target clock with the given
+- * refclk, or FALSE.  The returned values represent the clock equation:
+- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+- */
+-static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
+-				int refclk,
+-				struct psb_intel_clock_t *best_clock)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct psb_intel_clock_t clock;
+-	const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
+-	int err = target;
+-
+-	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+-	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+-		/*
+-		 * For LVDS, if the panel is on, just rely on its current
+-		 * settings for dual-channel.  We haven't figured out how to
+-		 * reliably set up different single/dual channel state, if we
+-		 * even can.
+-		 */
+-		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+-		    LVDS_CLKB_POWER_UP)
+-			clock.p2 = limit->p2.p2_fast;
+-		else
+-			clock.p2 = limit->p2.p2_slow;
+-	} else {
+-		if (target < limit->p2.dot_limit)
+-			clock.p2 = limit->p2.p2_slow;
+-		else
+-			clock.p2 = limit->p2.p2_fast;
+-	}
+-
+-	memset(best_clock, 0, sizeof(*best_clock));
+-
+-	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+-	     clock.m1++) {
+-		for (clock.m2 = limit->m2.min;
+-		     clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
+-		     clock.m2++) {
+-			for (clock.n = limit->n.min;
+-			     clock.n <= limit->n.max; clock.n++) {
+-				for (clock.p1 = limit->p1.min;
+-				     clock.p1 <= limit->p1.max;
+-				     clock.p1++) {
+-					int this_err;
+-
+-					psb_intel_clock(dev, refclk, &clock);
+-
+-					if (!psb_intel_PLL_is_valid
+-					    (crtc, &clock))
+-						continue;
+-
+-					this_err = abs(clock.dot - target);
+-					if (this_err < err) {
+-						*best_clock = clock;
+-						err = this_err;
+-					}
+-				}
+-			}
+-		}
+-	}
+-
+-	return err != target;
+-}
+-
+-void psb_intel_wait_for_vblank(struct drm_device *dev)
+-{
+-	/* Wait for 20ms, i.e. one cycle at 50hz. */
+-	mdelay(20);
+-}
+-
+-int psb_intel_pipe_set_base(struct drm_crtc *crtc,
+-			    int x, int y, struct drm_framebuffer *old_fb)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	/* struct drm_i915_master_private *master_priv; */
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+-	int pipe = psb_intel_crtc->pipe;
+-	unsigned long start, offset;
+-	int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
+-	int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
+-	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
+-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+-	u32 dspcntr;
+-	int ret = 0;
+-
+-	if (!gma_power_begin(dev, true))
+-		return 0;
+-
+-	/* no fb bound */
+-	if (!crtc->fb) {
+-		dev_dbg(dev->dev, "No FB bound\n");
+-		goto psb_intel_pipe_cleaner;
+-	}
+-
+-	/* We are displaying this buffer, make sure it is actually loaded
+-	   into the GTT */
+-	ret = psb_gtt_pin(psbfb->gtt);
+-	if (ret < 0)
+-		goto psb_intel_pipe_set_base_exit;
+-	start = psbfb->gtt->offset;
+-
+-	offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+-
+-	REG_WRITE(dspstride, crtc->fb->pitch);
+-
+-	dspcntr = REG_READ(dspcntr_reg);
+-	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+-
+-	switch (crtc->fb->bits_per_pixel) {
+-	case 8:
+-		dspcntr |= DISPPLANE_8BPP;
+-		break;
+-	case 16:
+-		if (crtc->fb->depth == 15)
+-			dspcntr |= DISPPLANE_15_16BPP;
+-		else
+-			dspcntr |= DISPPLANE_16BPP;
+-		break;
+-	case 24:
+-	case 32:
+-		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+-		break;
+-	default:
+-		dev_err(dev->dev, "Unknown color depth\n");
+-		ret = -EINVAL;
+-		psb_gtt_unpin(psbfb->gtt);
+-		goto psb_intel_pipe_set_base_exit;
+-	}
+-	REG_WRITE(dspcntr_reg, dspcntr);
+-
+-
+-	if (0 /* FIXMEAC - check what PSB needs */) {
+-		REG_WRITE(dspbase, offset);
+-		REG_READ(dspbase);
+-		REG_WRITE(dspsurf, start);
+-		REG_READ(dspsurf);
+-	} else {
+-		REG_WRITE(dspbase, start + offset);
+-		REG_READ(dspbase);
+-	}
+-
+-psb_intel_pipe_cleaner:
+-	/* If there was a previous display we can now unpin it */
+-	if (old_fb)
+-		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+-
+-psb_intel_pipe_set_base_exit:
+-	gma_power_end(dev);
+-	return ret;
+-}
+-
+-/**
+- * Sets the power management mode of the pipe and plane.
+- *
+- * This code should probably grow support for turning the cursor off and back
+- * on appropriately at the same time as we're turning the pipe off/on.
+- */
+-static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	/* struct drm_i915_master_private *master_priv; */
+-	/* struct drm_i915_private *dev_priv = dev->dev_private; */
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+-	int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
+-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+-	u32 temp;
+-	bool enabled;
+-
+-	/* XXX: When our outputs are all unaware of DPMS modes other than off
+-	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+-	 */
+-	switch (mode) {
+-	case DRM_MODE_DPMS_ON:
+-	case DRM_MODE_DPMS_STANDBY:
+-	case DRM_MODE_DPMS_SUSPEND:
+-		/* Enable the DPLL */
+-		temp = REG_READ(dpll_reg);
+-		if ((temp & DPLL_VCO_ENABLE) == 0) {
+-			REG_WRITE(dpll_reg, temp);
+-			REG_READ(dpll_reg);
+-			/* Wait for the clocks to stabilize. */
+-			udelay(150);
+-			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+-			REG_READ(dpll_reg);
+-			/* Wait for the clocks to stabilize. */
+-			udelay(150);
+-			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
+-			REG_READ(dpll_reg);
+-			/* Wait for the clocks to stabilize. */
+-			udelay(150);
+-		}
+-
+-		/* Enable the pipe */
+-		temp = REG_READ(pipeconf_reg);
+-		if ((temp & PIPEACONF_ENABLE) == 0)
+-			REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
+-
+-		/* Enable the plane */
+-		temp = REG_READ(dspcntr_reg);
+-		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+-			REG_WRITE(dspcntr_reg,
+-				  temp | DISPLAY_PLANE_ENABLE);
+-			/* Flush the plane changes */
+-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+-		}
+-
+-		psb_intel_crtc_load_lut(crtc);
+-
+-		/* Give the overlay scaler a chance to enable
+-		 * if it's on this pipe */
+-		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
+-		break;
+-	case DRM_MODE_DPMS_OFF:
+-		/* Give the overlay scaler a chance to disable
+-		 * if it's on this pipe */
+-		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+-
+-		/* Disable the VGA plane that we never use */
+-		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+-
+-		/* Disable display plane */
+-		temp = REG_READ(dspcntr_reg);
+-		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+-			REG_WRITE(dspcntr_reg,
+-				  temp & ~DISPLAY_PLANE_ENABLE);
+-			/* Flush the plane changes */
+-			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
+-			REG_READ(dspbase_reg);
+-		}
+-
+-		/* Next, disable display pipes */
+-		temp = REG_READ(pipeconf_reg);
+-		if ((temp & PIPEACONF_ENABLE) != 0) {
+-			REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
+-			REG_READ(pipeconf_reg);
+-		}
+-
+-		/* Wait for vblank for the disable to take effect. */
+-		psb_intel_wait_for_vblank(dev);
+-
+-		temp = REG_READ(dpll_reg);
+-		if ((temp & DPLL_VCO_ENABLE) != 0) {
+-			REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
+-			REG_READ(dpll_reg);
+-		}
+-
+-		/* Wait for the clocks to turn off. */
+-		udelay(150);
+-		break;
+-	}
+-
+-	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
+-
+-	/*Set FIFO Watermarks*/
+-	REG_WRITE(DSPARB, 0x3F3E);
+-}
+-
+-static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
+-{
+-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+-}
+-
+-static void psb_intel_crtc_commit(struct drm_crtc *crtc)
+-{
+-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+-	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+-}
+-
+-void psb_intel_encoder_prepare(struct drm_encoder *encoder)
+-{
+-	struct drm_encoder_helper_funcs *encoder_funcs =
+-	    encoder->helper_private;
+-	/* lvds has its own version of prepare see psb_intel_lvds_prepare */
+-	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+-}
+-
+-void psb_intel_encoder_commit(struct drm_encoder *encoder)
+-{
+-	struct drm_encoder_helper_funcs *encoder_funcs =
+-	    encoder->helper_private;
+-	/* lvds has its own version of commit see psb_intel_lvds_commit */
+-	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+-}
+-
+-static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
+-				  struct drm_display_mode *mode,
+-				  struct drm_display_mode *adjusted_mode)
+-{
+-	return true;
+-}
+-
+-
+-/**
+- * Return the pipe currently connected to the panel fitter,
+- * or -1 if the panel fitter is not present or not in use
+- */
+-static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
+-{
+-	u32 pfit_control;
+-
+-	pfit_control = REG_READ(PFIT_CONTROL);
+-
+-	/* See if the panel fitter is in use */
+-	if ((pfit_control & PFIT_ENABLE) == 0)
+-		return -1;
+-	/* Must be on PIPE 1 for PSB */
+-	return 1;
+-}
+-
+-static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
+-			       struct drm_display_mode *mode,
+-			       struct drm_display_mode *adjusted_mode,
+-			       int x, int y,
+-			       struct drm_framebuffer *old_fb)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+-	int pipe = psb_intel_crtc->pipe;
+-	int fp_reg = (pipe == 0) ? FPA0 : FPB0;
+-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+-	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
+-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
+-	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
+-	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
+-	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
+-	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
+-	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
+-	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
+-	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
+-	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
+-	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
+-	int refclk;
+-	struct psb_intel_clock_t clock;
+-	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
+-	bool ok, is_sdvo = false, is_dvo = false;
+-	bool is_crt = false, is_lvds = false, is_tv = false;
+-	struct drm_mode_config *mode_config = &dev->mode_config;
+-	struct drm_connector *connector;
+-
+-	/* No scan out no play */
+-	if (crtc->fb == NULL) {
+-		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+-		return 0;
+-	}
+-
+-	list_for_each_entry(connector, &mode_config->connector_list, head) {
+-		struct psb_intel_output *psb_intel_output =
+-		    to_psb_intel_output(connector);
+-
+-		if (!connector->encoder
+-		    || connector->encoder->crtc != crtc)
+-			continue;
+-
+-		switch (psb_intel_output->type) {
+-		case INTEL_OUTPUT_LVDS:
+-			is_lvds = true;
+-			break;
+-		case INTEL_OUTPUT_SDVO:
+-			is_sdvo = true;
+-			break;
+-		case INTEL_OUTPUT_DVO:
+-			is_dvo = true;
+-			break;
+-		case INTEL_OUTPUT_TVOUT:
+-			is_tv = true;
+-			break;
+-		case INTEL_OUTPUT_ANALOG:
+-			is_crt = true;
+-			break;
+-		}
+-	}
+-
+-	refclk = 96000;
+-
+-	ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+-				 &clock);
+-	if (!ok) {
+-		dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+-		return 0;
+-	}
+-
+-	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+-
+-	dpll = DPLL_VGA_MODE_DIS;
+-	if (is_lvds) {
+-		dpll |= DPLLB_MODE_LVDS;
+-		dpll |= DPLL_DVO_HIGH_SPEED;
+-	} else
+-		dpll |= DPLLB_MODE_DAC_SERIAL;
+-	if (is_sdvo) {
+-		int sdvo_pixel_multiply =
+-			    adjusted_mode->clock / mode->clock;
+-		dpll |= DPLL_DVO_HIGH_SPEED;
+-		dpll |=
+-		    (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+-	}
+-
+-	/* compute bitmask from p1 value */
+-	dpll |= (1 << (clock.p1 - 1)) << 16;
+-	switch (clock.p2) {
+-	case 5:
+-		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
+-		break;
+-	case 7:
+-		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
+-		break;
+-	case 10:
+-		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
+-		break;
+-	case 14:
+-		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+-		break;
+-	}
+-
+-	if (is_tv) {
+-		/* XXX: just matching BIOS for now */
+-/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
+-		dpll |= 3;
+-	}
+-	dpll |= PLL_REF_INPUT_DREFCLK;
+-
+-	/* setup pipeconf */
+-	pipeconf = REG_READ(pipeconf_reg);
+-
+-	/* Set up the display plane register */
+-	dspcntr = DISPPLANE_GAMMA_ENABLE;
+-
+-	if (pipe == 0)
+-		dspcntr |= DISPPLANE_SEL_PIPE_A;
+-	else
+-		dspcntr |= DISPPLANE_SEL_PIPE_B;
+-
+-	dspcntr |= DISPLAY_PLANE_ENABLE;
+-	pipeconf |= PIPEACONF_ENABLE;
+-	dpll |= DPLL_VCO_ENABLE;
+-
+-
+-	/* Disable the panel fitter if it was on our pipe */
+-	if (psb_intel_panel_fitter_pipe(dev) == pipe)
+-		REG_WRITE(PFIT_CONTROL, 0);
+-
+-	drm_mode_debug_printmodeline(mode);
+-
+-	if (dpll & DPLL_VCO_ENABLE) {
+-		REG_WRITE(fp_reg, fp);
+-		REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+-		REG_READ(dpll_reg);
+-		udelay(150);
+-	}
+-
+-	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
+-	 * This is an exception to the general rule that mode_set doesn't turn
+-	 * things on.
+-	 */
+-	if (is_lvds) {
+-		u32 lvds = REG_READ(LVDS);
+-
+-		lvds &= ~LVDS_PIPEB_SELECT;
+-		if (pipe == 1)
+-			lvds |= LVDS_PIPEB_SELECT;
+-
+-		lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+-		/* Set the B0-B3 data pairs corresponding to
+-		 * whether we're going to
+-		 * set the DPLLs for dual-channel mode or not.
+-		 */
+-		lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
+-		if (clock.p2 == 7)
+-			lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+-
+-		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
+-		 * appropriately here, but we need to look more
+-		 * thoroughly into how panels behave in the two modes.
+-		 */
+-
+-		REG_WRITE(LVDS, lvds);
+-		REG_READ(LVDS);
+-	}
+-
+-	REG_WRITE(fp_reg, fp);
+-	REG_WRITE(dpll_reg, dpll);
+-	REG_READ(dpll_reg);
+-	/* Wait for the clocks to stabilize. */
+-	udelay(150);
+-
+-	/* write it again -- the BIOS does, after all */
+-	REG_WRITE(dpll_reg, dpll);
+-
+-	REG_READ(dpll_reg);
+-	/* Wait for the clocks to stabilize. */
+-	udelay(150);
+-
+-	REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
+-		  ((adjusted_mode->crtc_htotal - 1) << 16));
+-	REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
+-		  ((adjusted_mode->crtc_hblank_end - 1) << 16));
+-	REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
+-		  ((adjusted_mode->crtc_hsync_end - 1) << 16));
+-	REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
+-		  ((adjusted_mode->crtc_vtotal - 1) << 16));
+-	REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
+-		  ((adjusted_mode->crtc_vblank_end - 1) << 16));
+-	REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
+-		  ((adjusted_mode->crtc_vsync_end - 1) << 16));
+-	/* pipesrc and dspsize control the size that is scaled from,
+-	 * which should always be the user's requested size.
+-	 */
+-	REG_WRITE(dspsize_reg,
+-		  ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
+-	REG_WRITE(dsppos_reg, 0);
+-	REG_WRITE(pipesrc_reg,
+-		  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+-	REG_WRITE(pipeconf_reg, pipeconf);
+-	REG_READ(pipeconf_reg);
+-
+-	psb_intel_wait_for_vblank(dev);
+-
+-	REG_WRITE(dspcntr_reg, dspcntr);
+-
+-	/* Flush the plane changes */
+-	crtc_funcs->mode_set_base(crtc, x, y, old_fb);
+-
+-	psb_intel_wait_for_vblank(dev);
+-
+-	return 0;
+-}
+-
+-/** Loads the palette/gamma unit for the CRTC with the prepared values */
+-void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct drm_psb_private *dev_priv =
+-				(struct drm_psb_private *)dev->dev_private;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int palreg = PALETTE_A;
+-	int i;
+-
+-	/* The clocks have to be on to load the palette. */
+-	if (!crtc->enabled)
+-		return;
+-
+-	switch (psb_intel_crtc->pipe) {
+-	case 0:
+-		break;
+-	case 1:
+-		palreg = PALETTE_B;
+-		break;
+-	case 2:
+-		palreg = PALETTE_C;
+-		break;
+-	default:
+-		dev_err(dev->dev, "Illegal Pipe Number.\n");
+-		return;
+-	}
+-
+-	if (gma_power_begin(dev, false)) {
+-		for (i = 0; i < 256; i++) {
+-			REG_WRITE(palreg + 4 * i,
+-				  ((psb_intel_crtc->lut_r[i] +
+-				  psb_intel_crtc->lut_adj[i]) << 16) |
+-				  ((psb_intel_crtc->lut_g[i] +
+-				  psb_intel_crtc->lut_adj[i]) << 8) |
+-				  (psb_intel_crtc->lut_b[i] +
+-				  psb_intel_crtc->lut_adj[i]));
+-		}
+-		gma_power_end(dev);
+-	} else {
+-		for (i = 0; i < 256; i++) {
+-			dev_priv->save_palette_a[i] =
+-				  ((psb_intel_crtc->lut_r[i] +
+-				  psb_intel_crtc->lut_adj[i]) << 16) |
+-				  ((psb_intel_crtc->lut_g[i] +
+-				  psb_intel_crtc->lut_adj[i]) << 8) |
+-				  (psb_intel_crtc->lut_b[i] +
+-				  psb_intel_crtc->lut_adj[i]);
+-		}
+-
+-	}
+-}
+-
+-/**
+- * Save HW states of giving crtc
+- */
+-static void psb_intel_crtc_save(struct drm_crtc *crtc)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	/* struct drm_psb_private *dev_priv =
+-			(struct drm_psb_private *)dev->dev_private; */
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+-	int pipeA = (psb_intel_crtc->pipe == 0);
+-	uint32_t paletteReg;
+-	int i;
+-
+-	if (!crtc_state) {
+-		dev_err(dev->dev, "No CRTC state found\n");
+-		return;
+-	}
+-
+-	crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
+-	crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
+-	crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
+-	crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
+-	crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
+-	crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
+-	crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
+-	crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
+-	crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
+-	crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
+-	crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
+-	crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
+-	crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
+-
+-	/*NOTE: DSPSIZE DSPPOS only for psb*/
+-	crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
+-	crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
+-
+-	crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
+-
+-	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+-	for (i = 0; i < 256; ++i)
+-		crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
+-}
+-
+-/**
+- * Restore HW states of giving crtc
+- */
+-static void psb_intel_crtc_restore(struct drm_crtc *crtc)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	/* struct drm_psb_private * dev_priv =
+-				(struct drm_psb_private *)dev->dev_private; */
+-	struct psb_intel_crtc *psb_intel_crtc =  to_psb_intel_crtc(crtc);
+-	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
+-	/* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
+-	int pipeA = (psb_intel_crtc->pipe == 0);
+-	uint32_t paletteReg;
+-	int i;
+-
+-	if (!crtc_state) {
+-		dev_err(dev->dev, "No crtc state\n");
+-		return;
+-	}
+-
+-	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+-		REG_WRITE(pipeA ? DPLL_A : DPLL_B,
+-			crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+-		REG_READ(pipeA ? DPLL_A : DPLL_B);
+-		udelay(150);
+-	}
+-
+-	REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
+-	REG_READ(pipeA ? FPA0 : FPB0);
+-
+-	REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
+-	REG_READ(pipeA ? FPA1 : FPB1);
+-
+-	REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
+-	REG_READ(pipeA ? DPLL_A : DPLL_B);
+-	udelay(150);
+-
+-	REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
+-	REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
+-	REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
+-	REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
+-	REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
+-	REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
+-	REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
+-
+-	REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
+-	REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
+-
+-	REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
+-	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+-	REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
+-
+-	psb_intel_wait_for_vblank(dev);
+-
+-	REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
+-	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
+-
+-	psb_intel_wait_for_vblank(dev);
+-
+-	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
+-	for (i = 0; i < 256; ++i)
+-		REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
+-}
+-
+-static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
+-				 struct drm_file *file_priv,
+-				 uint32_t handle,
+-				 uint32_t width, uint32_t height)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+-	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+-	uint32_t temp;
+-	size_t addr = 0;
+-	struct gtt_range *gt;
+-	struct drm_gem_object *obj;
+-	int ret;
+-
+-	/* if we want to turn of the cursor ignore width and height */
+-	if (!handle) {
+-		/* turn off the cursor */
+-		temp = CURSOR_MODE_DISABLE;
+-
+-		if (gma_power_begin(dev, false)) {
+-			REG_WRITE(control, temp);
+-			REG_WRITE(base, 0);
+-			gma_power_end(dev);
+-		}
+-
+-		/* Unpin the old GEM object */
+-		if (psb_intel_crtc->cursor_obj) {
+-			gt = container_of(psb_intel_crtc->cursor_obj,
+-							struct gtt_range, gem);
+-			psb_gtt_unpin(gt);
+-			drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+-			psb_intel_crtc->cursor_obj = NULL;
+-		}
+-
+-		return 0;
+-	}
+-
+-	/* Currently we only support 64x64 cursors */
+-	if (width != 64 || height != 64) {
+-		dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
+-		return -EINVAL;
+-	}
+-
+-	obj = drm_gem_object_lookup(dev, file_priv, handle);
+-	if (!obj)
+-		return -ENOENT;
+-
+-	if (obj->size < width * height * 4) {
+-		dev_dbg(dev->dev, "buffer is to small\n");
+-		return -ENOMEM;
+-	}
+-
+-	gt = container_of(obj, struct gtt_range, gem);
+-
+-	/* Pin the memory into the GTT */
+-	ret = psb_gtt_pin(gt);
+-	if (ret) {
+-		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+-		return ret;
+-	}
+-
+-
+-	addr = gt->offset;	/* Or resource.start ??? */
+-
+-	psb_intel_crtc->cursor_addr = addr;
+-
+-	temp = 0;
+-	/* set the pipe for the cursor */
+-	temp |= (pipe << 28);
+-	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+-
+-	if (gma_power_begin(dev, false)) {
+-		REG_WRITE(control, temp);
+-		REG_WRITE(base, addr);
+-		gma_power_end(dev);
+-	}
+-
+-	/* unpin the old bo */
+-	if (psb_intel_crtc->cursor_obj) {
+-		gt = container_of(psb_intel_crtc->cursor_obj,
+-							struct gtt_range, gem);
+-		psb_gtt_unpin(gt);
+-		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+-		psb_intel_crtc->cursor_obj = obj;
+-	}
+-	return 0;
+-}
+-
+-static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	uint32_t temp = 0;
+-	uint32_t addr;
+-
+-
+-	if (x < 0) {
+-		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+-		x = -x;
+-	}
+-	if (y < 0) {
+-		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+-		y = -y;
+-	}
+-
+-	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+-	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+-
+-	addr = psb_intel_crtc->cursor_addr;
+-
+-	if (gma_power_begin(dev, false)) {
+-		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+-		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
+-		gma_power_end(dev);
+-	}
+-	return 0;
+-}
+-
+-void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+-			 u16 *green, u16 *blue, uint32_t type, uint32_t size)
+-{
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int i;
+-
+-	if (size != 256)
+-		return;
+-
+-	for (i = 0; i < 256; i++) {
+-		psb_intel_crtc->lut_r[i] = red[i] >> 8;
+-		psb_intel_crtc->lut_g[i] = green[i] >> 8;
+-		psb_intel_crtc->lut_b[i] = blue[i] >> 8;
+-	}
+-
+-	psb_intel_crtc_load_lut(crtc);
+-}
+-
+-static int psb_crtc_set_config(struct drm_mode_set *set)
+-{
+-	int ret;
+-	struct drm_device *dev = set->crtc->dev;
+-
+-	pm_runtime_forbid(&dev->pdev->dev);
+-	ret = drm_crtc_helper_set_config(set);
+-	pm_runtime_allow(&dev->pdev->dev);
+-	return ret;
+-}
+-
+-/* Returns the clock of the currently programmed mode of the given pipe. */
+-static int psb_intel_crtc_clock_get(struct drm_device *dev,
+-				struct drm_crtc *crtc)
+-{
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	u32 dpll;
+-	u32 fp;
+-	struct psb_intel_clock_t clock;
+-	bool is_lvds;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	if (gma_power_begin(dev, false)) {
+-		dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
+-		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+-			fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
+-		else
+-			fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
+-		is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
+-		gma_power_end(dev);
+-	} else {
+-		dpll = (pipe == 0) ?
+-			dev_priv->saveDPLL_A : dev_priv->saveDPLL_B;
+-
+-		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+-			fp = (pipe == 0) ?
+-				dev_priv->saveFPA0 :
+-				dev_priv->saveFPB0;
+-		else
+-			fp = (pipe == 0) ?
+-				dev_priv->saveFPA1 :
+-				dev_priv->saveFPB1;
+-
+-		is_lvds = (pipe == 1) && (dev_priv->saveLVDS & LVDS_PORT_EN);
+-	}
+-
+-	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+-	clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+-	clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+-
+-	if (is_lvds) {
+-		clock.p1 =
+-		    ffs((dpll &
+-			 DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+-			DPLL_FPA01_P1_POST_DIV_SHIFT);
+-		clock.p2 = 14;
+-
+-		if ((dpll & PLL_REF_INPUT_MASK) ==
+-		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+-			/* XXX: might not be 66MHz */
+-			i8xx_clock(66000, &clock);
+-		} else
+-			i8xx_clock(48000, &clock);
+-	} else {
+-		if (dpll & PLL_P1_DIVIDE_BY_TWO)
+-			clock.p1 = 2;
+-		else {
+-			clock.p1 =
+-			    ((dpll &
+-			      DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+-			     DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+-		}
+-		if (dpll & PLL_P2_DIVIDE_BY_4)
+-			clock.p2 = 4;
+-		else
+-			clock.p2 = 2;
+-
+-		i8xx_clock(48000, &clock);
+-	}
+-
+-	/* XXX: It would be nice to validate the clocks, but we can't reuse
+-	 * i830PllIsValid() because it relies on the xf86_config connector
+-	 * configuration being accurate, which it isn't necessarily.
+-	 */
+-
+-	return clock.dot;
+-}
+-
+-/** Returns the currently programmed mode of the given pipe. */
+-struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
+-					     struct drm_crtc *crtc)
+-{
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	int pipe = psb_intel_crtc->pipe;
+-	struct drm_display_mode *mode;
+-	int htot;
+-	int hsync;
+-	int vtot;
+-	int vsync;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	if (gma_power_begin(dev, false)) {
+-		htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
+-		hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
+-		vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
+-		vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+-		gma_power_end(dev);
+-	} else {
+-		htot = (pipe == 0) ?
+-			dev_priv->saveHTOTAL_A : dev_priv->saveHTOTAL_B;
+-		hsync = (pipe == 0) ?
+-			dev_priv->saveHSYNC_A : dev_priv->saveHSYNC_B;
+-		vtot = (pipe == 0) ?
+-			dev_priv->saveVTOTAL_A : dev_priv->saveVTOTAL_B;
+-		vsync = (pipe == 0) ?
+-			dev_priv->saveVSYNC_A : dev_priv->saveVSYNC_B;
+-	}
+-
+-	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+-	if (!mode)
+-		return NULL;
+-
+-	mode->clock = psb_intel_crtc_clock_get(dev, crtc);
+-	mode->hdisplay = (htot & 0xffff) + 1;
+-	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
+-	mode->hsync_start = (hsync & 0xffff) + 1;
+-	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
+-	mode->vdisplay = (vtot & 0xffff) + 1;
+-	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
+-	mode->vsync_start = (vsync & 0xffff) + 1;
+-	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
+-
+-	drm_mode_set_name(mode);
+-	drm_mode_set_crtcinfo(mode, 0);
+-
+-	return mode;
+-}
+-
+-void psb_intel_crtc_destroy(struct drm_crtc *crtc)
+-{
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	struct gtt_range *gt;
+-
+-	/* Unpin the old GEM object */
+-	if (psb_intel_crtc->cursor_obj) {
+-		gt = container_of(psb_intel_crtc->cursor_obj,
+-						struct gtt_range, gem);
+-		psb_gtt_unpin(gt);
+-		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
+-		psb_intel_crtc->cursor_obj = NULL;
+-	}
+-	kfree(psb_intel_crtc->crtc_state);
+-	drm_crtc_cleanup(crtc);
+-	kfree(psb_intel_crtc);
+-}
+-
+-const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
+-	.dpms = psb_intel_crtc_dpms,
+-	.mode_fixup = psb_intel_crtc_mode_fixup,
+-	.mode_set = psb_intel_crtc_mode_set,
+-	.mode_set_base = psb_intel_pipe_set_base,
+-	.prepare = psb_intel_crtc_prepare,
+-	.commit = psb_intel_crtc_commit,
+-};
+-
+-const struct drm_crtc_funcs psb_intel_crtc_funcs = {
+-	.save = psb_intel_crtc_save,
+-	.restore = psb_intel_crtc_restore,
+-	.cursor_set = psb_intel_crtc_cursor_set,
+-	.cursor_move = psb_intel_crtc_cursor_move,
+-	.gamma_set = psb_intel_crtc_gamma_set,
+-	.set_config = psb_crtc_set_config,
+-	.destroy = psb_intel_crtc_destroy,
+-};
+-
+-/*
+- * Set the default value of cursor control and base register
+- * to zero. This is a workaround for h/w defect on Oaktrail
+- */
+-static void psb_intel_cursor_init(struct drm_device *dev, int pipe)
+-{
+-	u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
+-	u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
+-
+-	REG_WRITE(control[pipe], 0);
+-	REG_WRITE(base[pipe], 0);
+-}
+-
+-void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+-		     struct psb_intel_mode_device *mode_dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct psb_intel_crtc *psb_intel_crtc;
+-	int i;
+-	uint16_t *r_base, *g_base, *b_base;
+-
+-	/* We allocate a extra array of drm_connector pointers
+-	 * for fbdev after the crtc */
+-	psb_intel_crtc =
+-	    kzalloc(sizeof(struct psb_intel_crtc) +
+-		    (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+-		    GFP_KERNEL);
+-	if (psb_intel_crtc == NULL)
+-		return;
+-
+-	psb_intel_crtc->crtc_state =
+-		kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
+-	if (!psb_intel_crtc->crtc_state) {
+-		dev_err(dev->dev, "Crtc state error: No memory\n");
+-		kfree(psb_intel_crtc);
+-		return;
+-	}
+-
+-	/* Set the CRTC operations from the chip specific data */
+-	drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs);
+-
+-	drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
+-	psb_intel_crtc->pipe = pipe;
+-	psb_intel_crtc->plane = pipe;
+-
+-	r_base = psb_intel_crtc->base.gamma_store;
+-	g_base = r_base + 256;
+-	b_base = g_base + 256;
+-	for (i = 0; i < 256; i++) {
+-		psb_intel_crtc->lut_r[i] = i;
+-		psb_intel_crtc->lut_g[i] = i;
+-		psb_intel_crtc->lut_b[i] = i;
+-		r_base[i] = i << 8;
+-		g_base[i] = i << 8;
+-		b_base[i] = i << 8;
+-
+-		psb_intel_crtc->lut_adj[i] = 0;
+-	}
+-
+-	psb_intel_crtc->mode_dev = mode_dev;
+-	psb_intel_crtc->cursor_addr = 0;
+-
+-	drm_crtc_helper_add(&psb_intel_crtc->base,
+-						dev_priv->ops->crtc_helper);
+-
+-	/* Setup the array of drm_connector pointer array */
+-	psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
+-	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
+-	       dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
+-	dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] =
+-							&psb_intel_crtc->base;
+-	dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] =
+-							&psb_intel_crtc->base;
+-	psb_intel_crtc->mode_set.connectors =
+-	    (struct drm_connector **) (psb_intel_crtc + 1);
+-	psb_intel_crtc->mode_set.num_connectors = 0;
+-	psb_intel_cursor_init(dev, pipe);
+-}
+-
+-int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+-				struct drm_file *file_priv)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
+-	struct drm_mode_object *drmmode_obj;
+-	struct psb_intel_crtc *crtc;
+-
+-	if (!dev_priv) {
+-		dev_err(dev->dev, "called with no initialization\n");
+-		return -EINVAL;
+-	}
+-
+-	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
+-			DRM_MODE_OBJECT_CRTC);
+-
+-	if (!drmmode_obj) {
+-		dev_err(dev->dev, "no such CRTC id\n");
+-		return -EINVAL;
+-	}
+-
+-	crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
+-	pipe_from_crtc_id->pipe = crtc->pipe;
+-
+-	return 0;
+-}
+-
+-struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
+-{
+-	struct drm_crtc *crtc = NULL;
+-
+-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+-		struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-		if (psb_intel_crtc->pipe == pipe)
+-			break;
+-	}
+-	return crtc;
+-}
+-
+-int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
+-{
+-	int index_mask = 0;
+-	struct drm_connector *connector;
+-	int entry = 0;
+-
+-	list_for_each_entry(connector, &dev->mode_config.connector_list,
+-			    head) {
+-		struct psb_intel_output *psb_intel_output =
+-		    to_psb_intel_output(connector);
+-		if (type_mask & (1 << psb_intel_output->type))
+-			index_mask |= (1 << entry);
+-		entry++;
+-	}
+-	return index_mask;
+-}
+-
+-
+-void psb_intel_modeset_cleanup(struct drm_device *dev)
+-{
+-	drm_mode_config_cleanup(dev);
+-}
+-
+-
+-/* current intel driver doesn't take advantage of encoders
+-   always give back the encoder for the connector
+-*/
+-struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
+-{
+-	struct psb_intel_output *psb_intel_output =
+-					to_psb_intel_output(connector);
+-
+-	return &psb_intel_output->enc;
+-}
+-
+diff --git a/drivers/staging/gma500/psb_intel_display.h b/drivers/staging/gma500/psb_intel_display.h
+deleted file mode 100644
+index 535b49a..0000000
+--- a/drivers/staging/gma500/psb_intel_display.h
++++ /dev/null
+@@ -1,28 +0,0 @@
+-/* copyright (c) 2008, Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors:
+- * Eric Anholt <eric at anholt.net>
+- */
+-
+-#ifndef _INTEL_DISPLAY_H_
+-#define _INTEL_DISPLAY_H_
+-
+-bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
+-void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
+-			 u16 *green, u16 *blue, uint32_t type, uint32_t size);
+-void psb_intel_crtc_destroy(struct drm_crtc *crtc);
+-
+-#endif
+diff --git a/drivers/staging/gma500/psb_intel_drv.h b/drivers/staging/gma500/psb_intel_drv.h
+deleted file mode 100644
+index 36b554b..0000000
+--- a/drivers/staging/gma500/psb_intel_drv.h
++++ /dev/null
+@@ -1,230 +0,0 @@
+-/*
+- * Copyright (c) 2009-2011, Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- */
+-
+-#ifndef __INTEL_DRV_H__
+-#define __INTEL_DRV_H__
+-
+-#include <linux/i2c.h>
+-#include <linux/i2c-algo-bit.h>
+-#include <drm/drm_crtc.h>
+-#include <drm/drm_crtc_helper.h>
+-#include <linux/gpio.h>
+-
+-/*
+- * Display related stuff
+- */
+-
+-/* store information about an Ixxx DVO */
+-/* The i830->i865 use multiple DVOs with multiple i2cs */
+-/* the i915, i945 have a single sDVO i2c bus - which is different */
+-#define MAX_OUTPUTS 6
+-/* maximum connectors per crtcs in the mode set */
+-#define INTELFB_CONN_LIMIT 4
+-
+-#define INTEL_I2C_BUS_DVO 1
+-#define INTEL_I2C_BUS_SDVO 2
+-
+-/* these are outputs from the chip - integrated only
+- * external chips are via DVO or SDVO output */
+-#define INTEL_OUTPUT_UNUSED 0
+-#define INTEL_OUTPUT_ANALOG 1
+-#define INTEL_OUTPUT_DVO 2
+-#define INTEL_OUTPUT_SDVO 3
+-#define INTEL_OUTPUT_LVDS 4
+-#define INTEL_OUTPUT_TVOUT 5
+-#define INTEL_OUTPUT_HDMI 6
+-#define INTEL_OUTPUT_MIPI 7
+-#define INTEL_OUTPUT_MIPI2 8
+-
+-#define INTEL_DVO_CHIP_NONE 0
+-#define INTEL_DVO_CHIP_LVDS 1
+-#define INTEL_DVO_CHIP_TMDS 2
+-#define INTEL_DVO_CHIP_TVOUT 4
+-
+-/*
+- * Hold information useally put on the device driver privates here,
+- * since it needs to be shared across multiple of devices drivers privates.
+- */
+-struct psb_intel_mode_device {
+-
+-	/*
+-	 * Abstracted memory manager operations
+-	 */
+-	 size_t(*bo_offset) (struct drm_device *dev, void *bo);
+-
+-	/*
+-	 * Cursor (Can go ?)
+-	 */
+-	int cursor_needs_physical;
+-
+-	/*
+-	 * LVDS info
+-	 */
+-	int backlight_duty_cycle;	/* restore backlight to this value */
+-	bool panel_wants_dither;
+-	struct drm_display_mode *panel_fixed_mode;
+-	struct drm_display_mode *panel_fixed_mode2;
+-	struct drm_display_mode *vbt_mode;	/* if any */
+-
+-	uint32_t saveBLC_PWM_CTL;
+-};
+-
+-struct psb_intel_i2c_chan {
+-	/* for getting at dev. private (mmio etc.) */
+-	struct drm_device *drm_dev;
+-	u32 reg;		/* GPIO reg */
+-	struct i2c_adapter adapter;
+-	struct i2c_algo_bit_data algo;
+-	u8 slave_addr;
+-};
+-
+-struct psb_intel_output {
+-	struct drm_connector base;
+-
+-	struct drm_encoder enc;
+-	int type;
+-
+-	struct psb_intel_i2c_chan *i2c_bus;	/* for control functions */
+-	struct psb_intel_i2c_chan *ddc_bus;	/* for DDC only stuff */
+-	bool load_detect_temp;
+-	void *dev_priv;
+-
+-	struct psb_intel_mode_device *mode_dev;
+-	struct i2c_adapter *hdmi_i2c_adapter;	/* for control functions */
+-};
+-
+-struct psb_intel_crtc_state {
+-	uint32_t saveDSPCNTR;
+-	uint32_t savePIPECONF;
+-	uint32_t savePIPESRC;
+-	uint32_t saveDPLL;
+-	uint32_t saveFP0;
+-	uint32_t saveFP1;
+-	uint32_t saveHTOTAL;
+-	uint32_t saveHBLANK;
+-	uint32_t saveHSYNC;
+-	uint32_t saveVTOTAL;
+-	uint32_t saveVBLANK;
+-	uint32_t saveVSYNC;
+-	uint32_t saveDSPSTRIDE;
+-	uint32_t saveDSPSIZE;
+-	uint32_t saveDSPPOS;
+-	uint32_t saveDSPBASE;
+-	uint32_t savePalette[256];
+-};
+-
+-struct psb_intel_crtc {
+-	struct drm_crtc base;
+-	int pipe;
+-	int plane;
+-	uint32_t cursor_addr;
+-	u8 lut_r[256], lut_g[256], lut_b[256];
+-	u8 lut_adj[256];
+-	struct psb_intel_framebuffer *fbdev_fb;
+-	/* a mode_set for fbdev users on this crtc */
+-	struct drm_mode_set mode_set;
+-
+-	/* GEM object that holds our cursor */
+-	struct drm_gem_object *cursor_obj;
+-
+-	struct drm_display_mode saved_mode;
+-	struct drm_display_mode saved_adjusted_mode;
+-
+-	struct psb_intel_mode_device *mode_dev;
+-
+-	/*crtc mode setting flags*/
+-	u32 mode_flags;
+-
+-	/* Saved Crtc HW states */
+-	struct psb_intel_crtc_state *crtc_state;
+-};
+-
+-#define to_psb_intel_crtc(x)	\
+-		container_of(x, struct psb_intel_crtc, base)
+-#define to_psb_intel_output(x)	\
+-		container_of(x, struct psb_intel_output, base)
+-#define enc_to_psb_intel_output(x)	\
+-		container_of(x, struct psb_intel_output, enc)
+-#define to_psb_intel_framebuffer(x)	\
+-		container_of(x, struct psb_intel_framebuffer, base)
+-
+-struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
+-					const u32 reg, const char *name);
+-void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
+-int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output);
+-extern bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output);
+-
+-extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
+-			    struct psb_intel_mode_device *mode_dev);
+-extern void psb_intel_crt_init(struct drm_device *dev);
+-extern void psb_intel_sdvo_init(struct drm_device *dev, int output_device);
+-extern void psb_intel_dvo_init(struct drm_device *dev);
+-extern void psb_intel_tv_init(struct drm_device *dev);
+-extern void psb_intel_lvds_init(struct drm_device *dev,
+-			    struct psb_intel_mode_device *mode_dev);
+-extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
+-extern void mrst_lvds_init(struct drm_device *dev,
+-			   struct psb_intel_mode_device *mode_dev);
+-extern void mrst_wait_for_INTR_PKT_SENT(struct drm_device *dev);
+-extern void mrst_dsi_init(struct drm_device *dev,
+-			   struct psb_intel_mode_device *mode_dev);
+-extern void mid_dsi_init(struct drm_device *dev,
+-		    struct psb_intel_mode_device *mode_dev, int dsi_num);
+-
+-extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
+-extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
+-extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
+-
+-extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
+-					      *connector);
+-
+-extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
+-						    struct drm_crtc *crtc);
+-extern void psb_intel_wait_for_vblank(struct drm_device *dev);
+-extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
+-				struct drm_file *file_priv);
+-extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
+-						 int pipe);
+-extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
+-					     int sdvoB);
+-extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
+-extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
+-				   int enable);
+-extern int intelfb_probe(struct drm_device *dev);
+-extern int intelfb_remove(struct drm_device *dev,
+-			  struct drm_framebuffer *fb);
+-extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
+-							*dev, struct
+-							drm_mode_fb_cmd
+-							*mode_cmd,
+-							void *mm_private);
+-extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+-				      struct drm_display_mode *mode,
+-				      struct drm_display_mode *adjusted_mode);
+-extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+-				     struct drm_display_mode *mode);
+-extern int psb_intel_lvds_set_property(struct drm_connector *connector,
+-					struct drm_property *property,
+-					uint64_t value);
+-extern void psb_intel_lvds_destroy(struct drm_connector *connector);
+-extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
+-
+-extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe);
+-extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe);
+-
+-#endif				/* __INTEL_DRV_H__ */
+diff --git a/drivers/staging/gma500/psb_intel_lvds.c b/drivers/staging/gma500/psb_intel_lvds.c
+deleted file mode 100644
+index 21022e1..0000000
+--- a/drivers/staging/gma500/psb_intel_lvds.c
++++ /dev/null
+@@ -1,854 +0,0 @@
+-/*
+- * Copyright © 2006-2007 Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors:
+- *	Eric Anholt <eric at anholt.net>
+- *	Dave Airlie <airlied at linux.ie>
+- *	Jesse Barnes <jesse.barnes at intel.com>
+- */
+-
+-#include <linux/i2c.h>
+-#include <drm/drmP.h>
+-
+-#include "intel_bios.h"
+-#include "psb_drv.h"
+-#include "psb_intel_drv.h"
+-#include "psb_intel_reg.h"
+-#include "power.h"
+-#include <linux/pm_runtime.h>
+-
+-/*
+- * LVDS I2C backlight control macros
+- */
+-#define BRIGHTNESS_MAX_LEVEL 100
+-#define BRIGHTNESS_MASK 0xFF
+-#define BLC_I2C_TYPE	0x01
+-#define BLC_PWM_TYPT	0x02
+-
+-#define BLC_POLARITY_NORMAL 0
+-#define BLC_POLARITY_INVERSE 1
+-
+-#define PSB_BLC_MAX_PWM_REG_FREQ       (0xFFFE)
+-#define PSB_BLC_MIN_PWM_REG_FREQ	(0x2)
+-#define PSB_BLC_PWM_PRECISION_FACTOR	(10)
+-#define PSB_BACKLIGHT_PWM_CTL_SHIFT	(16)
+-#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
+-
+-struct psb_intel_lvds_priv {
+-	/*
+-	 * Saved LVDO output states
+-	 */
+-	uint32_t savePP_ON;
+-	uint32_t savePP_OFF;
+-	uint32_t saveLVDS;
+-	uint32_t savePP_CONTROL;
+-	uint32_t savePP_CYCLE;
+-	uint32_t savePFIT_CONTROL;
+-	uint32_t savePFIT_PGM_RATIOS;
+-	uint32_t saveBLC_PWM_CTL;
+-};
+-
+-
+-/*
+- * Returns the maximum level of the backlight duty cycle field.
+- */
+-static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 ret;
+-
+-	if (gma_power_begin(dev, false)) {
+-		ret = REG_READ(BLC_PWM_CTL);
+-		gma_power_end(dev);
+-	} else /* Powered off, use the saved value */
+-		ret = dev_priv->saveBLC_PWM_CTL;
+-
+-	/* Top 15bits hold the frequency mask */
+-	ret = (ret &  BACKLIGHT_MODULATION_FREQ_MASK) >>
+-					BACKLIGHT_MODULATION_FREQ_SHIFT;
+-
+-        ret *= 2;	/* Return a 16bit range as needed for setting */
+-        if (ret == 0)
+-                dev_err(dev->dev, "BL bug: Reg %08x save %08X\n",
+-                        REG_READ(BLC_PWM_CTL), dev_priv->saveBLC_PWM_CTL);
+-	return ret;
+-}
+-
+-/*
+- * Set LVDS backlight level by I2C command
+- *
+- * FIXME: at some point we need to both track this for PM and also
+- * disable runtime pm on MRST if the brightness is nil (ie blanked)
+- */
+-static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
+-					unsigned int level)
+-{
+-	struct drm_psb_private *dev_priv =
+-		(struct drm_psb_private *)dev->dev_private;
+-
+-	struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
+-	u8 out_buf[2];
+-	unsigned int blc_i2c_brightness;
+-
+-	struct i2c_msg msgs[] = {
+-		{
+-			.addr = lvds_i2c_bus->slave_addr,
+-			.flags = 0,
+-			.len = 2,
+-			.buf = out_buf,
+-		}
+-	};
+-
+-	blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
+-			     BRIGHTNESS_MASK /
+-			     BRIGHTNESS_MAX_LEVEL);
+-
+-	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+-		blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
+-
+-	out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
+-	out_buf[1] = (u8)blc_i2c_brightness;
+-
+-	if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
+-		dev_dbg(dev->dev, "I2C set brightness.(command, value) (%d, %d)\n",
+-			dev_priv->lvds_bl->brightnesscmd,
+-			blc_i2c_brightness);
+-		return 0;
+-	}
+-
+-	dev_err(dev->dev, "I2C transfer error\n");
+-	return -1;
+-}
+-
+-
+-static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
+-{
+-	struct drm_psb_private *dev_priv =
+-			(struct drm_psb_private *)dev->dev_private;
+-
+-	u32 max_pwm_blc;
+-	u32 blc_pwm_duty_cycle;
+-
+-	max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
+-
+-	/*BLC_PWM_CTL Should be initiated while backlight device init*/
+-	BUG_ON(max_pwm_blc == 0);
+-
+-	blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
+-
+-	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
+-		blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
+-
+-	blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
+-	REG_WRITE(BLC_PWM_CTL,
+-		  (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+-		  (blc_pwm_duty_cycle));
+-
+-        dev_info(dev->dev, "Backlight lvds set brightness %08x\n",
+-		  (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
+-		  (blc_pwm_duty_cycle));
+-
+-	return 0;
+-}
+-
+-/*
+- * Set LVDS backlight level either by I2C or PWM
+- */
+-void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-
+-	dev_dbg(dev->dev, "backlight level is %d\n", level);
+-
+-	if (!dev_priv->lvds_bl) {
+-		dev_err(dev->dev, "NO LVDS backlight info\n");
+-		return;
+-	}
+-
+-	if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
+-		psb_lvds_i2c_set_brightness(dev, level);
+-	else
+-		psb_lvds_pwm_set_brightness(dev, level);
+-}
+-
+-/*
+- * Sets the backlight level.
+- *
+- * level: backlight level, from 0 to psb_intel_lvds_get_max_backlight().
+- */
+-static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 blc_pwm_ctl;
+-
+-	if (gma_power_begin(dev, false)) {
+-		blc_pwm_ctl = REG_READ(BLC_PWM_CTL);
+-		blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+-		REG_WRITE(BLC_PWM_CTL,
+-				(blc_pwm_ctl |
+-				(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+-		dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+-					(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+-		gma_power_end(dev);
+-	} else {
+-		blc_pwm_ctl = dev_priv->saveBLC_PWM_CTL &
+-				~BACKLIGHT_DUTY_CYCLE_MASK;
+-		dev_priv->saveBLC_PWM_CTL = (blc_pwm_ctl |
+-					(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
+-	}
+-}
+-
+-/*
+- * Sets the power state for the panel.
+- */
+-static void psb_intel_lvds_set_power(struct drm_device *dev,
+-				 struct psb_intel_output *output, bool on)
+-{
+-	u32 pp_status;
+-
+-	if (!gma_power_begin(dev, true)) {
+-	        dev_err(dev->dev, "set power, chip off!\n");
+-		return;
+-        }
+-        
+-	if (on) {
+-		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+-			  POWER_TARGET_ON);
+-		do {
+-			pp_status = REG_READ(PP_STATUS);
+-		} while ((pp_status & PP_ON) == 0);
+-
+-		psb_intel_lvds_set_backlight(dev,
+-					 output->
+-					 mode_dev->backlight_duty_cycle);
+-	} else {
+-		psb_intel_lvds_set_backlight(dev, 0);
+-
+-		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+-			  ~POWER_TARGET_ON);
+-		do {
+-			pp_status = REG_READ(PP_STATUS);
+-		} while (pp_status & PP_ON);
+-	}
+-
+-	gma_power_end(dev);
+-}
+-
+-static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+-
+-	if (mode == DRM_MODE_DPMS_ON)
+-		psb_intel_lvds_set_power(dev, output, true);
+-	else
+-		psb_intel_lvds_set_power(dev, output, false);
+-
+-	/* XXX: We never power down the LVDS pairs. */
+-}
+-
+-static void psb_intel_lvds_save(struct drm_connector *connector)
+-{
+-	struct drm_device *dev = connector->dev;
+-	struct drm_psb_private *dev_priv =
+-		(struct drm_psb_private *)dev->dev_private;
+-	struct psb_intel_output *psb_intel_output =
+-		to_psb_intel_output(connector);
+-	struct psb_intel_lvds_priv *lvds_priv =
+-		(struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
+-
+-	lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
+-	lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
+-	lvds_priv->saveLVDS = REG_READ(LVDS);
+-	lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
+-	lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
+-	/*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
+-	lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+-	lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
+-	lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
+-
+-	/*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
+-	dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
+-						BACKLIGHT_DUTY_CYCLE_MASK);
+-
+-	/*
+-	 * If the light is off at server startup,
+-	 * just make it full brightness
+-	 */
+-	if (dev_priv->backlight_duty_cycle == 0)
+-		dev_priv->backlight_duty_cycle =
+-		psb_intel_lvds_get_max_backlight(dev);
+-
+-	dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+-			lvds_priv->savePP_ON,
+-			lvds_priv->savePP_OFF,
+-			lvds_priv->saveLVDS,
+-			lvds_priv->savePP_CONTROL,
+-			lvds_priv->savePP_CYCLE,
+-			lvds_priv->saveBLC_PWM_CTL);
+-}
+-
+-static void psb_intel_lvds_restore(struct drm_connector *connector)
+-{
+-	struct drm_device *dev = connector->dev;
+-	u32 pp_status;
+-	struct psb_intel_output *psb_intel_output =
+-					to_psb_intel_output(connector);
+-	struct psb_intel_lvds_priv *lvds_priv =
+-		(struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
+-
+-	dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
+-			lvds_priv->savePP_ON,
+-			lvds_priv->savePP_OFF,
+-			lvds_priv->saveLVDS,
+-			lvds_priv->savePP_CONTROL,
+-			lvds_priv->savePP_CYCLE,
+-			lvds_priv->saveBLC_PWM_CTL);
+-
+-	REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
+-	REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
+-	REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
+-	REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
+-	REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
+-	/*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
+-	REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
+-	REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
+-	REG_WRITE(LVDS, lvds_priv->saveLVDS);
+-
+-	if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
+-		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
+-			POWER_TARGET_ON);
+-		do {
+-			pp_status = REG_READ(PP_STATUS);
+-		} while ((pp_status & PP_ON) == 0);
+-	} else {
+-		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
+-			~POWER_TARGET_ON);
+-		do {
+-			pp_status = REG_READ(PP_STATUS);
+-		} while (pp_status & PP_ON);
+-	}
+-}
+-
+-int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+-				 struct drm_display_mode *mode)
+-{
+-	struct psb_intel_output *psb_intel_output =
+-				to_psb_intel_output(connector);
+-	struct drm_display_mode *fixed_mode =
+-	    psb_intel_output->mode_dev->panel_fixed_mode;
+-
+-	if (psb_intel_output->type == INTEL_OUTPUT_MIPI2)
+-		fixed_mode = psb_intel_output->mode_dev->panel_fixed_mode2;
+-
+-	/* just in case */
+-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+-		return MODE_NO_DBLESCAN;
+-
+-	/* just in case */
+-	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+-		return MODE_NO_INTERLACE;
+-
+-	if (fixed_mode) {
+-		if (mode->hdisplay > fixed_mode->hdisplay)
+-			return MODE_PANEL;
+-		if (mode->vdisplay > fixed_mode->vdisplay)
+-			return MODE_PANEL;
+-	}
+-	return MODE_OK;
+-}
+-
+-bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
+-				  struct drm_display_mode *mode,
+-				  struct drm_display_mode *adjusted_mode)
+-{
+-	struct psb_intel_mode_device *mode_dev =
+-	    enc_to_psb_intel_output(encoder)->mode_dev;
+-	struct drm_device *dev = encoder->dev;
+-	struct psb_intel_crtc *psb_intel_crtc =
+-				to_psb_intel_crtc(encoder->crtc);
+-	struct drm_encoder *tmp_encoder;
+-	struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
+-	struct psb_intel_output *psb_intel_output =
+-					enc_to_psb_intel_output(encoder);
+-
+-	if (psb_intel_output->type == INTEL_OUTPUT_MIPI2)
+-		panel_fixed_mode = mode_dev->panel_fixed_mode2;
+-
+-	/* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */
+-	if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) {
+-		printk(KERN_ERR "Can't support LVDS on pipe A\n");
+-		return false;
+-	}
+-	if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
+-		printk(KERN_ERR "Must use PIPE A\n");
+-		return false;
+-	}
+-	/* Should never happen!! */
+-	list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
+-			    head) {
+-		if (tmp_encoder != encoder
+-		    && tmp_encoder->crtc == encoder->crtc) {
+-			printk(KERN_ERR "Can't enable LVDS and another "
+-			       "encoder on the same pipe\n");
+-			return false;
+-		}
+-	}
+-
+-	/*
+-	 * If we have timings from the BIOS for the panel, put them in
+-	 * to the adjusted mode.  The CRTC will be set up for this mode,
+-	 * with the panel scaling set up to source from the H/VDisplay
+-	 * of the original mode.
+-	 */
+-	if (panel_fixed_mode != NULL) {
+-		adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
+-		adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
+-		adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
+-		adjusted_mode->htotal = panel_fixed_mode->htotal;
+-		adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
+-		adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
+-		adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
+-		adjusted_mode->vtotal = panel_fixed_mode->vtotal;
+-		adjusted_mode->clock = panel_fixed_mode->clock;
+-		drm_mode_set_crtcinfo(adjusted_mode,
+-				      CRTC_INTERLACE_HALVE_V);
+-	}
+-
+-	/*
+-	 * XXX: It would be nice to support lower refresh rates on the
+-	 * panels to reduce power consumption, and perhaps match the
+-	 * user's requested refresh rate.
+-	 */
+-
+-	return true;
+-}
+-
+-static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+-	struct psb_intel_mode_device *mode_dev = output->mode_dev;
+-
+-	if (!gma_power_begin(dev, true))
+-		return;
+-
+-	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
+-	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
+-					  BACKLIGHT_DUTY_CYCLE_MASK);
+-
+-	psb_intel_lvds_set_power(dev, output, false);
+-
+-	gma_power_end(dev);
+-}
+-
+-static void psb_intel_lvds_commit(struct drm_encoder *encoder)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+-	struct psb_intel_mode_device *mode_dev = output->mode_dev;
+-
+-	if (mode_dev->backlight_duty_cycle == 0)
+-		mode_dev->backlight_duty_cycle =
+-		    psb_intel_lvds_get_max_backlight(dev);
+-
+-	psb_intel_lvds_set_power(dev, output, true);
+-}
+-
+-static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
+-				struct drm_display_mode *mode,
+-				struct drm_display_mode *adjusted_mode)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 pfit_control;
+-
+-	/*
+-	 * The LVDS pin pair will already have been turned on in the
+-	 * psb_intel_crtc_mode_set since it has a large impact on the DPLL
+-	 * settings.
+-	 */
+-
+-	/*
+-	 * Enable automatic panel scaling so that non-native modes fill the
+-	 * screen.  Should be enabled before the pipe is enabled, according to
+-	 * register description and PRM.
+-	 */
+-	if (mode->hdisplay != adjusted_mode->hdisplay ||
+-	    mode->vdisplay != adjusted_mode->vdisplay)
+-		pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
+-				HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
+-				HORIZ_INTERP_BILINEAR);
+-	else
+-		pfit_control = 0;
+-
+-	if (dev_priv->lvds_dither)
+-		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+-
+-	REG_WRITE(PFIT_CONTROL, pfit_control);
+-}
+-
+-/*
+- * Detect the LVDS connection.
+- *
+- * This always returns CONNECTOR_STATUS_CONNECTED.
+- * This connector should only have
+- * been set up if the LVDS was actually connected anyway.
+- */
+-static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
+-						   *connector, bool force)
+-{
+-	return connector_status_connected;
+-}
+-
+-/*
+- * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
+- */
+-static int psb_intel_lvds_get_modes(struct drm_connector *connector)
+-{
+-	struct drm_device *dev = connector->dev;
+-	struct psb_intel_output *psb_intel_output =
+-					to_psb_intel_output(connector);
+-	struct psb_intel_mode_device *mode_dev =
+-					psb_intel_output->mode_dev;
+-	int ret = 0;
+-
+-	if (!IS_MRST(dev))
+-		ret = psb_intel_ddc_get_modes(psb_intel_output);
+-
+-	if (ret)
+-		return ret;
+-
+-	/* Didn't get an EDID, so
+-	 * Set wide sync ranges so we get all modes
+-	 * handed to valid_mode for checking
+-	 */
+-	connector->display_info.min_vfreq = 0;
+-	connector->display_info.max_vfreq = 200;
+-	connector->display_info.min_hfreq = 0;
+-	connector->display_info.max_hfreq = 200;
+-
+-	if (mode_dev->panel_fixed_mode != NULL) {
+-		struct drm_display_mode *mode =
+-		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
+-		drm_mode_probed_add(connector, mode);
+-		return 1;
+-	}
+-
+-	return 0;
+-}
+-
+-/**
+- * psb_intel_lvds_destroy - unregister and free LVDS structures
+- * @connector: connector to free
+- *
+- * Unregister the DDC bus for this connector then free the driver private
+- * structure.
+- */
+-void psb_intel_lvds_destroy(struct drm_connector *connector)
+-{
+-	struct psb_intel_output *psb_intel_output =
+-					to_psb_intel_output(connector);
+-
+-	if (psb_intel_output->ddc_bus)
+-		psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+-	drm_sysfs_connector_remove(connector);
+-	drm_connector_cleanup(connector);
+-	kfree(connector);
+-}
+-
+-int psb_intel_lvds_set_property(struct drm_connector *connector,
+-				       struct drm_property *property,
+-				       uint64_t value)
+-{
+-	struct drm_encoder *encoder = connector->encoder;
+-
+-	if (!encoder)
+-		return -1;
+-
+-	if (!strcmp(property->name, "scaling mode")) {
+-		struct psb_intel_crtc *crtc =
+-					to_psb_intel_crtc(encoder->crtc);
+-		uint64_t curval;
+-
+-		if (!crtc)
+-			goto set_prop_error;
+-
+-		switch (value) {
+-		case DRM_MODE_SCALE_FULLSCREEN:
+-			break;
+-		case DRM_MODE_SCALE_NO_SCALE:
+-			break;
+-		case DRM_MODE_SCALE_ASPECT:
+-			break;
+-		default:
+-			goto set_prop_error;
+-		}
+-
+-		if (drm_connector_property_get_value(connector,
+-						     property,
+-						     &curval))
+-			goto set_prop_error;
+-
+-		if (curval == value)
+-			goto set_prop_done;
+-
+-		if (drm_connector_property_set_value(connector,
+-							property,
+-							value))
+-			goto set_prop_error;
+-
+-		if (crtc->saved_mode.hdisplay != 0 &&
+-		    crtc->saved_mode.vdisplay != 0) {
+-			if (!drm_crtc_helper_set_mode(encoder->crtc,
+-						      &crtc->saved_mode,
+-						      encoder->crtc->x,
+-						      encoder->crtc->y,
+-						      encoder->crtc->fb))
+-				goto set_prop_error;
+-		}
+-	} else if (!strcmp(property->name, "backlight")) {
+-		if (drm_connector_property_set_value(connector,
+-							property,
+-							value))
+-			goto set_prop_error;
+-		else {
+-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+-			struct drm_psb_private *devp =
+-						encoder->dev->dev_private;
+-			struct backlight_device *bd = devp->backlight_device;
+-			if (bd) {
+-				bd->props.brightness = value;
+-				backlight_update_status(bd);
+-			}
+-#endif
+-		}
+-	} else if (!strcmp(property->name, "DPMS")) {
+-		struct drm_encoder_helper_funcs *hfuncs
+-						= encoder->helper_private;
+-		hfuncs->dpms(encoder, value);
+-	}
+-
+-set_prop_done:
+-	return 0;
+-set_prop_error:
+-	return -1;
+-}
+-
+-static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
+-	.dpms = psb_intel_lvds_encoder_dpms,
+-	.mode_fixup = psb_intel_lvds_mode_fixup,
+-	.prepare = psb_intel_lvds_prepare,
+-	.mode_set = psb_intel_lvds_mode_set,
+-	.commit = psb_intel_lvds_commit,
+-};
+-
+-const struct drm_connector_helper_funcs
+-				psb_intel_lvds_connector_helper_funcs = {
+-	.get_modes = psb_intel_lvds_get_modes,
+-	.mode_valid = psb_intel_lvds_mode_valid,
+-	.best_encoder = psb_intel_best_encoder,
+-};
+-
+-const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
+-	.dpms = drm_helper_connector_dpms,
+-	.save = psb_intel_lvds_save,
+-	.restore = psb_intel_lvds_restore,
+-	.detect = psb_intel_lvds_detect,
+-	.fill_modes = drm_helper_probe_single_connector_modes,
+-	.set_property = psb_intel_lvds_set_property,
+-	.destroy = psb_intel_lvds_destroy,
+-};
+-
+-
+-static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
+-{
+-	drm_encoder_cleanup(encoder);
+-}
+-
+-const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
+-	.destroy = psb_intel_lvds_enc_destroy,
+-};
+-
+-
+-
+-/**
+- * psb_intel_lvds_init - setup LVDS connectors on this device
+- * @dev: drm device
+- *
+- * Create the connector, register the LVDS DDC bus, and try to figure out what
+- * modes we can display on the LVDS panel (if present).
+- */
+-void psb_intel_lvds_init(struct drm_device *dev,
+-		     struct psb_intel_mode_device *mode_dev)
+-{
+-	struct psb_intel_output *psb_intel_output;
+-	struct psb_intel_lvds_priv *lvds_priv;
+-	struct drm_connector *connector;
+-	struct drm_encoder *encoder;
+-	struct drm_display_mode *scan;	/* *modes, *bios_mode; */
+-	struct drm_crtc *crtc;
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	u32 lvds;
+-	int pipe;
+-
+-	psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
+-	if (!psb_intel_output)
+-		return;
+-
+-	lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
+-	if (!lvds_priv) {
+-		kfree(psb_intel_output);
+-		dev_err(dev->dev, "LVDS private allocation error\n");
+-		return;
+-	}
+-
+-	psb_intel_output->dev_priv = lvds_priv;
+-	psb_intel_output->mode_dev = mode_dev;
+-
+-	connector = &psb_intel_output->base;
+-	encoder = &psb_intel_output->enc;
+-	drm_connector_init(dev, &psb_intel_output->base,
+-			   &psb_intel_lvds_connector_funcs,
+-			   DRM_MODE_CONNECTOR_LVDS);
+-
+-	drm_encoder_init(dev, &psb_intel_output->enc,
+-			 &psb_intel_lvds_enc_funcs,
+-			 DRM_MODE_ENCODER_LVDS);
+-
+-	drm_mode_connector_attach_encoder(&psb_intel_output->base,
+-					  &psb_intel_output->enc);
+-	psb_intel_output->type = INTEL_OUTPUT_LVDS;
+-
+-	drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
+-	drm_connector_helper_add(connector,
+-				 &psb_intel_lvds_connector_helper_funcs);
+-	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+-	connector->interlace_allowed = false;
+-	connector->doublescan_allowed = false;
+-
+-	/*Attach connector properties*/
+-	drm_connector_attach_property(connector,
+-				      dev->mode_config.scaling_mode_property,
+-				      DRM_MODE_SCALE_FULLSCREEN);
+-	drm_connector_attach_property(connector,
+-				      dev_priv->backlight_property,
+-				      BRIGHTNESS_MAX_LEVEL);
+-
+-	/*
+-	 * Set up I2C bus
+-	 * FIXME: distroy i2c_bus when exit
+-	 */
+-	psb_intel_output->i2c_bus = psb_intel_i2c_create(dev,
+-							 GPIOB,
+-							 "LVDSBLC_B");
+-	if (!psb_intel_output->i2c_bus) {
+-		dev_printk(KERN_ERR,
+-			&dev->pdev->dev, "I2C bus registration failed.\n");
+-		goto failed_blc_i2c;
+-	}
+-	psb_intel_output->i2c_bus->slave_addr = 0x2C;
+-	dev_priv->lvds_i2c_bus =  psb_intel_output->i2c_bus;
+-
+-	/*
+-	 * LVDS discovery:
+-	 * 1) check for EDID on DDC
+-	 * 2) check for VBT data
+-	 * 3) check to see if LVDS is already on
+-	 *    if none of the above, no panel
+-	 * 4) make sure lid is open
+-	 *    if closed, act like it's not there for now
+-	 */
+-
+-	/* Set up the DDC bus. */
+-	psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
+-							 GPIOC,
+-							 "LVDSDDC_C");
+-	if (!psb_intel_output->ddc_bus) {
+-		dev_printk(KERN_ERR, &dev->pdev->dev,
+-			   "DDC bus registration " "failed.\n");
+-		goto failed_ddc;
+-	}
+-
+-	/*
+-	 * Attempt to get the fixed panel mode from DDC.  Assume that the
+-	 * preferred mode is the right one.
+-	 */
+-	psb_intel_ddc_get_modes(psb_intel_output);
+-	list_for_each_entry(scan, &connector->probed_modes, head) {
+-		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+-			mode_dev->panel_fixed_mode =
+-			    drm_mode_duplicate(dev, scan);
+-			goto out;	/* FIXME: check for quirks */
+-		}
+-	}
+-
+-	/* Failed to get EDID, what about VBT? do we need this? */
+-	if (mode_dev->vbt_mode)
+-		mode_dev->panel_fixed_mode =
+-		    drm_mode_duplicate(dev, mode_dev->vbt_mode);
+-
+-	if (!mode_dev->panel_fixed_mode)
+-		if (dev_priv->lfp_lvds_vbt_mode)
+-			mode_dev->panel_fixed_mode =
+-				drm_mode_duplicate(dev,
+-					dev_priv->lfp_lvds_vbt_mode);
+-
+-	/*
+-	 * If we didn't get EDID, try checking if the panel is already turned
+-	 * on.	If so, assume that whatever is currently programmed is the
+-	 * correct mode.
+-	 */
+-	lvds = REG_READ(LVDS);
+-	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
+-	crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
+-
+-	if (crtc && (lvds & LVDS_PORT_EN)) {
+-		mode_dev->panel_fixed_mode =
+-		    psb_intel_crtc_mode_get(dev, crtc);
+-		if (mode_dev->panel_fixed_mode) {
+-			mode_dev->panel_fixed_mode->type |=
+-			    DRM_MODE_TYPE_PREFERRED;
+-			goto out;	/* FIXME: check for quirks */
+-		}
+-	}
+-
+-	/* If we still don't have a mode after all that, give up. */
+-	if (!mode_dev->panel_fixed_mode) {
+-		dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
+-		goto failed_find;
+-	}
+-
+-	/*
+-	 * Blacklist machines with BIOSes that list an LVDS panel without
+-	 * actually having one.
+-	 */
+-out:
+-	drm_sysfs_connector_add(connector);
+-	return;
+-
+-failed_find:
+-	if (psb_intel_output->ddc_bus)
+-		psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+-failed_ddc:
+-	if (psb_intel_output->i2c_bus)
+-		psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
+-failed_blc_i2c:
+-	drm_encoder_cleanup(encoder);
+-	drm_connector_cleanup(connector);
+-	kfree(connector);
+-}
+-
+diff --git a/drivers/staging/gma500/psb_intel_modes.c b/drivers/staging/gma500/psb_intel_modes.c
+deleted file mode 100644
+index bde1aff..0000000
+--- a/drivers/staging/gma500/psb_intel_modes.c
++++ /dev/null
+@@ -1,77 +0,0 @@
+-/*
+- * Copyright (c) 2007 Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authers: Jesse Barnes <jesse.barnes at intel.com>
+- */
+-
+-#include <linux/i2c.h>
+-#include <linux/fb.h>
+-#include <drm/drmP.h>
+-#include "psb_intel_drv.h"
+-
+-/**
+- * psb_intel_ddc_probe
+- *
+- */
+-bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output)
+-{
+-	u8 out_buf[] = { 0x0, 0x0 };
+-	u8 buf[2];
+-	int ret;
+-	struct i2c_msg msgs[] = {
+-		{
+-		 .addr = 0x50,
+-		 .flags = 0,
+-		 .len = 1,
+-		 .buf = out_buf,
+-		 },
+-		{
+-		 .addr = 0x50,
+-		 .flags = I2C_M_RD,
+-		 .len = 1,
+-		 .buf = buf,
+-		 }
+-	};
+-
+-	ret = i2c_transfer(&psb_intel_output->ddc_bus->adapter, msgs, 2);
+-	if (ret == 2)
+-		return true;
+-
+-	return false;
+-}
+-
+-/**
+- * psb_intel_ddc_get_modes - get modelist from monitor
+- * @connector: DRM connector device to use
+- *
+- * Fetch the EDID information from @connector using the DDC bus.
+- */
+-int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output)
+-{
+-	struct edid *edid;
+-	int ret = 0;
+-
+-	edid =
+-	    drm_get_edid(&psb_intel_output->base,
+-			 &psb_intel_output->ddc_bus->adapter);
+-	if (edid) {
+-		drm_mode_connector_update_edid_property(&psb_intel_output->
+-							base, edid);
+-		ret = drm_add_edid_modes(&psb_intel_output->base, edid);
+-		kfree(edid);
+-	}
+-	return ret;
+-}
+diff --git a/drivers/staging/gma500/psb_intel_reg.h b/drivers/staging/gma500/psb_intel_reg.h
+deleted file mode 100644
+index 1ac16aa..0000000
+--- a/drivers/staging/gma500/psb_intel_reg.h
++++ /dev/null
+@@ -1,1235 +0,0 @@
+-/*
+- * Copyright (c) 2009, Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- */
+-#ifndef __PSB_INTEL_REG_H__
+-#define __PSB_INTEL_REG_H__
+-
+-#define BLC_PWM_CTL		0x61254
+-#define BLC_PWM_CTL2		0x61250
+-#define BLC_PWM_CTL_C		0x62254
+-#define BLC_PWM_CTL2_C		0x62250
+-#define BACKLIGHT_MODULATION_FREQ_SHIFT		(17)
+-/*
+- * This is the most significant 15 bits of the number of backlight cycles in a
+- * complete cycle of the modulated backlight control.
+- *
+- * The actual value is this field multiplied by two.
+- */
+-#define BACKLIGHT_MODULATION_FREQ_MASK	(0x7fff << 17)
+-#define BLM_LEGACY_MODE			(1 << 16)
+-/*
+- * This is the number of cycles out of the backlight modulation cycle for which
+- * the backlight is on.
+- *
+- * This field must be no greater than the number of cycles in the complete
+- * backlight modulation cycle.
+- */
+-#define BACKLIGHT_DUTY_CYCLE_SHIFT	(0)
+-#define BACKLIGHT_DUTY_CYCLE_MASK	(0xffff)
+-
+-#define I915_GCFGC			0xf0
+-#define I915_LOW_FREQUENCY_ENABLE	(1 << 7)
+-#define I915_DISPLAY_CLOCK_190_200_MHZ	(0 << 4)
+-#define I915_DISPLAY_CLOCK_333_MHZ	(4 << 4)
+-#define I915_DISPLAY_CLOCK_MASK		(7 << 4)
+-
+-#define I855_HPLLCC			0xc0
+-#define I855_CLOCK_CONTROL_MASK		(3 << 0)
+-#define I855_CLOCK_133_200		(0 << 0)
+-#define I855_CLOCK_100_200		(1 << 0)
+-#define I855_CLOCK_100_133		(2 << 0)
+-#define I855_CLOCK_166_250		(3 << 0)
+-
+-/* I830 CRTC registers */
+-#define HTOTAL_A		0x60000
+-#define HBLANK_A		0x60004
+-#define HSYNC_A			0x60008
+-#define VTOTAL_A		0x6000c
+-#define VBLANK_A		0x60010
+-#define VSYNC_A			0x60014
+-#define PIPEASRC		0x6001c
+-#define BCLRPAT_A		0x60020
+-#define VSYNCSHIFT_A		0x60028
+-
+-#define HTOTAL_B		0x61000
+-#define HBLANK_B		0x61004
+-#define HSYNC_B			0x61008
+-#define VTOTAL_B		0x6100c
+-#define VBLANK_B		0x61010
+-#define VSYNC_B			0x61014
+-#define PIPEBSRC		0x6101c
+-#define BCLRPAT_B		0x61020
+-#define VSYNCSHIFT_B		0x61028
+-
+-#define HTOTAL_C		0x62000
+-#define HBLANK_C		0x62004
+-#define HSYNC_C			0x62008
+-#define VTOTAL_C		0x6200c
+-#define VBLANK_C		0x62010
+-#define VSYNC_C			0x62014
+-#define PIPECSRC		0x6201c
+-#define BCLRPAT_C		0x62020
+-#define VSYNCSHIFT_C		0x62028
+-
+-#define PP_STATUS		0x61200
+-# define PP_ON				(1 << 31)
+-/*
+- * Indicates that all dependencies of the panel are on:
+- *
+- * - PLL enabled
+- * - pipe enabled
+- * - LVDS/DVOB/DVOC on
+- */
+-#define PP_READY			(1 << 30)
+-#define PP_SEQUENCE_NONE		(0 << 28)
+-#define PP_SEQUENCE_ON			(1 << 28)
+-#define PP_SEQUENCE_OFF			(2 << 28)
+-#define PP_SEQUENCE_MASK		0x30000000
+-#define PP_CONTROL		0x61204
+-#define POWER_TARGET_ON			(1 << 0)
+-
+-#define LVDSPP_ON		0x61208
+-#define LVDSPP_OFF		0x6120c
+-#define PP_CYCLE		0x61210
+-
+-#define PFIT_CONTROL		0x61230
+-#define PFIT_ENABLE			(1 << 31)
+-#define PFIT_PIPE_MASK			(3 << 29)
+-#define PFIT_PIPE_SHIFT			29
+-#define PFIT_SCALING_MODE_PILLARBOX	(1 << 27)
+-#define PFIT_SCALING_MODE_LETTERBOX	(3 << 26)
+-#define VERT_INTERP_DISABLE		(0 << 10)
+-#define VERT_INTERP_BILINEAR		(1 << 10)
+-#define VERT_INTERP_MASK		(3 << 10)
+-#define VERT_AUTO_SCALE			(1 << 9)
+-#define HORIZ_INTERP_DISABLE		(0 << 6)
+-#define HORIZ_INTERP_BILINEAR		(1 << 6)
+-#define HORIZ_INTERP_MASK		(3 << 6)
+-#define HORIZ_AUTO_SCALE		(1 << 5)
+-#define PANEL_8TO6_DITHER_ENABLE	(1 << 3)
+-
+-#define PFIT_PGM_RATIOS		0x61234
+-#define PFIT_VERT_SCALE_MASK			0xfff00000
+-#define PFIT_HORIZ_SCALE_MASK			0x0000fff0
+-
+-#define PFIT_AUTO_RATIOS	0x61238
+-
+-#define DPLL_A			0x06014
+-#define DPLL_B			0x06018
+-#define DPLL_VCO_ENABLE			(1 << 31)
+-#define DPLL_DVO_HIGH_SPEED		(1 << 30)
+-#define DPLL_SYNCLOCK_ENABLE		(1 << 29)
+-#define DPLL_VGA_MODE_DIS		(1 << 28)
+-#define DPLLB_MODE_DAC_SERIAL		(1 << 26)	/* i915 */
+-#define DPLLB_MODE_LVDS			(2 << 26)	/* i915 */
+-#define DPLL_MODE_MASK			(3 << 26)
+-#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10	(0 << 24)	/* i915 */
+-#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5	(1 << 24)	/* i915 */
+-#define DPLLB_LVDS_P2_CLOCK_DIV_14	(0 << 24)	/* i915 */
+-#define DPLLB_LVDS_P2_CLOCK_DIV_7	(1 << 24)	/* i915 */
+-#define DPLL_P2_CLOCK_DIV_MASK		0x03000000	/* i915 */
+-#define DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000	/* i915 */
+-#define DPLL_LOCK			(1 << 15)	/* CDV */
+-
+-/*
+- *  The i830 generation, in DAC/serial mode, defines p1 as two plus this
+- * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+- */
+-# define DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
+-/*
+- * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+- * this field (only one bit may be set).
+- */
+-#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
+-#define DPLL_FPA01_P1_POST_DIV_SHIFT	16
+-#define PLL_P2_DIVIDE_BY_4		(1 << 23)	/* i830, required
+-							 * in DVO non-gang */
+-# define PLL_P1_DIVIDE_BY_TWO		(1 << 21)	/* i830 */
+-#define PLL_REF_INPUT_DREFCLK		(0 << 13)
+-#define PLL_REF_INPUT_TVCLKINA		(1 << 13)	/* i830 */
+-#define PLL_REF_INPUT_TVCLKINBC		(2 << 13)	/* SDVO
+-								 * TVCLKIN */
+-#define PLLB_REF_INPUT_SPREADSPECTRUMIN	(3 << 13)
+-#define PLL_REF_INPUT_MASK		(3 << 13)
+-#define PLL_LOAD_PULSE_PHASE_SHIFT	9
+-/*
+- * Parallel to Serial Load Pulse phase selection.
+- * Selects the phase for the 10X DPLL clock for the PCIe
+- * digital display port. The range is 4 to 13; 10 or more
+- * is just a flip delay. The default is 6
+- */
+-#define PLL_LOAD_PULSE_PHASE_MASK	(0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+-#define DISPLAY_RATE_SELECT_FPA1	(1 << 8)
+-
+-/*
+- * SDVO multiplier for 945G/GM. Not used on 965.
+- *
+- * DPLL_MD_UDI_MULTIPLIER_MASK
+- */
+-#define SDVO_MULTIPLIER_MASK		0x000000ff
+-#define SDVO_MULTIPLIER_SHIFT_HIRES	4
+-#define SDVO_MULTIPLIER_SHIFT_VGA	0
+-
+-/*
+- * PLL_MD
+- */
+-/* Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+-#define DPLL_A_MD		0x0601c
+-/* Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+-#define DPLL_B_MD		0x06020
+-/*
+- * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+- *
+- * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
+- */
+-#define DPLL_MD_UDI_DIVIDER_MASK	0x3f000000
+-#define DPLL_MD_UDI_DIVIDER_SHIFT	24
+-/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+-#define DPLL_MD_VGA_UDI_DIVIDER_MASK	0x003f0000
+-#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT	16
+-/*
+- * SDVO/UDI pixel multiplier.
+- *
+- * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
+- * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
+- * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+- * dummy bytes in the datastream at an increased clock rate, with both sides of
+- * the link knowing how many bytes are fill.
+- *
+- * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
+- * rate to 130Mhz to get a bus rate of 1.30Ghz.  The DPLL clock rate would be
+- * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
+- * through an SDVO command.
+- *
+- * This register field has values of multiplication factor minus 1, with
+- * a maximum multiplier of 5 for SDVO.
+- */
+-#define DPLL_MD_UDI_MULTIPLIER_MASK	0x00003f00
+-#define DPLL_MD_UDI_MULTIPLIER_SHIFT	8
+-/*
+- * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+- * This best be set to the default value (3) or the CRT won't work. No,
+- * I don't entirely understand what this does...
+- */
+-#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
+-#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+-
+-#define DPLL_TEST		0x606c
+-#define DPLLB_TEST_SDVO_DIV_1		(0 << 22)
+-#define DPLLB_TEST_SDVO_DIV_2		(1 << 22)
+-#define DPLLB_TEST_SDVO_DIV_4		(2 << 22)
+-#define DPLLB_TEST_SDVO_DIV_MASK	(3 << 22)
+-#define DPLLB_TEST_N_BYPASS		(1 << 19)
+-#define DPLLB_TEST_M_BYPASS		(1 << 18)
+-#define DPLLB_INPUT_BUFFER_ENABLE	(1 << 16)
+-#define DPLLA_TEST_N_BYPASS		(1 << 3)
+-#define DPLLA_TEST_M_BYPASS		(1 << 2)
+-#define DPLLA_INPUT_BUFFER_ENABLE	(1 << 0)
+-
+-#define ADPA			0x61100
+-#define ADPA_DAC_ENABLE			(1 << 31)
+-#define ADPA_DAC_DISABLE		0
+-#define ADPA_PIPE_SELECT_MASK		(1 << 30)
+-#define ADPA_PIPE_A_SELECT		0
+-#define ADPA_PIPE_B_SELECT		(1 << 30)
+-#define ADPA_USE_VGA_HVPOLARITY		(1 << 15)
+-#define ADPA_SETS_HVPOLARITY		0
+-#define ADPA_VSYNC_CNTL_DISABLE		(1 << 11)
+-#define ADPA_VSYNC_CNTL_ENABLE		0
+-#define ADPA_HSYNC_CNTL_DISABLE		(1 << 10)
+-#define ADPA_HSYNC_CNTL_ENABLE		0
+-#define ADPA_VSYNC_ACTIVE_HIGH		(1 << 4)
+-#define ADPA_VSYNC_ACTIVE_LOW		0
+-#define ADPA_HSYNC_ACTIVE_HIGH		(1 << 3)
+-#define ADPA_HSYNC_ACTIVE_LOW		0
+-
+-#define FPA0			0x06040
+-#define FPA1			0x06044
+-#define FPB0			0x06048
+-#define FPB1			0x0604c
+-#define FP_N_DIV_MASK			0x003f0000
+-#define FP_N_DIV_SHIFT			16
+-#define FP_M1_DIV_MASK			0x00003f00
+-#define FP_M1_DIV_SHIFT			8
+-#define FP_M2_DIV_MASK			0x0000003f
+-#define FP_M2_DIV_SHIFT			0
+-
+-#define PORT_HOTPLUG_EN		0x61110
+-#define SDVOB_HOTPLUG_INT_EN		(1 << 26)
+-#define SDVOC_HOTPLUG_INT_EN		(1 << 25)
+-#define TV_HOTPLUG_INT_EN		(1 << 18)
+-#define CRT_HOTPLUG_INT_EN		(1 << 9)
+-#define CRT_HOTPLUG_FORCE_DETECT	(1 << 3)
+-/* CDV.. */
+-#define CRT_HOTPLUG_ACTIVATION_PERIOD_64	(1 << 8)
+-#define CRT_HOTPLUG_DAC_ON_TIME_2M		(0 << 7)
+-#define CRT_HOTPLUG_DAC_ON_TIME_4M		(1 << 7)
+-#define CRT_HOTPLUG_VOLTAGE_COMPARE_40		(0 << 5)
+-#define CRT_HOTPLUG_VOLTAGE_COMPARE_50		(1 << 5)
+-#define CRT_HOTPLUG_VOLTAGE_COMPARE_60		(2 << 5)
+-#define CRT_HOTPLUG_VOLTAGE_COMPARE_70		(3 << 5)
+-#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK	(3 << 5)
+-#define CRT_HOTPLUG_DETECT_DELAY_1G		(0 << 4)
+-#define CRT_HOTPLUG_DETECT_DELAY_2G		(1 << 4)
+-#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV	(0 << 2)
+-#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2)
+-#define CRT_HOTPLUG_DETECT_MASK			0x000000F8
+-
+-#define PORT_HOTPLUG_STAT	0x61114
+-#define CRT_HOTPLUG_INT_STATUS		(1 << 11)
+-#define TV_HOTPLUG_INT_STATUS		(1 << 10)
+-#define CRT_HOTPLUG_MONITOR_MASK	(3 << 8)
+-#define CRT_HOTPLUG_MONITOR_COLOR	(3 << 8)
+-#define CRT_HOTPLUG_MONITOR_MONO	(2 << 8)
+-#define CRT_HOTPLUG_MONITOR_NONE	(0 << 8)
+-#define SDVOC_HOTPLUG_INT_STATUS	(1 << 7)
+-#define SDVOB_HOTPLUG_INT_STATUS	(1 << 6)
+-
+-#define SDVOB			0x61140
+-#define SDVOC			0x61160
+-#define SDVO_ENABLE			(1 << 31)
+-#define SDVO_PIPE_B_SELECT		(1 << 30)
+-#define SDVO_STALL_SELECT		(1 << 29)
+-#define SDVO_INTERRUPT_ENABLE		(1 << 26)
+-
+-/**
+- * 915G/GM SDVO pixel multiplier.
+- *
+- * Programmed value is multiplier - 1, up to 5x.
+- *
+- * DPLL_MD_UDI_MULTIPLIER_MASK
+- */
+-#define SDVO_PORT_MULTIPLY_MASK		(7 << 23)
+-#define SDVO_PORT_MULTIPLY_SHIFT	23
+-#define SDVO_PHASE_SELECT_MASK		(15 << 19)
+-#define SDVO_PHASE_SELECT_DEFAULT	(6 << 19)
+-#define SDVO_CLOCK_OUTPUT_INVERT	(1 << 18)
+-#define SDVOC_GANG_MODE			(1 << 16)
+-#define SDVO_BORDER_ENABLE		(1 << 7)
+-#define SDVOB_PCIE_CONCURRENCY		(1 << 3)
+-#define SDVO_DETECTED			(1 << 2)
+-/* Bits to be preserved when writing */
+-#define SDVOB_PRESERVE_MASK		((1 << 17) | (1 << 16) | (1 << 14))
+-#define SDVOC_PRESERVE_MASK		(1 << 17)
+-
+-/*
+- * This register controls the LVDS output enable, pipe selection, and data
+- * format selection.
+- *
+- * All of the clock/data pairs are force powered down by power sequencing.
+- */
+-#define LVDS			0x61180
+-/*
+- * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
+- * the DPLL semantics change when the LVDS is assigned to that pipe.
+- */
+-#define LVDS_PORT_EN			(1 << 31)
+-/* Selects pipe B for LVDS data.  Must be set on pre-965. */
+-#define LVDS_PIPEB_SELECT		(1 << 30)
+-
+-/* Turns on border drawing to allow centered display. */
+-#define LVDS_BORDER_EN			(1 << 15)
+-
+-/*
+- * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+- * pixel.
+- */
+-#define LVDS_A0A2_CLKA_POWER_MASK	(3 << 8)
+-#define LVDS_A0A2_CLKA_POWER_DOWN	(0 << 8)
+-#define LVDS_A0A2_CLKA_POWER_UP		(3 << 8)
+-/*
+- * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+- * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+- * on.
+- */
+-#define LVDS_A3_POWER_MASK		(3 << 6)
+-#define LVDS_A3_POWER_DOWN		(0 << 6)
+-#define LVDS_A3_POWER_UP		(3 << 6)
+-/*
+- * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
+- * is set.
+- */
+-#define LVDS_CLKB_POWER_MASK		(3 << 4)
+-#define LVDS_CLKB_POWER_DOWN		(0 << 4)
+-#define LVDS_CLKB_POWER_UP		(3 << 4)
+-/*
+- * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
+- * setting for whether we are in dual-channel mode.  The B3 pair will
+- * additionally only be powered up when LVDS_A3_POWER_UP is set.
+- */
+-#define LVDS_B0B3_POWER_MASK		(3 << 2)
+-#define LVDS_B0B3_POWER_DOWN		(0 << 2)
+-#define LVDS_B0B3_POWER_UP		(3 << 2)
+-
+-#define PIPEACONF		0x70008
+-#define PIPEACONF_ENABLE		(1 << 31)
+-#define PIPEACONF_DISABLE		0
+-#define PIPEACONF_DOUBLE_WIDE		(1 << 30)
+-#define PIPECONF_ACTIVE			(1 << 30)
+-#define I965_PIPECONF_ACTIVE		(1 << 30)
+-#define PIPECONF_DSIPLL_LOCK		(1 << 29)
+-#define PIPEACONF_SINGLE_WIDE		0
+-#define PIPEACONF_PIPE_UNLOCKED		0
+-#define PIPEACONF_DSR			(1 << 26)
+-#define PIPEACONF_PIPE_LOCKED		(1 << 25)
+-#define PIPEACONF_PALETTE		0
+-#define PIPECONF_FORCE_BORDER		(1 << 25)
+-#define PIPEACONF_GAMMA			(1 << 24)
+-#define PIPECONF_PROGRESSIVE		(0 << 21)
+-#define PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
+-#define PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)
+-#define PIPECONF_PLANE_OFF		(1 << 19)
+-#define PIPECONF_CURSOR_OFF		(1 << 18)
+-
+-#define PIPEBCONF		0x71008
+-#define PIPEBCONF_ENABLE		(1 << 31)
+-#define PIPEBCONF_DISABLE		0
+-#define PIPEBCONF_DOUBLE_WIDE		(1 << 30)
+-#define PIPEBCONF_DISABLE		0
+-#define PIPEBCONF_GAMMA			(1 << 24)
+-#define PIPEBCONF_PALETTE		0
+-
+-#define PIPECCONF		0x72008
+-
+-#define PIPEBGCMAXRED		0x71010
+-#define PIPEBGCMAXGREEN		0x71014
+-#define PIPEBGCMAXBLUE		0x71018
+-
+-#define PIPEASTAT		0x70024
+-#define PIPEBSTAT		0x71024
+-#define PIPECSTAT		0x72024
+-#define PIPE_VBLANK_INTERRUPT_STATUS		(1UL << 1)
+-#define PIPE_START_VBLANK_INTERRUPT_STATUS	(1UL << 2)
+-#define PIPE_VBLANK_CLEAR			(1 << 1)
+-#define PIPE_VBLANK_STATUS			(1 << 1)
+-#define PIPE_TE_STATUS				(1UL << 6)
+-#define PIPE_DPST_EVENT_STATUS			(1UL << 7)
+-#define PIPE_VSYNC_CLEAR			(1UL << 9)
+-#define PIPE_VSYNC_STATUS			(1UL << 9)
+-#define PIPE_HDMI_AUDIO_UNDERRUN_STATUS		(1UL << 10)
+-#define PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS	(1UL << 11)
+-#define PIPE_VBLANK_INTERRUPT_ENABLE		(1UL << 17)
+-#define PIPE_START_VBLANK_INTERRUPT_ENABLE	(1UL << 18)
+-#define PIPE_TE_ENABLE				(1UL << 22)
+-#define PIPE_DPST_EVENT_ENABLE			(1UL << 23)
+-#define PIPE_VSYNC_ENABL			(1UL << 25)
+-#define PIPE_HDMI_AUDIO_UNDERRUN		(1UL << 26)
+-#define PIPE_HDMI_AUDIO_BUFFER_DONE		(1UL << 27)
+-#define PIPE_HDMI_AUDIO_INT_MASK		(PIPE_HDMI_AUDIO_UNDERRUN | \
+-						PIPE_HDMI_AUDIO_BUFFER_DONE)
+-#define PIPE_EVENT_MASK ((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)|(1 << 20)|(1 << 16))
+-#define PIPE_VBLANK_MASK ((1 << 25)|(1 << 24)|(1 << 18)|(1 << 17))
+-#define HISTOGRAM_INT_CONTROL		0x61268
+-#define HISTOGRAM_BIN_DATA		0X61264
+-#define HISTOGRAM_LOGIC_CONTROL		0x61260
+-#define PWM_CONTROL_LOGIC		0x61250
+-#define PIPE_HOTPLUG_INTERRUPT_STATUS		(1UL << 10)
+-#define HISTOGRAM_INTERRUPT_ENABLE		(1UL << 31)
+-#define HISTOGRAM_LOGIC_ENABLE			(1UL << 31)
+-#define PWM_LOGIC_ENABLE			(1UL << 31)
+-#define PWM_PHASEIN_ENABLE			(1UL << 25)
+-#define PWM_PHASEIN_INT_ENABLE			(1UL << 24)
+-#define PWM_PHASEIN_VB_COUNT			0x00001f00
+-#define PWM_PHASEIN_INC				0x0000001f
+-#define HISTOGRAM_INT_CTRL_CLEAR		(1UL << 30)
+-#define DPST_YUV_LUMA_MODE			0
+-
+-struct dpst_ie_histogram_control {
+-	union {
+-		uint32_t data;
+-		struct {
+-			uint32_t bin_reg_index:7;
+-			uint32_t reserved:4;
+-			uint32_t bin_reg_func_select:1;
+-			uint32_t sync_to_phase_in:1;
+-			uint32_t alt_enhancement_mode:2;
+-			uint32_t reserved1:1;
+-			uint32_t sync_to_phase_in_count:8;
+-			uint32_t histogram_mode_select:1;
+-			uint32_t reserved2:4;
+-			uint32_t ie_pipe_assignment:1;
+-			uint32_t ie_mode_table_enabled:1;
+-			uint32_t ie_histogram_enable:1;
+-		};
+-	};
+-};
+-
+-struct dpst_guardband {
+-	union {
+-		uint32_t data;
+-		struct {
+-			uint32_t guardband:22;
+-			uint32_t guardband_interrupt_delay:8;
+-			uint32_t interrupt_status:1;
+-			uint32_t interrupt_enable:1;
+-		};
+-	};
+-};
+-
+-#define PIPEAFRAMEHIGH		0x70040
+-#define PIPEAFRAMEPIXEL		0x70044
+-#define PIPEBFRAMEHIGH		0x71040
+-#define PIPEBFRAMEPIXEL		0x71044
+-#define PIPECFRAMEHIGH		0x72040
+-#define PIPECFRAMEPIXEL		0x72044
+-#define PIPE_FRAME_HIGH_MASK	0x0000ffff
+-#define PIPE_FRAME_HIGH_SHIFT	0
+-#define PIPE_FRAME_LOW_MASK	0xff000000
+-#define PIPE_FRAME_LOW_SHIFT	24
+-#define PIPE_PIXEL_MASK		0x00ffffff
+-#define PIPE_PIXEL_SHIFT	0
+-
+-#define DSPARB			0x70030
+-#define DSPFW1			0x70034
+-#define DSPFW2			0x70038
+-#define DSPFW3			0x7003c
+-#define DSPFW4			0x70050
+-#define DSPFW5			0x70054
+-#define DSPFW6			0x70058
+-#define DSPCHICKENBIT		0x70400
+-#define DSPACNTR		0x70180
+-#define DSPBCNTR		0x71180
+-#define DSPCCNTR		0x72180
+-#define DISPLAY_PLANE_ENABLE			(1 << 31)
+-#define DISPLAY_PLANE_DISABLE			0
+-#define DISPPLANE_GAMMA_ENABLE			(1 << 30)
+-#define DISPPLANE_GAMMA_DISABLE			0
+-#define DISPPLANE_PIXFORMAT_MASK		(0xf << 26)
+-#define DISPPLANE_8BPP				(0x2 << 26)
+-#define DISPPLANE_15_16BPP			(0x4 << 26)
+-#define DISPPLANE_16BPP				(0x5 << 26)
+-#define DISPPLANE_32BPP_NO_ALPHA		(0x6 << 26)
+-#define DISPPLANE_32BPP				(0x7 << 26)
+-#define DISPPLANE_STEREO_ENABLE			(1 << 25)
+-#define DISPPLANE_STEREO_DISABLE		0
+-#define DISPPLANE_SEL_PIPE_MASK			(1 << 24)
+-#define DISPPLANE_SEL_PIPE_POS			24
+-#define DISPPLANE_SEL_PIPE_A			0
+-#define DISPPLANE_SEL_PIPE_B			(1 << 24)
+-#define DISPPLANE_SRC_KEY_ENABLE		(1 << 22)
+-#define DISPPLANE_SRC_KEY_DISABLE		0
+-#define DISPPLANE_LINE_DOUBLE			(1 << 20)
+-#define DISPPLANE_NO_LINE_DOUBLE		0
+-#define DISPPLANE_STEREO_POLARITY_FIRST		0
+-#define DISPPLANE_STEREO_POLARITY_SECOND	(1 << 18)
+-/* plane B only */
+-#define DISPPLANE_ALPHA_TRANS_ENABLE		(1 << 15)
+-#define DISPPLANE_ALPHA_TRANS_DISABLE		0
+-#define DISPPLANE_SPRITE_ABOVE_DISPLAYA		0
+-#define DISPPLANE_SPRITE_ABOVE_OVERLAY		(1)
+-#define DISPPLANE_BOTTOM			(4)
+-
+-#define DSPABASE		0x70184
+-#define DSPALINOFF		0x70184
+-#define DSPASTRIDE		0x70188
+-
+-#define DSPBBASE		0x71184
+-#define DSPBLINOFF		0X71184
+-#define DSPBADDR		DSPBBASE
+-#define DSPBSTRIDE		0x71188
+-
+-#define DSPCBASE		0x72184
+-#define DSPCLINOFF		0x72184
+-#define DSPCSTRIDE		0x72188
+-
+-#define DSPAKEYVAL		0x70194
+-#define DSPAKEYMASK		0x70198
+-
+-#define DSPAPOS			0x7018C	/* reserved */
+-#define DSPASIZE		0x70190
+-#define DSPBPOS			0x7118C
+-#define DSPBSIZE		0x71190
+-#define DSPCPOS			0x7218C
+-#define DSPCSIZE		0x72190
+-
+-#define DSPASURF		0x7019C
+-#define DSPATILEOFF		0x701A4
+-
+-#define DSPBSURF		0x7119C
+-#define DSPBTILEOFF		0x711A4
+-
+-#define DSPCSURF		0x7219C
+-#define DSPCTILEOFF		0x721A4
+-#define DSPCKEYMAXVAL		0x721A0
+-#define DSPCKEYMINVAL		0x72194
+-#define DSPCKEYMSK		0x72198
+-
+-#define VGACNTRL		0x71400
+-#define VGA_DISP_DISABLE		(1 << 31)
+-#define VGA_2X_MODE			(1 << 30)
+-#define VGA_PIPE_B_SELECT		(1 << 29)
+-
+-/*
+- * Overlay registers
+- */
+-#define OV_C_OFFSET		0x08000
+-#define OV_OVADD		0x30000
+-#define OV_DOVASTA		0x30008
+-# define OV_PIPE_SELECT			((1 << 6)|(1 << 7))
+-# define OV_PIPE_SELECT_POS		6
+-# define OV_PIPE_A			0
+-# define OV_PIPE_C			1
+-#define OV_OGAMC5		0x30010
+-#define OV_OGAMC4		0x30014
+-#define OV_OGAMC3		0x30018
+-#define OV_OGAMC2		0x3001C
+-#define OV_OGAMC1		0x30020
+-#define OV_OGAMC0		0x30024
+-#define OVC_OVADD		0x38000
+-#define OVC_DOVCSTA		0x38008
+-#define OVC_OGAMC5		0x38010
+-#define OVC_OGAMC4		0x38014
+-#define OVC_OGAMC3		0x38018
+-#define OVC_OGAMC2		0x3801C
+-#define OVC_OGAMC1		0x38020
+-#define OVC_OGAMC0		0x38024
+-
+-/*
+- * Some BIOS scratch area registers.  The 845 (and 830?) store the amount
+- * of video memory available to the BIOS in SWF1.
+- */
+-#define SWF0			0x71410
+-#define SWF1			0x71414
+-#define SWF2			0x71418
+-#define SWF3			0x7141c
+-#define SWF4			0x71420
+-#define SWF5			0x71424
+-#define SWF6			0x71428
+-
+-/*
+- * 855 scratch registers.
+- */
+-#define SWF00			0x70410
+-#define SWF01			0x70414
+-#define SWF02			0x70418
+-#define SWF03			0x7041c
+-#define SWF04			0x70420
+-#define SWF05			0x70424
+-#define SWF06			0x70428
+-
+-#define SWF10			SWF0
+-#define SWF11			SWF1
+-#define SWF12			SWF2
+-#define SWF13			SWF3
+-#define SWF14			SWF4
+-#define SWF15			SWF5
+-#define SWF16			SWF6
+-
+-#define SWF30			0x72414
+-#define SWF31			0x72418
+-#define SWF32			0x7241c
+-
+-
+-/*
+- * Palette registers
+- */
+-#define PALETTE_A		0x0a000
+-#define PALETTE_B		0x0a800
+-#define PALETTE_C		0x0ac00
+-
+-/* Cursor A & B regs */
+-#define CURACNTR		0x70080
+-#define CURSOR_MODE_DISABLE		0x00
+-#define CURSOR_MODE_64_32B_AX		0x07
+-#define CURSOR_MODE_64_ARGB_AX		((1 << 5) | CURSOR_MODE_64_32B_AX)
+-#define MCURSOR_GAMMA_ENABLE		(1 << 26)
+-#define CURABASE		0x70084
+-#define CURAPOS			0x70088
+-#define CURSOR_POS_MASK			0x007FF
+-#define CURSOR_POS_SIGN			0x8000
+-#define CURSOR_X_SHIFT			0
+-#define CURSOR_Y_SHIFT			16
+-#define CURBCNTR		0x700c0
+-#define CURBBASE		0x700c4
+-#define CURBPOS			0x700c8
+-#define CURCCNTR		0x700e0
+-#define CURCBASE		0x700e4
+-#define CURCPOS			0x700e8
+-
+-/*
+- * Interrupt Registers
+- */
+-#define IER			0x020a0
+-#define IIR			0x020a4
+-#define IMR			0x020a8
+-#define ISR			0x020ac
+-
+-/*
+- * MOORESTOWN delta registers
+- */
+-#define MRST_DPLL_A		0x0f014
+-#define MDFLD_DPLL_B		0x0f018
+-#define MDFLD_INPUT_REF_SEL		(1 << 14)
+-#define MDFLD_VCO_SEL			(1 << 16)
+-#define DPLLA_MODE_LVDS			(2 << 26)	/* mrst */
+-#define MDFLD_PLL_LATCHEN		(1 << 28)
+-#define MDFLD_PWR_GATE_EN		(1 << 30)
+-#define MDFLD_P1_MASK			(0x1FF << 17)
+-#define MRST_FPA0		0x0f040
+-#define MRST_FPA1		0x0f044
+-#define MDFLD_DPLL_DIV0		0x0f048
+-#define MDFLD_DPLL_DIV1		0x0f04c
+-#define MRST_PERF_MODE		0x020f4
+-
+-/*
+- * MEDFIELD HDMI registers
+- */
+-#define HDMIPHYMISCCTL		0x61134
+-#define HDMI_PHY_POWER_DOWN		0x7f
+-#define HDMIB_CONTROL		0x61140
+-#define HDMIB_PORT_EN			(1 << 31)
+-#define HDMIB_PIPE_B_SELECT		(1 << 30)
+-#define HDMIB_NULL_PACKET		(1 << 9)
+-#define HDMIB_HDCP_PORT			(1 << 5)
+-
+-/* #define LVDS			0x61180 */
+-#define MRST_PANEL_8TO6_DITHER_ENABLE	(1 << 25)
+-#define MRST_PANEL_24_DOT_1_FORMAT	(1 << 24)
+-#define LVDS_A3_POWER_UP_0_OUTPUT	(1 << 6)
+-
+-#define MIPI			0x61190
+-#define MIPI_C			0x62190
+-#define MIPI_PORT_EN			(1 << 31)
+-/* Turns on border drawing to allow centered display. */
+-#define SEL_FLOPPED_HSTX		(1 << 23)
+-#define PASS_FROM_SPHY_TO_AFE		(1 << 16)
+-#define MIPI_BORDER_EN			(1 << 15)
+-#define MIPIA_3LANE_MIPIC_1LANE		0x1
+-#define MIPIA_2LANE_MIPIC_2LANE		0x2
+-#define TE_TRIGGER_DSI_PROTOCOL		(1 << 2)
+-#define TE_TRIGGER_GPIO_PIN		(1 << 3)
+-#define MIPI_TE_COUNT		0x61194
+-
+-/* #define PP_CONTROL	0x61204 */
+-#define POWER_DOWN_ON_RESET		(1 << 1)
+-
+-/* #define PFIT_CONTROL	0x61230 */
+-#define PFIT_PIPE_SELECT		(3 << 29)
+-#define PFIT_PIPE_SELECT_SHIFT		(29)
+-
+-/* #define BLC_PWM_CTL		0x61254 */
+-#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT	(16)
+-#define MRST_BACKLIGHT_MODULATION_FREQ_MASK	(0xffff << 16)
+-
+-/* #define PIPEACONF 0x70008 */
+-#define PIPEACONF_PIPE_STATE		(1 << 30)
+-/* #define DSPACNTR		0x70180 */
+-
+-#define MRST_DSPABASE		0x7019c
+-#define MRST_DSPBBASE		0x7119c
+-#define MDFLD_DSPCBASE		0x7219c
+-
+-/*
+- * Moorestown registers.
+- */
+-
+-/*
+- *	MIPI IP registers
+- */
+-#define MIPIC_REG_OFFSET		0x800
+-
+-#define DEVICE_READY_REG		0xb000
+-#define LP_OUTPUT_HOLD				(1 << 16)
+-#define EXIT_ULPS_DEV_READY			0x3
+-#define LP_OUTPUT_HOLD_RELEASE			0x810000
+-# define ENTERING_ULPS				(2 << 1)
+-# define EXITING_ULPS				(1 << 1)
+-# define ULPS_MASK				(3 << 1)
+-# define BUS_POSSESSION				(1 << 3)
+-#define INTR_STAT_REG			0xb004
+-#define RX_SOT_ERROR				(1 << 0)
+-#define RX_SOT_SYNC_ERROR			(1 << 1)
+-#define RX_ESCAPE_MODE_ENTRY_ERROR		(1 << 3)
+-#define RX_LP_TX_SYNC_ERROR			(1 << 4)
+-#define RX_HS_RECEIVE_TIMEOUT_ERROR		(1 << 5)
+-#define RX_FALSE_CONTROL_ERROR			(1 << 6)
+-#define RX_ECC_SINGLE_BIT_ERROR			(1 << 7)
+-#define RX_ECC_MULTI_BIT_ERROR			(1 << 8)
+-#define RX_CHECKSUM_ERROR			(1 << 9)
+-#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED		(1 << 10)
+-#define RX_DSI_VC_ID_INVALID			(1 << 11)
+-#define TX_FALSE_CONTROL_ERROR			(1 << 12)
+-#define TX_ECC_SINGLE_BIT_ERROR			(1 << 13)
+-#define TX_ECC_MULTI_BIT_ERROR			(1 << 14)
+-#define TX_CHECKSUM_ERROR			(1 << 15)
+-#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED		(1 << 16)
+-#define TX_DSI_VC_ID_INVALID			(1 << 17)
+-#define HIGH_CONTENTION				(1 << 18)
+-#define LOW_CONTENTION				(1 << 19)
+-#define DPI_FIFO_UNDER_RUN			(1 << 20)
+-#define HS_TX_TIMEOUT				(1 << 21)
+-#define LP_RX_TIMEOUT				(1 << 22)
+-#define TURN_AROUND_ACK_TIMEOUT			(1 << 23)
+-#define ACK_WITH_NO_ERROR			(1 << 24)
+-#define HS_GENERIC_WR_FIFO_FULL			(1 << 27)
+-#define LP_GENERIC_WR_FIFO_FULL			(1 << 28)
+-#define SPL_PKT_SENT				(1 << 30)
+-#define INTR_EN_REG			0xb008
+-#define DSI_FUNC_PRG_REG		0xb00c
+-#define DPI_CHANNEL_NUMBER_POS			0x03
+-#define DBI_CHANNEL_NUMBER_POS			0x05
+-#define FMT_DPI_POS				0x07
+-#define FMT_DBI_POS				0x0A
+-#define DBI_DATA_WIDTH_POS			0x0D
+-
+-/* DPI PIXEL FORMATS */
+-#define RGB_565_FMT				0x01	/* RGB 565 FORMAT */
+-#define RGB_666_FMT				0x02	/* RGB 666 FORMAT */
+-#define LRGB_666_FMT				0x03	/* RGB LOOSELY PACKED
+-							 * 666 FORMAT
+-							 */
+-#define RGB_888_FMT				0x04	/* RGB 888 FORMAT */
+-#define VIRTUAL_CHANNEL_NUMBER_0		0x00	/* Virtual channel 0 */
+-#define VIRTUAL_CHANNEL_NUMBER_1		0x01	/* Virtual channel 1 */
+-#define VIRTUAL_CHANNEL_NUMBER_2		0x02	/* Virtual channel 2 */
+-#define VIRTUAL_CHANNEL_NUMBER_3		0x03	/* Virtual channel 3 */
+-
+-#define DBI_NOT_SUPPORTED			0x00	/* command mode
+-							 * is not supported
+-							 */
+-#define DBI_DATA_WIDTH_16BIT			0x01	/* 16 bit data */
+-#define DBI_DATA_WIDTH_9BIT			0x02	/* 9 bit data */
+-#define DBI_DATA_WIDTH_8BIT			0x03	/* 8 bit data */
+-#define DBI_DATA_WIDTH_OPT1			0x04	/* option 1 */
+-#define DBI_DATA_WIDTH_OPT2			0x05	/* option 2 */
+-
+-#define HS_TX_TIMEOUT_REG		0xb010
+-#define LP_RX_TIMEOUT_REG		0xb014
+-#define TURN_AROUND_TIMEOUT_REG		0xb018
+-#define DEVICE_RESET_REG		0xb01C
+-#define DPI_RESOLUTION_REG		0xb020
+-#define RES_V_POS				0x10
+-#define DBI_RESOLUTION_REG		0xb024 /* Reserved for MDFLD */
+-#define HORIZ_SYNC_PAD_COUNT_REG	0xb028
+-#define HORIZ_BACK_PORCH_COUNT_REG	0xb02C
+-#define HORIZ_FRONT_PORCH_COUNT_REG	0xb030
+-#define HORIZ_ACTIVE_AREA_COUNT_REG	0xb034
+-#define VERT_SYNC_PAD_COUNT_REG		0xb038
+-#define VERT_BACK_PORCH_COUNT_REG	0xb03c
+-#define VERT_FRONT_PORCH_COUNT_REG	0xb040
+-#define HIGH_LOW_SWITCH_COUNT_REG	0xb044
+-#define DPI_CONTROL_REG			0xb048
+-#define DPI_SHUT_DOWN				(1 << 0)
+-#define DPI_TURN_ON				(1 << 1)
+-#define DPI_COLOR_MODE_ON			(1 << 2)
+-#define DPI_COLOR_MODE_OFF			(1 << 3)
+-#define DPI_BACK_LIGHT_ON			(1 << 4)
+-#define DPI_BACK_LIGHT_OFF			(1 << 5)
+-#define DPI_LP					(1 << 6)
+-#define DPI_DATA_REG			0xb04c
+-#define DPI_BACK_LIGHT_ON_DATA			0x07
+-#define DPI_BACK_LIGHT_OFF_DATA			0x17
+-#define INIT_COUNT_REG			0xb050
+-#define MAX_RET_PAK_REG			0xb054
+-#define VIDEO_FMT_REG			0xb058
+-#define COMPLETE_LAST_PCKT			(1 << 2)
+-#define EOT_DISABLE_REG			0xb05c
+-#define ENABLE_CLOCK_STOPPING			(1 << 1)
+-#define LP_BYTECLK_REG			0xb060
+-#define LP_GEN_DATA_REG			0xb064
+-#define HS_GEN_DATA_REG			0xb068
+-#define LP_GEN_CTRL_REG			0xb06C
+-#define HS_GEN_CTRL_REG			0xb070
+-#define DCS_CHANNEL_NUMBER_POS		0x6
+-#define MCS_COMMANDS_POS		0x8
+-#define WORD_COUNTS_POS			0x8
+-#define MCS_PARAMETER_POS			0x10
+-#define GEN_FIFO_STAT_REG		0xb074
+-#define HS_DATA_FIFO_FULL			(1 << 0)
+-#define HS_DATA_FIFO_HALF_EMPTY			(1 << 1)
+-#define HS_DATA_FIFO_EMPTY			(1 << 2)
+-#define LP_DATA_FIFO_FULL			(1 << 8)
+-#define LP_DATA_FIFO_HALF_EMPTY			(1 << 9)
+-#define LP_DATA_FIFO_EMPTY			(1 << 10)
+-#define HS_CTRL_FIFO_FULL			(1 << 16)
+-#define HS_CTRL_FIFO_HALF_EMPTY			(1 << 17)
+-#define HS_CTRL_FIFO_EMPTY			(1 << 18)
+-#define LP_CTRL_FIFO_FULL			(1 << 24)
+-#define LP_CTRL_FIFO_HALF_EMPTY			(1 << 25)
+-#define LP_CTRL_FIFO_EMPTY			(1 << 26)
+-#define DBI_FIFO_EMPTY				(1 << 27)
+-#define DPI_FIFO_EMPTY				(1 << 28)
+-#define HS_LS_DBI_ENABLE_REG		0xb078
+-#define TXCLKESC_REG			0xb07c
+-#define DPHY_PARAM_REG			0xb080
+-#define DBI_BW_CTRL_REG			0xb084
+-#define CLK_LANE_SWT_REG		0xb088
+-
+-/*
+- * MIPI Adapter registers
+- */
+-#define MIPI_CONTROL_REG		0xb104
+-#define MIPI_2X_CLOCK_BITS			((1 << 0) | (1 << 1))
+-#define MIPI_DATA_ADDRESS_REG		0xb108
+-#define MIPI_DATA_LENGTH_REG		0xb10C
+-#define MIPI_COMMAND_ADDRESS_REG	0xb110
+-#define MIPI_COMMAND_LENGTH_REG		0xb114
+-#define MIPI_READ_DATA_RETURN_REG0	0xb118
+-#define MIPI_READ_DATA_RETURN_REG1	0xb11C
+-#define MIPI_READ_DATA_RETURN_REG2	0xb120
+-#define MIPI_READ_DATA_RETURN_REG3	0xb124
+-#define MIPI_READ_DATA_RETURN_REG4	0xb128
+-#define MIPI_READ_DATA_RETURN_REG5	0xb12C
+-#define MIPI_READ_DATA_RETURN_REG6	0xb130
+-#define MIPI_READ_DATA_RETURN_REG7	0xb134
+-#define MIPI_READ_DATA_VALID_REG	0xb138
+-
+-/* DBI COMMANDS */
+-#define soft_reset			0x01
+-/*
+- *	The display module performs a software reset.
+- *	Registers are written with their SW Reset default values.
+- */
+-#define get_power_mode			0x0a
+-/*
+- *	The display module returns the current power mode
+- */
+-#define get_address_mode		0x0b
+-/*
+- *	The display module returns the current status.
+- */
+-#define get_pixel_format		0x0c
+-/*
+- *	This command gets the pixel format for the RGB image data
+- *	used by the interface.
+- */
+-#define get_display_mode		0x0d
+-/*
+- *	The display module returns the Display Image Mode status.
+- */
+-#define get_signal_mode			0x0e
+-/*
+- *	The display module returns the Display Signal Mode.
+- */
+-#define get_diagnostic_result		0x0f
+-/*
+- *	The display module returns the self-diagnostic results following
+- *	a Sleep Out command.
+- */
+-#define enter_sleep_mode		0x10
+-/*
+- *	This command causes the display module to enter the Sleep mode.
+- *	In this mode, all unnecessary blocks inside the display module are
+- *	disabled except interface communication. This is the lowest power
+- *	mode the display module supports.
+- */
+-#define exit_sleep_mode			0x11
+-/*
+- *	This command causes the display module to exit Sleep mode.
+- *	All blocks inside the display module are enabled.
+- */
+-#define enter_partial_mode		0x12
+-/*
+- *	This command causes the display module to enter the Partial Display
+- *	Mode. The Partial Display Mode window is described by the
+- *	set_partial_area command.
+- */
+-#define enter_normal_mode		0x13
+-/*
+- *	This command causes the display module to enter the Normal mode.
+- *	Normal Mode is defined as Partial Display mode and Scroll mode are off
+- */
+-#define exit_invert_mode		0x20
+-/*
+- *	This command causes the display module to stop inverting the image
+- *	data on the display device. The frame memory contents remain unchanged.
+- *	No status bits are changed.
+- */
+-#define enter_invert_mode		0x21
+-/*
+- *	This command causes the display module to invert the image data only on
+- *	the display device. The frame memory contents remain unchanged.
+- *	No status bits are changed.
+- */
+-#define set_gamma_curve			0x26
+-/*
+- *	This command selects the desired gamma curve for the display device.
+- *	Four fixed gamma curves are defined in section DCS spec.
+- */
+-#define set_display_off			0x28
+-/* ************************************************************************* *\
+-This command causes the display module to stop displaying the image data
+-on the display device. The frame memory contents remain unchanged.
+-No status bits are changed.
+-\* ************************************************************************* */
+-#define set_display_on			0x29
+-/* ************************************************************************* *\
+-This command causes the display module to start displaying the image data
+-on the display device. The frame memory contents remain unchanged.
+-No status bits are changed.
+-\* ************************************************************************* */
+-#define set_column_address		0x2a
+-/*
+- *	This command defines the column extent of the frame memory accessed by
+- *	the hostprocessor with the read_memory_continue and
+- *	write_memory_continue commands.
+- *	No status bits are changed.
+- */
+-#define set_page_addr			0x2b
+-/*
+- *	This command defines the page extent of the frame memory accessed by
+- *	the host processor with the write_memory_continue and
+- *	read_memory_continue command.
+- *	No status bits are changed.
+- */
+-#define write_mem_start			0x2c
+-/*
+- *	This command transfers image data from the host processor to the
+- *	display modules frame memory starting at the pixel location specified
+- *	by preceding set_column_address and set_page_address commands.
+- */
+-#define set_partial_area		0x30
+-/*
+- *	This command defines the Partial Display mode s display area.
+- *	There are two parameters associated with this command, the first
+- *	defines the Start Row (SR) and the second the End Row (ER). SR and ER
+- *	refer to the Frame Memory Line Pointer.
+- */
+-#define set_scroll_area			0x33
+-/*
+- *	This command defines the display modules Vertical Scrolling Area.
+- */
+-#define set_tear_off			0x34
+-/*
+- *	This command turns off the display modules Tearing Effect output
+- *	signal on the TE signal line.
+- */
+-#define set_tear_on			0x35
+-/*
+- *	This command turns on the display modules Tearing Effect output signal
+- *	on the TE signal line.
+- */
+-#define set_address_mode		0x36
+-/*
+- *	This command sets the data order for transfers from the host processor
+- *	to display modules frame memory,bits B[7:5] and B3, and from the
+- *	display modules frame memory to the display device, bits B[2:0] and B4.
+- */
+-#define set_scroll_start		0x37
+-/*
+- *	This command sets the start of the vertical scrolling area in the frame
+- *	memory. The vertical scrolling area is fully defined when this command
+- *	is used with the set_scroll_area command The set_scroll_start command
+- *	has one parameter, the Vertical Scroll Pointer. The VSP defines the
+- *	line in the frame memory that is written to the display device as the
+- *	first line of the vertical scroll area.
+- */
+-#define exit_idle_mode			0x38
+-/*
+- *	This command causes the display module to exit Idle mode.
+- */
+-#define enter_idle_mode			0x39
+-/*
+- *	This command causes the display module to enter Idle Mode.
+- *	In Idle Mode, color expression is reduced. Colors are shown on the
+- *	display device using the MSB of each of the R, G and B color
+- *	components in the frame memory
+- */
+-#define set_pixel_format		0x3a
+-/*
+- *	This command sets the pixel format for the RGB image data used by the
+- *	interface.
+- *	Bits D[6:4]  DPI Pixel Format Definition
+- *	Bits D[2:0]  DBI Pixel Format Definition
+- *	Bits D7 and D3 are not used.
+- */
+-#define DCS_PIXEL_FORMAT_3bpp		0x1
+-#define DCS_PIXEL_FORMAT_8bpp		0x2
+-#define DCS_PIXEL_FORMAT_12bpp		0x3
+-#define DCS_PIXEL_FORMAT_16bpp		0x5
+-#define DCS_PIXEL_FORMAT_18bpp		0x6
+-#define DCS_PIXEL_FORMAT_24bpp		0x7
+-
+-#define write_mem_cont			0x3c
+-
+-/*
+- *	This command transfers image data from the host processor to the
+- *	display module's frame memory continuing from the pixel location
+- *	following the previous write_memory_continue or write_memory_start
+- *	command.
+- */
+-#define set_tear_scanline		0x44
+-/*
+- *	This command turns on the display modules Tearing Effect output signal
+- *	on the TE signal line when the display module reaches line N.
+- */
+-#define get_scanline			0x45
+-/*
+- *	The display module returns the current scanline, N, used to update the
+- *	 display device. The total number of scanlines on a display device is
+- *	defined as VSYNC + VBP + VACT + VFP.The first scanline is defined as
+- *	the first line of V Sync and is denoted as Line 0.
+- *	When in Sleep Mode, the value returned by get_scanline is undefined.
+- */
+-
+-/* MCS or Generic COMMANDS */
+-/* MCS/generic data type */
+-#define GEN_SHORT_WRITE_0	0x03  /* generic short write, no parameters */
+-#define GEN_SHORT_WRITE_1	0x13  /* generic short write, 1 parameters */
+-#define GEN_SHORT_WRITE_2	0x23  /* generic short write, 2 parameters */
+-#define GEN_READ_0		0x04  /* generic read, no parameters */
+-#define GEN_READ_1		0x14  /* generic read, 1 parameters */
+-#define GEN_READ_2		0x24  /* generic read, 2 parameters */
+-#define GEN_LONG_WRITE		0x29  /* generic long write */
+-#define MCS_SHORT_WRITE_0	0x05  /* MCS short write, no parameters */
+-#define MCS_SHORT_WRITE_1	0x15  /* MCS short write, 1 parameters */
+-#define MCS_READ		0x06  /* MCS read, no parameters */
+-#define MCS_LONG_WRITE		0x39  /* MCS long write */
+-/* MCS/generic commands */
+-/* TPO MCS */
+-#define write_display_profile		0x50
+-#define write_display_brightness	0x51
+-#define write_ctrl_display		0x53
+-#define write_ctrl_cabc			0x55
+-  #define UI_IMAGE		0x01
+-  #define STILL_IMAGE		0x02
+-  #define MOVING_IMAGE		0x03
+-#define write_hysteresis		0x57
+-#define write_gamma_setting		0x58
+-#define write_cabc_min_bright		0x5e
+-#define write_kbbc_profile		0x60
+-/* TMD MCS */
+-#define tmd_write_display_brightness 0x8c
+-
+-/*
+- *	This command is used to control ambient light, panel backlight
+- *	brightness and gamma settings.
+- */
+-#define BRIGHT_CNTL_BLOCK_ON	(1 << 5)
+-#define AMBIENT_LIGHT_SENSE_ON	(1 << 4)
+-#define DISPLAY_DIMMING_ON	(1 << 3)
+-#define BACKLIGHT_ON		(1 << 2)
+-#define DISPLAY_BRIGHTNESS_AUTO	(1 << 1)
+-#define GAMMA_AUTO		(1 << 0)
+-
+-/* DCS Interface Pixel Formats */
+-#define DCS_PIXEL_FORMAT_3BPP	0x1
+-#define DCS_PIXEL_FORMAT_8BPP	0x2
+-#define DCS_PIXEL_FORMAT_12BPP	0x3
+-#define DCS_PIXEL_FORMAT_16BPP	0x5
+-#define DCS_PIXEL_FORMAT_18BPP	0x6
+-#define DCS_PIXEL_FORMAT_24BPP	0x7
+-/* ONE PARAMETER READ DATA */
+-#define addr_mode_data		0xfc
+-#define diag_res_data		0x00
+-#define disp_mode_data		0x23
+-#define pxl_fmt_data		0x77
+-#define pwr_mode_data		0x74
+-#define sig_mode_data		0x00
+-/* TWO PARAMETERS READ DATA */
+-#define scanline_data1		0xff
+-#define scanline_data2		0xff
+-#define NON_BURST_MODE_SYNC_PULSE	0x01	/* Non Burst Mode
+-						 * with Sync Pulse
+-						 */
+-#define NON_BURST_MODE_SYNC_EVENTS	0x02	/* Non Burst Mode
+-						 * with Sync events
+-						 */
+-#define BURST_MODE			0x03	/* Burst Mode */
+-#define DBI_COMMAND_BUFFER_SIZE		0x240   /* 0x32 */    /* 0x120 */
+-						/* Allocate at least
+-						 * 0x100 Byte with 32
+-						 * byte alignment
+-						 */
+-#define DBI_DATA_BUFFER_SIZE		0x120	/* Allocate at least
+-						 * 0x100 Byte with 32
+-						 * byte alignment
+-						 */
+-#define DBI_CB_TIME_OUT			0xFFFF
+-
+-#define GEN_FB_TIME_OUT			2000
+-
+-#define SKU_83				0x01
+-#define SKU_100				0x02
+-#define SKU_100L			0x04
+-#define SKU_BYPASS			0x08
+-
+-/* Some handy macros for playing with bitfields. */
+-#define PSB_MASK(high, low) (((1<<((high)-(low)+1))-1)<<(low))
+-#define SET_FIELD(value, field) (((value) << field ## _SHIFT) & field ## _MASK)
+-#define GET_FIELD(word, field) (((word)  & field ## _MASK) >> field ## _SHIFT)
+-
+-#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+-
+-/* PCI config space */
+-
+-#define SB_PCKT         0x02100 /* cedarview */
+-# define SB_OPCODE_MASK                         PSB_MASK(31, 16)
+-# define SB_OPCODE_SHIFT                        16
+-# define SB_OPCODE_READ                         0
+-# define SB_OPCODE_WRITE                        1
+-# define SB_DEST_MASK                           PSB_MASK(15, 8)
+-# define SB_DEST_SHIFT                          8
+-# define SB_DEST_DPLL                           0x88
+-# define SB_BYTE_ENABLE_MASK                    PSB_MASK(7, 4)
+-# define SB_BYTE_ENABLE_SHIFT                   4
+-# define SB_BUSY                                (1 << 0)
+-
+-
+-/* 32-bit value read/written from the DPIO reg. */
+-#define SB_DATA		0x02104 /* cedarview */
+-/* 32-bit address of the DPIO reg to be read/written. */
+-#define SB_ADDR		0x02108 /* cedarview */
+-#define DPIO_CFG	0x02110 /* cedarview */
+-# define DPIO_MODE_SELECT_1			(1 << 3)
+-# define DPIO_MODE_SELECT_0			(1 << 2)
+-# define DPIO_SFR_BYPASS			(1 << 1)
+-/* reset is active low */
+-# define DPIO_CMN_RESET_N			(1 << 0)
+-
+-/* Cedarview sideband registers */
+-#define _SB_M_A			0x8008
+-#define _SB_M_B			0x8028
+-#define SB_M(pipe) _PIPE(pipe, _SB_M_A, _SB_M_B)
+-# define SB_M_DIVIDER_MASK			(0xFF << 24)
+-# define SB_M_DIVIDER_SHIFT			24
+-
+-#define _SB_N_VCO_A		0x8014
+-#define _SB_N_VCO_B		0x8034
+-#define SB_N_VCO(pipe) _PIPE(pipe, _SB_N_VCO_A, _SB_N_VCO_B)
+-#define SB_N_VCO_SEL_MASK			PSB_MASK(31, 30)
+-#define SB_N_VCO_SEL_SHIFT			30
+-#define SB_N_DIVIDER_MASK			PSB_MASK(29, 26)
+-#define SB_N_DIVIDER_SHIFT			26
+-#define SB_N_CB_TUNE_MASK			PSB_MASK(25, 24)
+-#define SB_N_CB_TUNE_SHIFT			24
+-
+-#define _SB_REF_A		0x8018
+-#define _SB_REF_B		0x8038
+-#define SB_REF_SFR(pipe)	_PIPE(pipe, _SB_REF_A, _SB_REF_B)
+-
+-#define _SB_P_A			0x801c
+-#define _SB_P_B			0x803c
+-#define SB_P(pipe) _PIPE(pipe, _SB_P_A, _SB_P_B)
+-#define SB_P2_DIVIDER_MASK			PSB_MASK(31, 30)
+-#define SB_P2_DIVIDER_SHIFT			30
+-#define SB_P2_10				0 /* HDMI, DP, DAC */
+-#define SB_P2_5				1 /* DAC */
+-#define SB_P2_14				2 /* LVDS single */
+-#define SB_P2_7				3 /* LVDS double */
+-#define SB_P1_DIVIDER_MASK			PSB_MASK(15, 12)
+-#define SB_P1_DIVIDER_SHIFT			12
+-
+-#define PSB_LANE0		0x120
+-#define PSB_LANE1		0x220
+-#define PSB_LANE2		0x2320
+-#define PSB_LANE3		0x2420
+-
+-#define LANE_PLL_MASK		(0x7 << 20)
+-#define LANE_PLL_ENABLE		(0x3 << 20)
+-
+-
+-#endif
+diff --git a/drivers/staging/gma500/psb_intel_sdvo.c b/drivers/staging/gma500/psb_intel_sdvo.c
+deleted file mode 100644
+index a4bad1a..0000000
+--- a/drivers/staging/gma500/psb_intel_sdvo.c
++++ /dev/null
+@@ -1,1293 +0,0 @@
+-/*
+- * Copyright (c) 2006-2007 Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors:
+- *	Eric Anholt <eric at anholt.net>
+- */
+-
+-#include <linux/i2c.h>
+-#include <linux/delay.h>
+-/* #include <drm/drm_crtc.h> */
+-#include <drm/drmP.h>
+-#include "psb_drv.h"
+-#include "psb_intel_drv.h"
+-#include "psb_intel_reg.h"
+-#include "psb_intel_sdvo_regs.h"
+-
+-struct psb_intel_sdvo_priv {
+-	struct psb_intel_i2c_chan *i2c_bus;
+-	int slaveaddr;
+-	int output_device;
+-
+-	u16 active_outputs;
+-
+-	struct psb_intel_sdvo_caps caps;
+-	int pixel_clock_min, pixel_clock_max;
+-
+-	int save_sdvo_mult;
+-	u16 save_active_outputs;
+-	struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
+-	struct psb_intel_sdvo_dtd save_output_dtd[16];
+-	u32 save_SDVOX;
+-	u8 in_out_map[4];
+-
+-	u8 by_input_wiring;
+-	u32 active_device;
+-};
+-
+-/**
+- * Writes the SDVOB or SDVOC with the given value, but always writes both
+- * SDVOB and SDVOC to work around apparent hardware issues (according to
+- * comments in the BIOS).
+- */
+-void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output,
+-				u32 val)
+-{
+-	struct drm_device *dev = psb_intel_output->base.dev;
+-	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
+-	u32 bval = val, cval = val;
+-	int i;
+-
+-	if (sdvo_priv->output_device == SDVOB)
+-		cval = REG_READ(SDVOC);
+-	else
+-		bval = REG_READ(SDVOB);
+-	/*
+-	 * Write the registers twice for luck. Sometimes,
+-	 * writing them only once doesn't appear to 'stick'.
+-	 * The BIOS does this too. Yay, magic
+-	 */
+-	for (i = 0; i < 2; i++) {
+-		REG_WRITE(SDVOB, bval);
+-		REG_READ(SDVOB);
+-		REG_WRITE(SDVOC, cval);
+-		REG_READ(SDVOC);
+-	}
+-}
+-
+-static bool psb_intel_sdvo_read_byte(
+-				struct psb_intel_output *psb_intel_output,
+-				u8 addr, u8 *ch)
+-{
+-	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
+-	u8 out_buf[2];
+-	u8 buf[2];
+-	int ret;
+-
+-	struct i2c_msg msgs[] = {
+-		{
+-		 .addr = sdvo_priv->i2c_bus->slave_addr,
+-		 .flags = 0,
+-		 .len = 1,
+-		 .buf = out_buf,
+-		 },
+-		{
+-		 .addr = sdvo_priv->i2c_bus->slave_addr,
+-		 .flags = I2C_M_RD,
+-		 .len = 1,
+-		 .buf = buf,
+-		 }
+-	};
+-
+-	out_buf[0] = addr;
+-	out_buf[1] = 0;
+-
+-	ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
+-	if (ret == 2) {
+-		*ch = buf[0];
+-		return true;
+-	}
+-
+-	return false;
+-}
+-
+-static bool psb_intel_sdvo_write_byte(
+-			struct psb_intel_output *psb_intel_output,
+-			int addr, u8 ch)
+-{
+-	u8 out_buf[2];
+-	struct i2c_msg msgs[] = {
+-		{
+-		 .addr = psb_intel_output->i2c_bus->slave_addr,
+-		 .flags = 0,
+-		 .len = 2,
+-		 .buf = out_buf,
+-		 }
+-	};
+-
+-	out_buf[0] = addr;
+-	out_buf[1] = ch;
+-
+-	if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1)
+-		return true;
+-	return false;
+-}
+-
+-#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+-/** Mapping of command numbers to names, for debug output */
+-static const struct _sdvo_cmd_name {
+-	u8 cmd;
+-	char *name;
+-} sdvo_cmd_names[] = {
+-SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+-	    SDVO_CMD_NAME_ENTRY
+-	    (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+-	    SDVO_CMD_NAME_ENTRY
+-	    (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+-	    SDVO_CMD_NAME_ENTRY
+-	    (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+-	    SDVO_CMD_NAME_ENTRY
+-	    (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+-	    SDVO_CMD_NAME_ENTRY
+-	    (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+-	    SDVO_CMD_NAME_ENTRY
+-	    (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+-	    SDVO_CMD_NAME_ENTRY
+-	    (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+-	    SDVO_CMD_NAME_ENTRY
+-	    (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
+-	    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),};
+-
+-#define SDVO_NAME(dev_priv) \
+-		 ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
+-#define SDVO_PRIV(output)   ((struct psb_intel_sdvo_priv *) (output)->dev_priv)
+-
+-static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output,
+-				     u8 cmd,
+-				     void *args,
+-				     int args_len)
+-{
+-	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
+-	int i;
+-
+-	if (0) {
+-		printk(KERN_DEBUG "%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
+-		for (i = 0; i < args_len; i++)
+-			printk(KERN_CONT "%02X ", ((u8 *) args)[i]);
+-		for (; i < 8; i++)
+-			printk(KERN_CONT "   ");
+-		for (i = 0;
+-		     i <
+-		     sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]);
+-		     i++) {
+-			if (cmd == sdvo_cmd_names[i].cmd) {
+-				printk(KERN_CONT
+-					"(%s)", sdvo_cmd_names[i].name);
+-				break;
+-			}
+-		}
+-		if (i ==
+-		    sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
+-			printk(KERN_CONT "(%02X)", cmd);
+-		printk(KERN_CONT "\n");
+-	}
+-
+-	for (i = 0; i < args_len; i++) {
+-		psb_intel_sdvo_write_byte(psb_intel_output,
+-					SDVO_I2C_ARG_0 - i,
+-					((u8 *) args)[i]);
+-	}
+-
+-	psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd);
+-}
+-
+-static const char *const cmd_status_names[] = {
+-	"Power on",
+-	"Success",
+-	"Not supported",
+-	"Invalid arg",
+-	"Pending",
+-	"Target not specified",
+-	"Scaling not supported"
+-};
+-
+-static u8 psb_intel_sdvo_read_response(
+-				struct psb_intel_output *psb_intel_output,
+-				void *response, int response_len)
+-{
+-	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
+-	int i;
+-	u8 status;
+-	u8 retry = 50;
+-
+-	while (retry--) {
+-		/* Read the command response */
+-		for (i = 0; i < response_len; i++) {
+-			psb_intel_sdvo_read_byte(psb_intel_output,
+-					     SDVO_I2C_RETURN_0 + i,
+-					     &((u8 *) response)[i]);
+-		}
+-
+-		/* read the return status */
+-		psb_intel_sdvo_read_byte(psb_intel_output,
+-					 SDVO_I2C_CMD_STATUS,
+-					 &status);
+-
+-		if (0) {
+-			pr_debug("%s: R: ", SDVO_NAME(sdvo_priv));
+-			for (i = 0; i < response_len; i++)
+-				printk(KERN_CONT "%02X ", ((u8 *) response)[i]);
+-			for (; i < 8; i++)
+-				printk("   ");
+-			if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+-				printk(KERN_CONT "(%s)",
+-					 cmd_status_names[status]);
+-			else
+-				printk(KERN_CONT "(??? %d)", status);
+-			printk(KERN_CONT "\n");
+-		}
+-
+-		if (status != SDVO_CMD_STATUS_PENDING)
+-			return status;
+-
+-		mdelay(50);
+-	}
+-
+-	return status;
+-}
+-
+-int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+-{
+-	if (mode->clock >= 100000)
+-		return 1;
+-	else if (mode->clock >= 50000)
+-		return 2;
+-	else
+-		return 4;
+-}
+-
+-/**
+- * Don't check status code from this as it switches the bus back to the
+- * SDVO chips which defeats the purpose of doing a bus switch in the first
+- * place.
+- */
+-void psb_intel_sdvo_set_control_bus_switch(
+-				struct psb_intel_output *psb_intel_output,
+-				u8 target)
+-{
+-	psb_intel_sdvo_write_cmd(psb_intel_output,
+-				 SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+-				 &target,
+-				 1);
+-}
+-
+-static bool psb_intel_sdvo_set_target_input(
+-				struct psb_intel_output *psb_intel_output,
+-				bool target_0, bool target_1)
+-{
+-	struct psb_intel_sdvo_set_target_input_args targets = { 0 };
+-	u8 status;
+-
+-	if (target_0 && target_1)
+-		return SDVO_CMD_STATUS_NOTSUPP;
+-
+-	if (target_1)
+-		targets.target_1 = 1;
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT,
+-			     &targets, sizeof(targets));
+-
+-	status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
+-
+-	return status == SDVO_CMD_STATUS_SUCCESS;
+-}
+-
+-/**
+- * Return whether each input is trained.
+- *
+- * This function is making an assumption about the layout of the response,
+- * which should be checked against the docs.
+- */
+-static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output
+-					  *psb_intel_output, bool *input_1,
+-					  bool *input_2)
+-{
+-	struct psb_intel_sdvo_get_trained_inputs_response response;
+-	u8 status;
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS,
+-			     NULL, 0);
+-	status =
+-	    psb_intel_sdvo_read_response(psb_intel_output, &response,
+-				     sizeof(response));
+-	if (status != SDVO_CMD_STATUS_SUCCESS)
+-		return false;
+-
+-	*input_1 = response.input0_trained;
+-	*input_2 = response.input1_trained;
+-	return true;
+-}
+-
+-static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output
+-					  *psb_intel_output, u16 *outputs)
+-{
+-	u8 status;
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS,
+-			     NULL, 0);
+-	status =
+-	    psb_intel_sdvo_read_response(psb_intel_output, outputs,
+-				     sizeof(*outputs));
+-
+-	return status == SDVO_CMD_STATUS_SUCCESS;
+-}
+-
+-static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output
+-					  *psb_intel_output, u16 outputs)
+-{
+-	u8 status;
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS,
+-			     &outputs, sizeof(outputs));
+-	status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
+-	return status == SDVO_CMD_STATUS_SUCCESS;
+-}
+-
+-static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output
+-					       *psb_intel_output, int mode)
+-{
+-	u8 status, state = SDVO_ENCODER_STATE_ON;
+-
+-	switch (mode) {
+-	case DRM_MODE_DPMS_ON:
+-		state = SDVO_ENCODER_STATE_ON;
+-		break;
+-	case DRM_MODE_DPMS_STANDBY:
+-		state = SDVO_ENCODER_STATE_STANDBY;
+-		break;
+-	case DRM_MODE_DPMS_SUSPEND:
+-		state = SDVO_ENCODER_STATE_SUSPEND;
+-		break;
+-	case DRM_MODE_DPMS_OFF:
+-		state = SDVO_ENCODER_STATE_OFF;
+-		break;
+-	}
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output,
+-			     SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
+-			     sizeof(state));
+-	status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
+-
+-	return status == SDVO_CMD_STATUS_SUCCESS;
+-}
+-
+-static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output
+-						   *psb_intel_output,
+-						   int *clock_min,
+-						   int *clock_max)
+-{
+-	struct psb_intel_sdvo_pixel_clock_range clocks;
+-	u8 status;
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output,
+-			     SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL,
+-			     0);
+-
+-	status =
+-	    psb_intel_sdvo_read_response(psb_intel_output, &clocks,
+-				     sizeof(clocks));
+-
+-	if (status != SDVO_CMD_STATUS_SUCCESS)
+-		return false;
+-
+-	/* Convert the values from units of 10 kHz to kHz. */
+-	*clock_min = clocks.min * 10;
+-	*clock_max = clocks.max * 10;
+-
+-	return true;
+-}
+-
+-static bool psb_intel_sdvo_set_target_output(
+-				struct psb_intel_output *psb_intel_output,
+-				u16 outputs)
+-{
+-	u8 status;
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT,
+-			     &outputs, sizeof(outputs));
+-
+-	status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
+-	return status == SDVO_CMD_STATUS_SUCCESS;
+-}
+-
+-static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output,
+-				  u8 cmd, struct psb_intel_sdvo_dtd *dtd)
+-{
+-	u8 status;
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0);
+-	status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
+-					  sizeof(dtd->part1));
+-	if (status != SDVO_CMD_STATUS_SUCCESS)
+-		return false;
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0);
+-	status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
+-					  sizeof(dtd->part2));
+-	if (status != SDVO_CMD_STATUS_SUCCESS)
+-		return false;
+-
+-	return true;
+-}
+-
+-static bool psb_intel_sdvo_get_input_timing(
+-				struct psb_intel_output *psb_intel_output,
+-				struct psb_intel_sdvo_dtd *dtd)
+-{
+-	return psb_intel_sdvo_get_timing(psb_intel_output,
+-				     SDVO_CMD_GET_INPUT_TIMINGS_PART1,
+-				     dtd);
+-}
+-
+-static bool psb_intel_sdvo_set_timing(
+-				struct psb_intel_output *psb_intel_output,
+-				u8 cmd,
+-				struct psb_intel_sdvo_dtd *dtd)
+-{
+-	u8 status;
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1,
+-			     sizeof(dtd->part1));
+-	status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
+-	if (status != SDVO_CMD_STATUS_SUCCESS)
+-		return false;
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2,
+-			     sizeof(dtd->part2));
+-	status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
+-	if (status != SDVO_CMD_STATUS_SUCCESS)
+-		return false;
+-
+-	return true;
+-}
+-
+-static bool psb_intel_sdvo_set_input_timing(
+-				struct psb_intel_output *psb_intel_output,
+-				struct psb_intel_sdvo_dtd *dtd)
+-{
+-	return psb_intel_sdvo_set_timing(psb_intel_output,
+-				     SDVO_CMD_SET_INPUT_TIMINGS_PART1,
+-				     dtd);
+-}
+-
+-static bool psb_intel_sdvo_set_output_timing(
+-				struct psb_intel_output *psb_intel_output,
+-				struct psb_intel_sdvo_dtd *dtd)
+-{
+-	return psb_intel_sdvo_set_timing(psb_intel_output,
+-				     SDVO_CMD_SET_OUTPUT_TIMINGS_PART1,
+-				     dtd);
+-}
+-
+-static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output
+-						*psb_intel_output)
+-{
+-	u8 response, status;
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output,
+-				 SDVO_CMD_GET_CLOCK_RATE_MULT,
+-				 NULL,
+-				 0);
+-
+-	status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1);
+-
+-	if (status != SDVO_CMD_STATUS_SUCCESS) {
+-		DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
+-		return SDVO_CLOCK_RATE_MULT_1X;
+-	} else {
+-		DRM_DEBUG("Current clock rate multiplier: %d\n", response);
+-	}
+-
+-	return response;
+-}
+-
+-static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output
+-						*psb_intel_output, u8 val)
+-{
+-	u8 status;
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output,
+-				SDVO_CMD_SET_CLOCK_RATE_MULT,
+-				&val,
+-				1);
+-
+-	status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
+-	if (status != SDVO_CMD_STATUS_SUCCESS)
+-		return false;
+-
+-	return true;
+-}
+-
+-static bool psb_sdvo_set_current_inoutmap(struct psb_intel_output *output,
+-					  u32 in0outputmask,
+-					  u32 in1outputmask)
+-{
+-	u8 byArgs[4];
+-	u8 status;
+-	int i;
+-	struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
+-
+-	/* Make all fields of the  args/ret to zero */
+-	memset(byArgs, 0, sizeof(byArgs));
+-
+-	/* Fill up the argument values; */
+-	byArgs[0] = (u8) (in0outputmask & 0xFF);
+-	byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF);
+-	byArgs[2] = (u8) (in1outputmask & 0xFF);
+-	byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF);
+-
+-
+-	/*save inoutmap arg here*/
+-	for (i = 0; i < 4; i++)
+-		sdvo_priv->in_out_map[i] = byArgs[0];
+-
+-	psb_intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4);
+-	status = psb_intel_sdvo_read_response(output, NULL, 0);
+-
+-	if (status != SDVO_CMD_STATUS_SUCCESS)
+-		return false;
+-	return true;
+-}
+-
+-
+-static void psb_intel_sdvo_set_iomap(struct psb_intel_output *output)
+-{
+-	u32 dwCurrentSDVOIn0 = 0;
+-	u32 dwCurrentSDVOIn1 = 0;
+-	u32 dwDevMask = 0;
+-
+-
+-	struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
+-
+-	/* Please DO NOT change the following code. */
+-	/* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
+-	/* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
+-	if (sdvo_priv->by_input_wiring & (SDVOB_IN0 | SDVOC_IN0)) {
+-		switch (sdvo_priv->active_device) {
+-		case SDVO_DEVICE_LVDS:
+-			dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
+-			break;
+-		case SDVO_DEVICE_TMDS:
+-			dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
+-			break;
+-		case SDVO_DEVICE_TV:
+-			dwDevMask =
+-			SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
+-			SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
+-			SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
+-			SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
+-			break;
+-		case SDVO_DEVICE_CRT:
+-			dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
+-			break;
+-		}
+-		dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
+-	} else if (sdvo_priv->by_input_wiring & (SDVOB_IN1 | SDVOC_IN1)) {
+-		switch (sdvo_priv->active_device) {
+-		case SDVO_DEVICE_LVDS:
+-			dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
+-			break;
+-		case SDVO_DEVICE_TMDS:
+-			dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
+-			break;
+-		case SDVO_DEVICE_TV:
+-			dwDevMask =
+-			SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
+-			SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
+-			SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
+-			SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
+-			break;
+-		case SDVO_DEVICE_CRT:
+-			dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
+-			break;
+-		}
+-		dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
+-	}
+-
+-	psb_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
+-					  dwCurrentSDVOIn1);
+-}
+-
+-
+-static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+-				  struct drm_display_mode *mode,
+-				  struct drm_display_mode *adjusted_mode)
+-{
+-	/* Make the CRTC code factor in the SDVO pixel multiplier.  The SDVO
+-	 * device will be told of the multiplier during mode_set.
+-	 */
+-	adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode);
+-	return true;
+-}
+-
+-static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
+-				struct drm_display_mode *mode,
+-				struct drm_display_mode *adjusted_mode)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct drm_crtc *crtc = encoder->crtc;
+-	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+-	struct psb_intel_output *psb_intel_output =
+-					enc_to_psb_intel_output(encoder);
+-	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
+-	u16 width, height;
+-	u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+-	u16 h_sync_offset, v_sync_offset;
+-	u32 sdvox;
+-	struct psb_intel_sdvo_dtd output_dtd;
+-	int sdvo_pixel_multiply;
+-
+-	if (!mode)
+-		return;
+-
+-	psb_intel_sdvo_set_target_output(psb_intel_output, 0);
+-
+-	width = mode->crtc_hdisplay;
+-	height = mode->crtc_vdisplay;
+-
+-	/* do some mode translations */
+-	h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
+-	h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+-
+-	v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
+-	v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+-
+-	h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+-	v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+-
+-	output_dtd.part1.clock = mode->clock / 10;
+-	output_dtd.part1.h_active = width & 0xff;
+-	output_dtd.part1.h_blank = h_blank_len & 0xff;
+-	output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
+-	    ((h_blank_len >> 8) & 0xf);
+-	output_dtd.part1.v_active = height & 0xff;
+-	output_dtd.part1.v_blank = v_blank_len & 0xff;
+-	output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
+-	    ((v_blank_len >> 8) & 0xf);
+-
+-	output_dtd.part2.h_sync_off = h_sync_offset;
+-	output_dtd.part2.h_sync_width = h_sync_len & 0xff;
+-	output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+-	    (v_sync_len & 0xf);
+-	output_dtd.part2.sync_off_width_high =
+-	    ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) |
+-	    ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4);
+-
+-	output_dtd.part2.dtd_flags = 0x18;
+-	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+-		output_dtd.part2.dtd_flags |= 0x2;
+-	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+-		output_dtd.part2.dtd_flags |= 0x4;
+-
+-	output_dtd.part2.sdvo_flags = 0;
+-	output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
+-	output_dtd.part2.reserved = 0;
+-
+-	/* Set the output timing to the screen */
+-	psb_intel_sdvo_set_target_output(psb_intel_output,
+-				     sdvo_priv->active_outputs);
+-
+-	/* Set the input timing to the screen. Assume always input 0. */
+-	psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
+-
+-	psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd);
+-
+-	/* We would like to use i830_sdvo_create_preferred_input_timing() to
+-	 * provide the device with a timing it can support, if it supports that
+-	 * feature.  However, presumably we would need to adjust the CRTC to
+-	 * output the preferred timing, and we don't support that currently.
+-	 */
+-	psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd);
+-
+-	switch (psb_intel_sdvo_get_pixel_multiplier(mode)) {
+-	case 1:
+-		psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
+-					       SDVO_CLOCK_RATE_MULT_1X);
+-		break;
+-	case 2:
+-		psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
+-					       SDVO_CLOCK_RATE_MULT_2X);
+-		break;
+-	case 4:
+-		psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
+-					       SDVO_CLOCK_RATE_MULT_4X);
+-		break;
+-	}
+-
+-	/* Set the SDVO control regs. */
+-	sdvox = REG_READ(sdvo_priv->output_device);
+-	switch (sdvo_priv->output_device) {
+-	case SDVOB:
+-		sdvox &= SDVOB_PRESERVE_MASK;
+-		break;
+-	case SDVOC:
+-		sdvox &= SDVOC_PRESERVE_MASK;
+-		break;
+-	}
+-	sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+-	if (psb_intel_crtc->pipe == 1)
+-		sdvox |= SDVO_PIPE_B_SELECT;
+-
+-	sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode);
+-
+-	psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox);
+-
+-	 psb_intel_sdvo_set_iomap(psb_intel_output);
+-}
+-
+-static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct psb_intel_output *psb_intel_output =
+-					enc_to_psb_intel_output(encoder);
+-	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
+-	u32 temp;
+-
+-	if (mode != DRM_MODE_DPMS_ON) {
+-		psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
+-		if (0)
+-			psb_intel_sdvo_set_encoder_power_state(
+-							psb_intel_output,
+-							mode);
+-
+-		if (mode == DRM_MODE_DPMS_OFF) {
+-			temp = REG_READ(sdvo_priv->output_device);
+-			if ((temp & SDVO_ENABLE) != 0) {
+-				psb_intel_sdvo_write_sdvox(psb_intel_output,
+-						       temp &
+-						       ~SDVO_ENABLE);
+-			}
+-		}
+-	} else {
+-		bool input1, input2;
+-		int i;
+-		u8 status;
+-
+-		temp = REG_READ(sdvo_priv->output_device);
+-		if ((temp & SDVO_ENABLE) == 0)
+-			psb_intel_sdvo_write_sdvox(psb_intel_output,
+-					       temp | SDVO_ENABLE);
+-		for (i = 0; i < 2; i++)
+-			psb_intel_wait_for_vblank(dev);
+-
+-		status =
+-		    psb_intel_sdvo_get_trained_inputs(psb_intel_output,
+-							&input1,
+-							&input2);
+-
+-
+-		/* Warn if the device reported failure to sync.
+-		 * A lot of SDVO devices fail to notify of sync, but it's
+-		 * a given it the status is a success, we succeeded.
+-		 */
+-		if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+-			DRM_DEBUG
+-			    ("First %s output reported failure to sync\n",
+-			     SDVO_NAME(sdvo_priv));
+-		}
+-
+-		if (0)
+-			psb_intel_sdvo_set_encoder_power_state(
+-							psb_intel_output,
+-							mode);
+-		psb_intel_sdvo_set_active_outputs(psb_intel_output,
+-					      sdvo_priv->active_outputs);
+-	}
+-	return;
+-}
+-
+-static void psb_intel_sdvo_save(struct drm_connector *connector)
+-{
+-	struct drm_device *dev = connector->dev;
+-	struct psb_intel_output *psb_intel_output =
+-					to_psb_intel_output(connector);
+-	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
+-	/*int o;*/
+-
+-	sdvo_priv->save_sdvo_mult =
+-	    psb_intel_sdvo_get_clock_rate_mult(psb_intel_output);
+-	psb_intel_sdvo_get_active_outputs(psb_intel_output,
+-				      &sdvo_priv->save_active_outputs);
+-
+-	if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
+-		psb_intel_sdvo_set_target_input(psb_intel_output,
+-						true,
+-						false);
+-		psb_intel_sdvo_get_input_timing(psb_intel_output,
+-					    &sdvo_priv->save_input_dtd_1);
+-	}
+-
+-	if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
+-		psb_intel_sdvo_set_target_input(psb_intel_output,
+-						false,
+-						true);
+-		psb_intel_sdvo_get_input_timing(psb_intel_output,
+-					    &sdvo_priv->save_input_dtd_2);
+-	}
+-	sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device);
+-
+-	/*TODO: save the in_out_map state*/
+-}
+-
+-static void psb_intel_sdvo_restore(struct drm_connector *connector)
+-{
+-	struct drm_device *dev = connector->dev;
+-	struct psb_intel_output *psb_intel_output =
+-					to_psb_intel_output(connector);
+-	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
+-	/*int o;*/
+-	int i;
+-	bool input1, input2;
+-	u8 status;
+-
+-	psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
+-
+-	if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
+-		psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
+-		psb_intel_sdvo_set_input_timing(psb_intel_output,
+-					    &sdvo_priv->save_input_dtd_1);
+-	}
+-
+-	if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
+-		psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
+-		psb_intel_sdvo_set_input_timing(psb_intel_output,
+-					    &sdvo_priv->save_input_dtd_2);
+-	}
+-
+-	psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
+-				       sdvo_priv->save_sdvo_mult);
+-
+-	REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
+-
+-	if (sdvo_priv->save_SDVOX & SDVO_ENABLE) {
+-		for (i = 0; i < 2; i++)
+-			psb_intel_wait_for_vblank(dev);
+-		status =
+-		    psb_intel_sdvo_get_trained_inputs(psb_intel_output,
+-							&input1,
+-							&input2);
+-		if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
+-			DRM_DEBUG
+-			    ("First %s output reported failure to sync\n",
+-			     SDVO_NAME(sdvo_priv));
+-	}
+-
+-	psb_intel_sdvo_set_active_outputs(psb_intel_output,
+-				      sdvo_priv->save_active_outputs);
+-
+-	/*TODO: restore in_out_map*/
+-	psb_intel_sdvo_write_cmd(psb_intel_output,
+-				 SDVO_CMD_SET_IN_OUT_MAP,
+-				 sdvo_priv->in_out_map,
+-				 4);
+-
+-	psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
+-}
+-
+-static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
+-				 struct drm_display_mode *mode)
+-{
+-	struct psb_intel_output *psb_intel_output =
+-				to_psb_intel_output(connector);
+-	struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
+-
+-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+-		return MODE_NO_DBLESCAN;
+-
+-	if (sdvo_priv->pixel_clock_min > mode->clock)
+-		return MODE_CLOCK_LOW;
+-
+-	if (sdvo_priv->pixel_clock_max < mode->clock)
+-		return MODE_CLOCK_HIGH;
+-
+-	return MODE_OK;
+-}
+-
+-static bool psb_intel_sdvo_get_capabilities(
+-				struct psb_intel_output *psb_intel_output,
+-				struct psb_intel_sdvo_caps *caps)
+-{
+-	u8 status;
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output,
+-				 SDVO_CMD_GET_DEVICE_CAPS,
+-				 NULL,
+-				 0);
+-	status = psb_intel_sdvo_read_response(psb_intel_output,
+-						caps,
+-						sizeof(*caps));
+-	if (status != SDVO_CMD_STATUS_SUCCESS)
+-		return false;
+-
+-	return true;
+-}
+-
+-struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
+-{
+-	struct drm_connector *connector = NULL;
+-	struct psb_intel_output *iout = NULL;
+-	struct psb_intel_sdvo_priv *sdvo;
+-
+-	/* find the sdvo connector */
+-	list_for_each_entry(connector, &dev->mode_config.connector_list,
+-			    head) {
+-		iout = to_psb_intel_output(connector);
+-
+-		if (iout->type != INTEL_OUTPUT_SDVO)
+-			continue;
+-
+-		sdvo = iout->dev_priv;
+-
+-		if (sdvo->output_device == SDVOB && sdvoB)
+-			return connector;
+-
+-		if (sdvo->output_device == SDVOC && !sdvoB)
+-			return connector;
+-
+-	}
+-
+-	return NULL;
+-}
+-
+-int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
+-{
+-	u8 response[2];
+-	u8 status;
+-	struct psb_intel_output *psb_intel_output;
+-
+-	if (!connector)
+-		return 0;
+-
+-	psb_intel_output = to_psb_intel_output(connector);
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output,
+-				 SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+-				 NULL,
+-				 0);
+-	status = psb_intel_sdvo_read_response(psb_intel_output,
+-						&response,
+-						2);
+-
+-	if (response[0] != 0)
+-		return 1;
+-
+-	return 0;
+-}
+-
+-void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+-{
+-	u8 response[2];
+-	u8 status;
+-	struct psb_intel_output *psb_intel_output =
+-					to_psb_intel_output(connector);
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output,
+-				 SDVO_CMD_GET_ACTIVE_HOT_PLUG,
+-				 NULL,
+-				 0);
+-	psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
+-
+-	if (on) {
+-		psb_intel_sdvo_write_cmd(psb_intel_output,
+-				     SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL,
+-				     0);
+-		status = psb_intel_sdvo_read_response(psb_intel_output,
+-						      &response,
+-						      2);
+-
+-		psb_intel_sdvo_write_cmd(psb_intel_output,
+-				     SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+-				     &response, 2);
+-	} else {
+-		response[0] = 0;
+-		response[1] = 0;
+-		psb_intel_sdvo_write_cmd(psb_intel_output,
+-				     SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+-				     &response, 2);
+-	}
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output,
+-				 SDVO_CMD_GET_ACTIVE_HOT_PLUG,
+-				 NULL,
+-				 0);
+-	psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
+-}
+-
+-static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector
+-						   *connector, bool force)
+-{
+-	u8 response[2];
+-	u8 status;
+-	struct psb_intel_output *psb_intel_output =
+-					to_psb_intel_output(connector);
+-
+-	psb_intel_sdvo_write_cmd(psb_intel_output,
+-				 SDVO_CMD_GET_ATTACHED_DISPLAYS,
+-				 NULL,
+-				 0);
+-	status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
+-
+-	DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
+-	if ((response[0] != 0) || (response[1] != 0))
+-		return connector_status_connected;
+-	else
+-		return connector_status_disconnected;
+-}
+-
+-static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
+-{
+-	struct psb_intel_output *psb_intel_output =
+-					to_psb_intel_output(connector);
+-
+-	/* set the bus switch and get the modes */
+-	psb_intel_sdvo_set_control_bus_switch(psb_intel_output,
+-					  SDVO_CONTROL_BUS_DDC2);
+-	psb_intel_ddc_get_modes(psb_intel_output);
+-
+-	if (list_empty(&connector->probed_modes))
+-		return 0;
+-	return 1;
+-}
+-
+-static void psb_intel_sdvo_destroy(struct drm_connector *connector)
+-{
+-	struct psb_intel_output *psb_intel_output =
+-				to_psb_intel_output(connector);
+-
+-	if (psb_intel_output->i2c_bus)
+-		psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
+-	drm_sysfs_connector_remove(connector);
+-	drm_connector_cleanup(connector);
+-	kfree(psb_intel_output);
+-}
+-
+-static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
+-	.dpms = psb_intel_sdvo_dpms,
+-	.mode_fixup = psb_intel_sdvo_mode_fixup,
+-	.prepare = psb_intel_encoder_prepare,
+-	.mode_set = psb_intel_sdvo_mode_set,
+-	.commit = psb_intel_encoder_commit,
+-};
+-
+-static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
+-	.dpms = drm_helper_connector_dpms,
+-	.save = psb_intel_sdvo_save,
+-	.restore = psb_intel_sdvo_restore,
+-	.detect = psb_intel_sdvo_detect,
+-	.fill_modes = drm_helper_probe_single_connector_modes,
+-	.destroy = psb_intel_sdvo_destroy,
+-};
+-
+-static const struct drm_connector_helper_funcs
+-				psb_intel_sdvo_connector_helper_funcs = {
+-	.get_modes = psb_intel_sdvo_get_modes,
+-	.mode_valid = psb_intel_sdvo_mode_valid,
+-	.best_encoder = psb_intel_best_encoder,
+-};
+-
+-void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+-{
+-	drm_encoder_cleanup(encoder);
+-}
+-
+-static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
+-	.destroy = psb_intel_sdvo_enc_destroy,
+-};
+-
+-
+-void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
+-{
+-	struct drm_connector *connector;
+-	struct psb_intel_output *psb_intel_output;
+-	struct psb_intel_sdvo_priv *sdvo_priv;
+-	struct psb_intel_i2c_chan *i2cbus = NULL;
+-	int connector_type;
+-	u8 ch[0x40];
+-	int i;
+-	int encoder_type, output_id;
+-
+-	psb_intel_output =
+-	    kcalloc(sizeof(struct psb_intel_output) +
+-		    sizeof(struct psb_intel_sdvo_priv), 1, GFP_KERNEL);
+-	if (!psb_intel_output)
+-		return;
+-
+-	connector = &psb_intel_output->base;
+-
+-	drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs,
+-			   DRM_MODE_CONNECTOR_Unknown);
+-	drm_connector_helper_add(connector,
+-				 &psb_intel_sdvo_connector_helper_funcs);
+-	sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1);
+-	psb_intel_output->type = INTEL_OUTPUT_SDVO;
+-
+-	connector->interlace_allowed = 0;
+-	connector->doublescan_allowed = 0;
+-
+-	/* setup the DDC bus. */
+-	if (output_device == SDVOB)
+-		i2cbus =
+-		    psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
+-	else
+-		i2cbus =
+-		    psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
+-
+-	if (!i2cbus)
+-		goto err_connector;
+-
+-	sdvo_priv->i2c_bus = i2cbus;
+-
+-	if (output_device == SDVOB) {
+-		output_id = 1;
+-		sdvo_priv->by_input_wiring = SDVOB_IN0;
+-		sdvo_priv->i2c_bus->slave_addr = 0x38;
+-	} else {
+-		output_id = 2;
+-		sdvo_priv->i2c_bus->slave_addr = 0x39;
+-	}
+-
+-	sdvo_priv->output_device = output_device;
+-	psb_intel_output->i2c_bus = i2cbus;
+-	psb_intel_output->dev_priv = sdvo_priv;
+-
+-
+-	/* Read the regs to test if we can talk to the device */
+-	for (i = 0; i < 0x40; i++) {
+-		if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) {
+-			dev_dbg(dev->dev, "No SDVO device found on SDVO%c\n",
+-				  output_device == SDVOB ? 'B' : 'C');
+-			goto err_i2c;
+-		}
+-	}
+-
+-	psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps);
+-
+-	memset(&sdvo_priv->active_outputs, 0,
+-	       sizeof(sdvo_priv->active_outputs));
+-
+-	/* TODO, CVBS, SVID, YPRPB & SCART outputs. */
+-	if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) {
+-		sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
+-		sdvo_priv->active_device = SDVO_DEVICE_CRT;
+-		connector->display_info.subpixel_order =
+-		    SubPixelHorizontalRGB;
+-		encoder_type = DRM_MODE_ENCODER_DAC;
+-		connector_type = DRM_MODE_CONNECTOR_VGA;
+-	} else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) {
+-		sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
+-		sdvo_priv->active_outputs = SDVO_DEVICE_CRT;
+-		connector->display_info.subpixel_order =
+-		    SubPixelHorizontalRGB;
+-		encoder_type = DRM_MODE_ENCODER_DAC;
+-		connector_type = DRM_MODE_CONNECTOR_VGA;
+-	} else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
+-		sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
+-		sdvo_priv->active_device = SDVO_DEVICE_TMDS;
+-		connector->display_info.subpixel_order =
+-		    SubPixelHorizontalRGB;
+-		encoder_type = DRM_MODE_ENCODER_TMDS;
+-		connector_type = DRM_MODE_CONNECTOR_DVID;
+-	} else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
+-		sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
+-		sdvo_priv->active_device = SDVO_DEVICE_TMDS;
+-		connector->display_info.subpixel_order =
+-		    SubPixelHorizontalRGB;
+-		encoder_type = DRM_MODE_ENCODER_TMDS;
+-		connector_type = DRM_MODE_CONNECTOR_DVID;
+-	} else {
+-		unsigned char bytes[2];
+-
+-		memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
+-		dev_dbg(dev->dev, "%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
+-		     SDVO_NAME(sdvo_priv), bytes[0], bytes[1]);
+-		goto err_i2c;
+-	}
+-
+-	drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs,
+-			 encoder_type);
+-	drm_encoder_helper_add(&psb_intel_output->enc,
+-			       &psb_intel_sdvo_helper_funcs);
+-	connector->connector_type = connector_type;
+-
+-	drm_mode_connector_attach_encoder(&psb_intel_output->base,
+-					  &psb_intel_output->enc);
+-	drm_sysfs_connector_add(connector);
+-
+-	/* Set the input timing to the screen. Assume always input 0. */
+-	psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
+-
+-	psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output,
+-					       &sdvo_priv->pixel_clock_min,
+-					       &sdvo_priv->
+-					       pixel_clock_max);
+-
+-
+-	dev_dbg(dev->dev, "%s device VID/DID: %02X:%02X.%02X, "
+-		  "clock range %dMHz - %dMHz, "
+-		  "input 1: %c, input 2: %c, "
+-		  "output 1: %c, output 2: %c\n",
+-		  SDVO_NAME(sdvo_priv),
+-		  sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
+-		  sdvo_priv->caps.device_rev_id,
+-		  sdvo_priv->pixel_clock_min / 1000,
+-		  sdvo_priv->pixel_clock_max / 1000,
+-		  (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+-		  (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+-		  /* check currently supported outputs */
+-		  sdvo_priv->caps.output_flags &
+-		  (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+-		  sdvo_priv->caps.output_flags &
+-		  (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+-
+-	psb_intel_output->ddc_bus = i2cbus;
+-
+-	return;
+-
+-err_i2c:
+-	psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
+-err_connector:
+-	drm_connector_cleanup(connector);
+-	kfree(psb_intel_output);
+-
+-	return;
+-}
+diff --git a/drivers/staging/gma500/psb_intel_sdvo_regs.h b/drivers/staging/gma500/psb_intel_sdvo_regs.h
+deleted file mode 100644
+index 96862ea..0000000
+--- a/drivers/staging/gma500/psb_intel_sdvo_regs.h
++++ /dev/null
+@@ -1,338 +0,0 @@
+-/*
+- * SDVO command definitions and structures.
+- *
+- * Copyright (c) 2008, Intel Corporation
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors:
+- *	Eric Anholt <eric at anholt.net>
+- */
+-
+-#define SDVO_OUTPUT_FIRST   (0)
+-#define SDVO_OUTPUT_TMDS0   (1 << 0)
+-#define SDVO_OUTPUT_RGB0    (1 << 1)
+-#define SDVO_OUTPUT_CVBS0   (1 << 2)
+-#define SDVO_OUTPUT_SVID0   (1 << 3)
+-#define SDVO_OUTPUT_YPRPB0  (1 << 4)
+-#define SDVO_OUTPUT_SCART0  (1 << 5)
+-#define SDVO_OUTPUT_LVDS0   (1 << 6)
+-#define SDVO_OUTPUT_TMDS1   (1 << 8)
+-#define SDVO_OUTPUT_RGB1    (1 << 9)
+-#define SDVO_OUTPUT_CVBS1   (1 << 10)
+-#define SDVO_OUTPUT_SVID1   (1 << 11)
+-#define SDVO_OUTPUT_YPRPB1  (1 << 12)
+-#define SDVO_OUTPUT_SCART1  (1 << 13)
+-#define SDVO_OUTPUT_LVDS1   (1 << 14)
+-#define SDVO_OUTPUT_LAST    (14)
+-
+-struct psb_intel_sdvo_caps {
+-	u8 vendor_id;
+-	u8 device_id;
+-	u8 device_rev_id;
+-	u8 sdvo_version_major;
+-	u8 sdvo_version_minor;
+-	unsigned int sdvo_inputs_mask:2;
+-	unsigned int smooth_scaling:1;
+-	unsigned int sharp_scaling:1;
+-	unsigned int up_scaling:1;
+-	unsigned int down_scaling:1;
+-	unsigned int stall_support:1;
+-	unsigned int pad:1;
+-	u16 output_flags;
+-} __packed;
+-
+-/** This matches the EDID DTD structure, more or less */
+-struct psb_intel_sdvo_dtd {
+-	struct {
+-		u16 clock;	/**< pixel clock, in 10kHz units */
+-		u8 h_active;	/**< lower 8 bits (pixels) */
+-		u8 h_blank;	/**< lower 8 bits (pixels) */
+-		u8 h_high;	/**< upper 4 bits each h_active, h_blank */
+-		u8 v_active;	/**< lower 8 bits (lines) */
+-		u8 v_blank;	/**< lower 8 bits (lines) */
+-		u8 v_high;	/**< upper 4 bits each v_active, v_blank */
+-	} part1;
+-
+-	struct {
+-		u8 h_sync_off;
+-			/**< lower 8 bits, from hblank start */
+-		u8 h_sync_width;/**< lower 8 bits (pixels) */
+-	/** lower 4 bits each vsync offset, vsync width */
+-		u8 v_sync_off_width;
+-	/**
+-	 * 2 high bits of hsync offset, 2 high bits of hsync width,
+-	 * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+-	 */
+-		u8 sync_off_width_high;
+-		u8 dtd_flags;
+-		u8 sdvo_flags;
+-	/** bits 6-7 of vsync offset at bits 6-7 */
+-		u8 v_sync_off_high;
+-		u8 reserved;
+-	} part2;
+-} __packed;
+-
+-struct psb_intel_sdvo_pixel_clock_range {
+-	u16 min;		/**< pixel clock, in 10kHz units */
+-	u16 max;		/**< pixel clock, in 10kHz units */
+-} __packed;
+-
+-struct psb_intel_sdvo_preferred_input_timing_args {
+-	u16 clock;
+-	u16 width;
+-	u16 height;
+-} __packed;
+-
+-/* I2C registers for SDVO */
+-#define SDVO_I2C_ARG_0				0x07
+-#define SDVO_I2C_ARG_1				0x06
+-#define SDVO_I2C_ARG_2				0x05
+-#define SDVO_I2C_ARG_3				0x04
+-#define SDVO_I2C_ARG_4				0x03
+-#define SDVO_I2C_ARG_5				0x02
+-#define SDVO_I2C_ARG_6				0x01
+-#define SDVO_I2C_ARG_7				0x00
+-#define SDVO_I2C_OPCODE				0x08
+-#define SDVO_I2C_CMD_STATUS			0x09
+-#define SDVO_I2C_RETURN_0			0x0a
+-#define SDVO_I2C_RETURN_1			0x0b
+-#define SDVO_I2C_RETURN_2			0x0c
+-#define SDVO_I2C_RETURN_3			0x0d
+-#define SDVO_I2C_RETURN_4			0x0e
+-#define SDVO_I2C_RETURN_5			0x0f
+-#define SDVO_I2C_RETURN_6			0x10
+-#define SDVO_I2C_RETURN_7			0x11
+-#define SDVO_I2C_VENDOR_BEGIN			0x20
+-
+-/* Status results */
+-#define SDVO_CMD_STATUS_POWER_ON		0x0
+-#define SDVO_CMD_STATUS_SUCCESS			0x1
+-#define SDVO_CMD_STATUS_NOTSUPP			0x2
+-#define SDVO_CMD_STATUS_INVALID_ARG		0x3
+-#define SDVO_CMD_STATUS_PENDING			0x4
+-#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED	0x5
+-#define SDVO_CMD_STATUS_SCALING_NOT_SUPP	0x6
+-
+-/* SDVO commands, argument/result registers */
+-
+-#define SDVO_CMD_RESET					0x01
+-
+-/** Returns a struct psb_intel_sdvo_caps */
+-#define SDVO_CMD_GET_DEVICE_CAPS			0x02
+-
+-#define SDVO_CMD_GET_FIRMWARE_REV			0x86
+-# define SDVO_DEVICE_FIRMWARE_MINOR			SDVO_I2C_RETURN_0
+-# define SDVO_DEVICE_FIRMWARE_MAJOR			SDVO_I2C_RETURN_1
+-# define SDVO_DEVICE_FIRMWARE_PATCH			SDVO_I2C_RETURN_2
+-
+-/**
+- * Reports which inputs are trained (managed to sync).
+- *
+- * Devices must have trained within 2 vsyncs of a mode change.
+- */
+-#define SDVO_CMD_GET_TRAINED_INPUTS			0x03
+-struct psb_intel_sdvo_get_trained_inputs_response {
+-	unsigned int input0_trained:1;
+-	unsigned int input1_trained:1;
+-	unsigned int pad:6;
+-} __packed;
+-
+-/** Returns a struct psb_intel_sdvo_output_flags of active outputs. */
+-#define SDVO_CMD_GET_ACTIVE_OUTPUTS			0x04
+-
+-/**
+- * Sets the current set of active outputs.
+- *
+- * Takes a struct psb_intel_sdvo_output_flags.
+- * Must be preceded by a SET_IN_OUT_MAP
+- * on multi-output devices.
+- */
+-#define SDVO_CMD_SET_ACTIVE_OUTPUTS			0x05
+-
+-/**
+- * Returns the current mapping of SDVO inputs to outputs on the device.
+- *
+- * Returns two struct psb_intel_sdvo_output_flags structures.
+- */
+-#define SDVO_CMD_GET_IN_OUT_MAP				0x06
+-
+-/**
+- * Sets the current mapping of SDVO inputs to outputs on the device.
+- *
+- * Takes two struct i380_sdvo_output_flags structures.
+- */
+-#define SDVO_CMD_SET_IN_OUT_MAP				0x07
+-
+-/**
+- * Returns a struct psb_intel_sdvo_output_flags of attached displays.
+- */
+-#define SDVO_CMD_GET_ATTACHED_DISPLAYS			0x0b
+-
+-/**
+- * Returns a struct psb_intel_sdvo_ouptut_flags of displays supporting hot plugging.
+- */
+-#define SDVO_CMD_GET_HOT_PLUG_SUPPORT			0x0c
+-
+-/**
+- * Takes a struct psb_intel_sdvo_output_flags.
+- */
+-#define SDVO_CMD_SET_ACTIVE_HOT_PLUG			0x0d
+-
+-/**
+- * Returns a struct psb_intel_sdvo_output_flags of displays with hot plug
+- * interrupts enabled.
+- */
+-#define SDVO_CMD_GET_ACTIVE_HOT_PLUG			0x0e
+-
+-#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE		0x0f
+-struct psb_intel_sdvo_get_interrupt_event_source_response {
+-	u16 interrupt_status;
+-	unsigned int ambient_light_interrupt:1;
+-	unsigned int pad:7;
+-} __packed;
+-
+-/**
+- * Selects which input is affected by future input commands.
+- *
+- * Commands affected include SET_INPUT_TIMINGS_PART[12],
+- * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+- * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+- */
+-#define SDVO_CMD_SET_TARGET_INPUT			0x10
+-struct psb_intel_sdvo_set_target_input_args {
+-	unsigned int target_1:1;
+-	unsigned int pad:7;
+-} __packed;
+-
+-/**
+- * Takes a struct psb_intel_sdvo_output_flags of which outputs are targeted by
+- * future output commands.
+- *
+- * Affected commands inclue SET_OUTPUT_TIMINGS_PART[12],
+- * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+- */
+-#define SDVO_CMD_SET_TARGET_OUTPUT			0x11
+-
+-#define SDVO_CMD_GET_INPUT_TIMINGS_PART1		0x12
+-#define SDVO_CMD_GET_INPUT_TIMINGS_PART2		0x13
+-#define SDVO_CMD_SET_INPUT_TIMINGS_PART1		0x14
+-#define SDVO_CMD_SET_INPUT_TIMINGS_PART2		0x15
+-#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1		0x16
+-#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2		0x17
+-#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1		0x18
+-#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2		0x19
+-/* Part 1 */
+-# define SDVO_DTD_CLOCK_LOW				SDVO_I2C_ARG_0
+-# define SDVO_DTD_CLOCK_HIGH				SDVO_I2C_ARG_1
+-# define SDVO_DTD_H_ACTIVE				SDVO_I2C_ARG_2
+-# define SDVO_DTD_H_BLANK				SDVO_I2C_ARG_3
+-# define SDVO_DTD_H_HIGH				SDVO_I2C_ARG_4
+-# define SDVO_DTD_V_ACTIVE				SDVO_I2C_ARG_5
+-# define SDVO_DTD_V_BLANK				SDVO_I2C_ARG_6
+-# define SDVO_DTD_V_HIGH				SDVO_I2C_ARG_7
+-/* Part 2 */
+-# define SDVO_DTD_HSYNC_OFF				SDVO_I2C_ARG_0
+-# define SDVO_DTD_HSYNC_WIDTH				SDVO_I2C_ARG_1
+-# define SDVO_DTD_VSYNC_OFF_WIDTH			SDVO_I2C_ARG_2
+-# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH			SDVO_I2C_ARG_3
+-# define SDVO_DTD_DTD_FLAGS				SDVO_I2C_ARG_4
+-# define SDVO_DTD_DTD_FLAG_INTERLACED				(1 << 7)
+-# define SDVO_DTD_DTD_FLAG_STEREO_MASK				(3 << 5)
+-# define SDVO_DTD_DTD_FLAG_INPUT_MASK				(3 << 3)
+-# define SDVO_DTD_DTD_FLAG_SYNC_MASK				(3 << 1)
+-# define SDVO_DTD_SDVO_FLAS				SDVO_I2C_ARG_5
+-# define SDVO_DTD_SDVO_FLAG_STALL				(1 << 7)
+-# define SDVO_DTD_SDVO_FLAG_CENTERED				(0 << 6)
+-# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT				(1 << 6)
+-# define SDVO_DTD_SDVO_FLAG_SCALING_MASK			(3 << 4)
+-# define SDVO_DTD_SDVO_FLAG_SCALING_NONE			(0 << 4)
+-# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP			(1 << 4)
+-# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH			(2 << 4)
+-# define SDVO_DTD_VSYNC_OFF_HIGH			SDVO_I2C_ARG_6
+-
+-/**
+- * Generates a DTD based on the given width, height, and flags.
+- *
+- * This will be supported by any device supporting scaling or interlaced
+- * modes.
+- */
+-#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING		0x1a
+-# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW		SDVO_I2C_ARG_0
+-# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH		SDVO_I2C_ARG_1
+-# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW		SDVO_I2C_ARG_2
+-# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH		SDVO_I2C_ARG_3
+-# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW		SDVO_I2C_ARG_4
+-# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH	SDVO_I2C_ARG_5
+-# define SDVO_PREFERRED_INPUT_TIMING_FLAGS		SDVO_I2C_ARG_6
+-# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED		(1 << 0)
+-# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED		(1 << 1)
+-
+-#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1	0x1b
+-#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2	0x1c
+-
+-/** Returns a struct psb_intel_sdvo_pixel_clock_range */
+-#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE		0x1d
+-/** Returns a struct psb_intel_sdvo_pixel_clock_range */
+-#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE		0x1e
+-
+-/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+-#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS		0x1f
+-
+-/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+-#define SDVO_CMD_GET_CLOCK_RATE_MULT			0x20
+-/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+-#define SDVO_CMD_SET_CLOCK_RATE_MULT			0x21
+-# define SDVO_CLOCK_RATE_MULT_1X				(1 << 0)
+-# define SDVO_CLOCK_RATE_MULT_2X				(1 << 1)
+-# define SDVO_CLOCK_RATE_MULT_4X				(1 << 3)
+-
+-#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS		0x27
+-
+-#define SDVO_CMD_GET_TV_FORMAT				0x28
+-
+-#define SDVO_CMD_SET_TV_FORMAT				0x29
+-
+-#define SDVO_CMD_GET_SUPPORTED_POWER_STATES		0x2a
+-#define SDVO_CMD_GET_ENCODER_POWER_STATE		0x2b
+-#define SDVO_CMD_SET_ENCODER_POWER_STATE		0x2c
+-# define SDVO_ENCODER_STATE_ON					(1 << 0)
+-# define SDVO_ENCODER_STATE_STANDBY				(1 << 1)
+-# define SDVO_ENCODER_STATE_SUSPEND				(1 << 2)
+-# define SDVO_ENCODER_STATE_OFF					(1 << 3)
+-
+-#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT		0x93
+-
+-#define SDVO_CMD_SET_CONTROL_BUS_SWITCH			0x7a
+-# define SDVO_CONTROL_BUS_PROM				0x0
+-# define SDVO_CONTROL_BUS_DDC1				0x1
+-# define SDVO_CONTROL_BUS_DDC2				0x2
+-# define SDVO_CONTROL_BUS_DDC3				0x3
+-
+-/* SDVO Bus & SDVO Inputs wiring details*/
+-/* Bit 0: Is SDVOB connected to In0 (1 = yes, 0 = no*/
+-/* Bit 1: Is SDVOB connected to In1 (1 = yes, 0 = no*/
+-/* Bit 2: Is SDVOC connected to In0 (1 = yes, 0 = no*/
+-/* Bit 3: Is SDVOC connected to In1 (1 = yes, 0 = no*/
+-#define SDVOB_IN0 0x01
+-#define SDVOB_IN1 0x02
+-#define SDVOC_IN0 0x04
+-#define SDVOC_IN1 0x08
+-
+-#define SDVO_DEVICE_NONE 0x00
+-#define        SDVO_DEVICE_CRT 0x01
+-#define        SDVO_DEVICE_TV 0x02
+-#define        SDVO_DEVICE_LVDS 0x04
+-#define        SDVO_DEVICE_TMDS 0x08
+-
+diff --git a/drivers/staging/gma500/psb_irq.c b/drivers/staging/gma500/psb_irq.c
+deleted file mode 100644
+index 36dd630..0000000
+--- a/drivers/staging/gma500/psb_irq.c
++++ /dev/null
+@@ -1,627 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2007, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+- * develop this driver.
+- *
+- **************************************************************************/
+-/*
+- */
+-
+-#include <drm/drmP.h>
+-#include "psb_drv.h"
+-#include "psb_reg.h"
+-#include "psb_intel_reg.h"
+-#include "power.h"
+-#include "mdfld_output.h"
+-
+-/*
+- * inline functions
+- */
+-
+-static inline u32
+-psb_pipestat(int pipe)
+-{
+-	if (pipe == 0)
+-		return PIPEASTAT;
+-	if (pipe == 1)
+-		return PIPEBSTAT;
+-	if (pipe == 2)
+-		return PIPECSTAT;
+-	BUG();
+-}
+-
+-static inline u32
+-mid_pipe_event(int pipe)
+-{
+-	if (pipe == 0)
+-		return _PSB_PIPEA_EVENT_FLAG;
+-	if (pipe == 1)
+-		return _MDFLD_PIPEB_EVENT_FLAG;
+-	if (pipe == 2)
+-		return _MDFLD_PIPEC_EVENT_FLAG;
+-	BUG();
+-}
+-
+-static inline u32
+-mid_pipe_vsync(int pipe)
+-{
+-	if (pipe == 0)
+-		return _PSB_VSYNC_PIPEA_FLAG;
+-	if (pipe == 1)
+-		return _PSB_VSYNC_PIPEB_FLAG;
+-	if (pipe == 2)
+-		return _MDFLD_PIPEC_VBLANK_FLAG;
+-	BUG();
+-}
+-
+-static inline u32
+-mid_pipeconf(int pipe)
+-{
+-	if (pipe == 0)
+-		return PIPEACONF;
+-	if (pipe == 1)
+-		return PIPEBCONF;
+-	if (pipe == 2)
+-		return PIPECCONF;
+-	BUG();
+-}
+-
+-void
+-psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+-{
+-	if ((dev_priv->pipestat[pipe] & mask) != mask) {
+-		u32 reg = psb_pipestat(pipe);
+-		dev_priv->pipestat[pipe] |= mask;
+-		/* Enable the interrupt, clear any pending status */
+-		if (gma_power_begin(dev_priv->dev, false)) {
+-			u32 writeVal = PSB_RVDC32(reg);
+-			writeVal |= (mask | (mask >> 16));
+-			PSB_WVDC32(writeVal, reg);
+-			(void) PSB_RVDC32(reg);
+-			gma_power_end(dev_priv->dev);
+-		}
+-	}
+-}
+-
+-void
+-psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
+-{
+-	if ((dev_priv->pipestat[pipe] & mask) != 0) {
+-		u32 reg = psb_pipestat(pipe);
+-		dev_priv->pipestat[pipe] &= ~mask;
+-		if (gma_power_begin(dev_priv->dev, false)) {
+-			u32 writeVal = PSB_RVDC32(reg);
+-			writeVal &= ~mask;
+-			PSB_WVDC32(writeVal, reg);
+-			(void) PSB_RVDC32(reg);
+-			gma_power_end(dev_priv->dev);
+-		}
+-	}
+-}
+-
+-void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+-{
+-	if (gma_power_begin(dev_priv->dev, false)) {
+-		u32 pipe_event = mid_pipe_event(pipe);
+-		dev_priv->vdc_irq_mask |= pipe_event;
+-		PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+-		PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+-		gma_power_end(dev_priv->dev);
+-	}
+-}
+-
+-void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
+-{
+-	if (dev_priv->pipestat[pipe] == 0) {
+-		if (gma_power_begin(dev_priv->dev, false)) {
+-			u32 pipe_event = mid_pipe_event(pipe);
+-			dev_priv->vdc_irq_mask &= ~pipe_event;
+-			PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+-			PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+-			gma_power_end(dev_priv->dev);
+-		}
+-	}
+-}
+-
+-/**
+- * Display controller interrupt handler for pipe event.
+- *
+- */
+-static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
+-{
+-	struct drm_psb_private *dev_priv =
+-	    (struct drm_psb_private *) dev->dev_private;
+-
+-	uint32_t pipe_stat_val = 0;
+-	uint32_t pipe_stat_reg = psb_pipestat(pipe);
+-	uint32_t pipe_enable = dev_priv->pipestat[pipe];
+-	uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
+-	uint32_t pipe_clear;
+-	uint32_t i = 0;
+-
+-	spin_lock(&dev_priv->irqmask_lock);
+-
+-	pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
+-	pipe_stat_val &= pipe_enable | pipe_status;
+-	pipe_stat_val &= pipe_stat_val >> 16;
+-
+-	spin_unlock(&dev_priv->irqmask_lock);
+-
+-	/* Clear the 2nd level interrupt status bits
+-	 * Sometimes the bits are very sticky so we repeat until they unstick */
+-	for (i = 0; i < 0xffff; i++) {
+-		PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
+-		pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;
+-
+-		if (pipe_clear == 0)
+-			break;
+-	}
+-
+-	if (pipe_clear)
+-		dev_err(dev->dev,
+-		"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
+-		__func__, pipe, PSB_RVDC32(pipe_stat_reg));
+-
+-	if (pipe_stat_val & PIPE_VBLANK_STATUS)
+-		drm_handle_vblank(dev, pipe);
+-
+-	if (pipe_stat_val & PIPE_TE_STATUS)
+-		drm_handle_vblank(dev, pipe);
+-}
+-
+-/*
+- * Display controller interrupt handler.
+- */
+-static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
+-{
+-	if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
+-		mid_pipe_event_handler(dev, 0);
+-
+-	if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
+-		mid_pipe_event_handler(dev, 1);
+-}
+-
+-irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
+-{
+-	struct drm_device *dev = (struct drm_device *) arg;
+-	struct drm_psb_private *dev_priv =
+-	    (struct drm_psb_private *) dev->dev_private;
+-
+-	uint32_t vdc_stat, dsp_int = 0, sgx_int = 0;
+-	int handled = 0;
+-
+-	spin_lock(&dev_priv->irqmask_lock);
+-
+-	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+-
+-	if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
+-		dsp_int = 1;
+-
+-	/* FIXME: Handle Medfield
+-	if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
+-		dsp_int = 1;
+-	*/
+-
+-	if (vdc_stat & _PSB_IRQ_SGX_FLAG)
+-		sgx_int = 1;
+-
+-	vdc_stat &= dev_priv->vdc_irq_mask;
+-	spin_unlock(&dev_priv->irqmask_lock);
+-
+-	if (dsp_int && gma_power_is_on(dev)) {
+-		psb_vdc_interrupt(dev, vdc_stat);
+-		handled = 1;
+-	}
+-
+-	if (sgx_int) {
+-		/* Not expected - we have it masked, shut it up */
+-		u32 s, s2;
+-		s = PSB_RSGX32(PSB_CR_EVENT_STATUS);
+-		s2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
+-		PSB_WSGX32(s, PSB_CR_EVENT_HOST_CLEAR);
+-		PSB_WSGX32(s2, PSB_CR_EVENT_HOST_CLEAR2);
+-		/* if s & _PSB_CE_TWOD_COMPLETE we have 2D done but
+-		   we may as well poll even if we add that ! */
+-		handled = 1;
+-	}
+-
+-	PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
+-	(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
+-	DRM_READMEMORYBARRIER();
+-
+-	if (!handled)
+-		return IRQ_NONE;
+-
+-	return IRQ_HANDLED;
+-}
+-
+-void psb_irq_preinstall(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv =
+-	    (struct drm_psb_private *) dev->dev_private;
+-	unsigned long irqflags;
+-
+-	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+-
+-	if (gma_power_is_on(dev))
+-		PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+-	if (dev->vblank_enabled[0])
+-		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+-	if (dev->vblank_enabled[1])
+-		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+-
+-	/* FIXME: Handle Medfield irq mask
+-	if (dev->vblank_enabled[1])
+-		dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
+-	if (dev->vblank_enabled[2])
+-		dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
+-	*/
+-
+-	/* This register is safe even if display island is off */
+-	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+-	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+-}
+-
+-int psb_irq_postinstall(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv =
+-	    (struct drm_psb_private *) dev->dev_private;
+-	unsigned long irqflags;
+-
+-	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+-
+-	/* This register is safe even if display island is off */
+-	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+-	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+-
+-	if (dev->vblank_enabled[0])
+-		psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+-	else
+-		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-	if (dev->vblank_enabled[1])
+-		psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+-	else
+-		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-	if (dev->vblank_enabled[2])
+-		psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+-	else
+-		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+-	return 0;
+-}
+-
+-void psb_irq_uninstall(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv =
+-	    (struct drm_psb_private *) dev->dev_private;
+-	unsigned long irqflags;
+-
+-	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+-
+-	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+-
+-	if (dev->vblank_enabled[0])
+-		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-	if (dev->vblank_enabled[1])
+-		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-	if (dev->vblank_enabled[2])
+-		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
+-				  _PSB_IRQ_MSVDX_FLAG |
+-				  _LNC_IRQ_TOPAZ_FLAG;
+-
+-	/* These two registers are safe even if display island is off */
+-	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+-	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+-
+-	wmb();
+-
+-	/* This register is safe even if display island is off */
+-	PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
+-	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+-}
+-
+-void psb_irq_turn_on_dpst(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv =
+-		(struct drm_psb_private *) dev->dev_private;
+-	u32 hist_reg;
+-	u32 pwm_reg;
+-
+-	if (gma_power_begin(dev, false)) {
+-		PSB_WVDC32(1 << 31, HISTOGRAM_LOGIC_CONTROL);
+-		hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
+-		PSB_WVDC32(1 << 31, HISTOGRAM_INT_CONTROL);
+-		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+-
+-		PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
+-		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+-		PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE
+-						| PWM_PHASEIN_INT_ENABLE,
+-							   PWM_CONTROL_LOGIC);
+-		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+-
+-		psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+-
+-		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+-		PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
+-							HISTOGRAM_INT_CONTROL);
+-		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+-		PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
+-							PWM_CONTROL_LOGIC);
+-
+-		gma_power_end(dev);
+-	}
+-}
+-
+-int psb_irq_enable_dpst(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv =
+-		(struct drm_psb_private *) dev->dev_private;
+-	unsigned long irqflags;
+-
+-	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+-
+-	/* enable DPST */
+-	mid_enable_pipe_event(dev_priv, 0);
+-	psb_irq_turn_on_dpst(dev);
+-
+-	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+-	return 0;
+-}
+-
+-void psb_irq_turn_off_dpst(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv =
+-	    (struct drm_psb_private *) dev->dev_private;
+-	u32 hist_reg;
+-	u32 pwm_reg;
+-
+-	if (gma_power_begin(dev, false)) {
+-		PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
+-		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
+-
+-		psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
+-
+-		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+-		PSB_WVDC32(pwm_reg & !(PWM_PHASEIN_INT_ENABLE),
+-							PWM_CONTROL_LOGIC);
+-		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
+-
+-		gma_power_end(dev);
+-	}
+-}
+-
+-int psb_irq_disable_dpst(struct drm_device *dev)
+-{
+-	struct drm_psb_private *dev_priv =
+-	    (struct drm_psb_private *) dev->dev_private;
+-	unsigned long irqflags;
+-
+-	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+-
+-	mid_disable_pipe_event(dev_priv, 0);
+-	psb_irq_turn_off_dpst(dev);
+-
+-	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+-
+-	return 0;
+-}
+-
+-#ifdef PSB_FIXME
+-static int psb_vblank_do_wait(struct drm_device *dev,
+-			      unsigned int *sequence, atomic_t *counter)
+-{
+-	unsigned int cur_vblank;
+-	int ret = 0;
+-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+-		    (((cur_vblank = atomic_read(counter))
+-		      - *sequence) <= (1 << 23)));
+-	*sequence = cur_vblank;
+-
+-	return ret;
+-}
+-#endif
+-
+-/*
+- * It is used to enable VBLANK interrupt
+- */
+-int psb_enable_vblank(struct drm_device *dev, int pipe)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	unsigned long irqflags;
+-	uint32_t reg_val = 0;
+-	uint32_t pipeconf_reg = mid_pipeconf(pipe);
+-
+-#if defined(CONFIG_DRM_PSB_MFLD)
+-	/* Medfield is different - we should perhaps extract out vblank
+-	   and blacklight etc ops */
+-	if (IS_MFLD(dev) && !mdfld_panel_dpi(dev))
+-		return mdfld_enable_te(dev, pipe);
+-#endif
+-	if (gma_power_begin(dev, false)) {
+-		reg_val = REG_READ(pipeconf_reg);
+-		gma_power_end(dev);
+-	}
+-
+-	if (!(reg_val & PIPEACONF_ENABLE))
+-		return -EINVAL;
+-
+-	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+-
+-	if (pipe == 0)
+-		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+-	else if (pipe == 1)
+-		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+-
+-	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+-	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+-	psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+-
+-	return 0;
+-}
+-
+-/*
+- * It is used to disable VBLANK interrupt
+- */
+-void psb_disable_vblank(struct drm_device *dev, int pipe)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	unsigned long irqflags;
+-
+-#if defined(CONFIG_DRM_PSB_MFLD)
+-	if (IS_MFLD(dev) && !mdfld_panel_dpi(dev))
+-		mdfld_disable_te(dev, pipe);
+-#endif
+-	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+-
+-	if (pipe == 0)
+-		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
+-	else if (pipe == 1)
+-		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
+-
+-	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
+-	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
+-	psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
+-
+-	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+-}
+-
+-/**
+- *	mdfld_enable_te		-	enable TE events
+- *	@dev: our DRM device
+- *	@pipe: which pipe to work on
+- *
+- *	Enable TE events on a Medfield display pipe. Medfield specific.
+- */
+-int mdfld_enable_te(struct drm_device *dev, int pipe)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	unsigned long flags;
+-	uint32_t reg_val = 0;
+-	uint32_t pipeconf_reg = mid_pipeconf(pipe);
+-
+-	if (gma_power_begin(dev, false)) {
+-		reg_val = REG_READ(pipeconf_reg);
+-		gma_power_end(dev);
+-	}
+-
+-	if (!(reg_val & PIPEACONF_ENABLE))
+-		return -EINVAL;
+-
+-	spin_lock_irqsave(&dev_priv->irqmask_lock, flags);
+-
+-	mid_enable_pipe_event(dev_priv, pipe);
+-	psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
+-
+-	spin_unlock_irqrestore(&dev_priv->irqmask_lock, flags);
+-
+-	return 0;
+-}
+-
+-/**
+- *	mdfld_disable_te		-	disable TE events
+- *	@dev: our DRM device
+- *	@pipe: which pipe to work on
+- *
+- *	Disable TE events on a Medfield display pipe. Medfield specific.
+- */
+-void mdfld_disable_te(struct drm_device *dev, int pipe)
+-{
+-	struct drm_psb_private *dev_priv = dev->dev_private;
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&dev_priv->irqmask_lock, flags);
+-
+-	mid_disable_pipe_event(dev_priv, pipe);
+-	psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
+-
+-	spin_unlock_irqrestore(&dev_priv->irqmask_lock, flags);
+-}
+-
+-/* Called from drm generic code, passed a 'crtc', which
+- * we use as a pipe index
+- */
+-u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
+-{
+-	uint32_t high_frame = PIPEAFRAMEHIGH;
+-	uint32_t low_frame = PIPEAFRAMEPIXEL;
+-	uint32_t pipeconf_reg = PIPEACONF;
+-	uint32_t reg_val = 0;
+-	uint32_t high1 = 0, high2 = 0, low = 0, count = 0;
+-
+-	switch (pipe) {
+-	case 0:
+-		break;
+-	case 1:
+-		high_frame = PIPEBFRAMEHIGH;
+-		low_frame = PIPEBFRAMEPIXEL;
+-		pipeconf_reg = PIPEBCONF;
+-		break;
+-	case 2:
+-		high_frame = PIPECFRAMEHIGH;
+-		low_frame = PIPECFRAMEPIXEL;
+-		pipeconf_reg = PIPECCONF;
+-		break;
+-	default:
+-		dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
+-		return 0;
+-	}
+-
+-	if (!gma_power_begin(dev, false))
+-		return 0;
+-
+-	reg_val = REG_READ(pipeconf_reg);
+-
+-	if (!(reg_val & PIPEACONF_ENABLE)) {
+-		dev_err(dev->dev, "trying to get vblank count for disabled pipe %d\n",
+-								pipe);
+-		goto psb_get_vblank_counter_exit;
+-	}
+-
+-	/*
+-	 * High & low register fields aren't synchronized, so make sure
+-	 * we get a low value that's stable across two reads of the high
+-	 * register.
+-	 */
+-	do {
+-		high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+-			 PIPE_FRAME_HIGH_SHIFT);
+-		low =  ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+-			PIPE_FRAME_LOW_SHIFT);
+-		high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+-			 PIPE_FRAME_HIGH_SHIFT);
+-	} while (high1 != high2);
+-
+-	count = (high1 << 8) | low;
+-
+-psb_get_vblank_counter_exit:
+-
+-	gma_power_end(dev);
+-
+-	return count;
+-}
+-
+diff --git a/drivers/staging/gma500/psb_irq.h b/drivers/staging/gma500/psb_irq.h
+deleted file mode 100644
+index 216fda3..0000000
+--- a/drivers/staging/gma500/psb_irq.h
++++ /dev/null
+@@ -1,45 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2009-2011, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors:
+- *    Benjamin Defnet <benjamin.r.defnet at intel.com>
+- *    Rajesh Poornachandran <rajesh.poornachandran at intel.com>
+- *
+- **************************************************************************/
+-
+-#ifndef _SYSIRQ_H_
+-#define _SYSIRQ_H_
+-
+-#include <drm/drmP.h>
+-
+-bool sysirq_init(struct drm_device *dev);
+-void sysirq_uninit(struct drm_device *dev);
+-
+-void psb_irq_preinstall(struct drm_device *dev);
+-int  psb_irq_postinstall(struct drm_device *dev);
+-void psb_irq_uninstall(struct drm_device *dev);
+-irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
+-
+-int psb_irq_enable_dpst(struct drm_device *dev);
+-int psb_irq_disable_dpst(struct drm_device *dev);
+-void psb_irq_turn_on_dpst(struct drm_device *dev);
+-void psb_irq_turn_off_dpst(struct drm_device *dev);
+-int  psb_enable_vblank(struct drm_device *dev, int pipe);
+-void psb_disable_vblank(struct drm_device *dev, int pipe);
+-u32  psb_get_vblank_counter(struct drm_device *dev, int pipe);
+-
+-#endif /* _SYSIRQ_H_ */
+diff --git a/drivers/staging/gma500/psb_lid.c b/drivers/staging/gma500/psb_lid.c
+deleted file mode 100644
+index b867aabe..0000000
+--- a/drivers/staging/gma500/psb_lid.c
++++ /dev/null
+@@ -1,88 +0,0 @@
+-/**************************************************************************
+- * Copyright (c) 2007, Intel Corporation.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+- *
+- * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+- **************************************************************************/
+-
+-#include <drm/drmP.h>
+-#include "psb_drv.h"
+-#include "psb_reg.h"
+-#include "psb_intel_reg.h"
+-#include <linux/spinlock.h>
+-
+-static void psb_lid_timer_func(unsigned long data)
+-{
+-	struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
+-	struct drm_device *dev = (struct drm_device *)dev_priv->dev;
+-	struct timer_list *lid_timer = &dev_priv->lid_timer;
+-	unsigned long irq_flags;
+-	u32 *lid_state = dev_priv->lid_state;
+-	u32 pp_status;
+-
+-	if (readl(lid_state) == dev_priv->lid_last_state)
+-		goto lid_timer_schedule;
+-
+-	if ((readl(lid_state)) & 0x01) {
+-		/*lid state is open*/
+-		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
+-		do {
+-			pp_status = REG_READ(PP_STATUS);
+-		} while ((pp_status & PP_ON) == 0);
+-
+-		/*FIXME: should be backlight level before*/
+-		psb_intel_lvds_set_brightness(dev, 100);
+-	} else {
+-		psb_intel_lvds_set_brightness(dev, 0);
+-
+-		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
+-		do {
+-			pp_status = REG_READ(PP_STATUS);
+-		} while ((pp_status & PP_ON) == 0);
+-	}
+-	dev_priv->lid_last_state =  readl(lid_state);
+-
+-lid_timer_schedule:
+-	spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+-	if (!timer_pending(lid_timer)) {
+-		lid_timer->expires = jiffies + PSB_LID_DELAY;
+-		add_timer(lid_timer);
+-	}
+-	spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+-}
+-
+-void psb_lid_timer_init(struct drm_psb_private *dev_priv)
+-{
+-	struct timer_list *lid_timer = &dev_priv->lid_timer;
+-	unsigned long irq_flags;
+-
+-	spin_lock_init(&dev_priv->lid_lock);
+-	spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
+-
+-	init_timer(lid_timer);
+-
+-	lid_timer->data = (unsigned long)dev_priv;
+-	lid_timer->function = psb_lid_timer_func;
+-	lid_timer->expires = jiffies + PSB_LID_DELAY;
+-
+-	add_timer(lid_timer);
+-	spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
+-}
+-
+-void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
+-{
+-	del_timer_sync(&dev_priv->lid_timer);
+-}
+-
+diff --git a/drivers/staging/gma500/psb_reg.h b/drivers/staging/gma500/psb_reg.h
+deleted file mode 100644
+index b81c7c1..0000000
+--- a/drivers/staging/gma500/psb_reg.h
++++ /dev/null
+@@ -1,582 +0,0 @@
+-/**************************************************************************
+- *
+- * Copyright (c) (2005-2007) Imagination Technologies Limited.
+- * Copyright (c) 2007, Intel Corporation.
+- * All Rights Reserved.
+- *
+- * This program is free software; you can redistribute it and/or modify it
+- * under the terms and conditions of the GNU General Public License,
+- * version 2, as published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope it will be useful, but WITHOUT
+- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+- * more details.
+- *
+- * You should have received a copy of the GNU General Public License along with
+- * this program; if not, write to the Free Software Foundation, Inc.,
+- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA..
+- *
+- **************************************************************************/
+-
+-#ifndef _PSB_REG_H_
+-#define _PSB_REG_H_
+-
+-#define PSB_CR_CLKGATECTL		0x0000
+-#define _PSB_C_CLKGATECTL_AUTO_MAN_REG		(1 << 24)
+-#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT	(20)
+-#define _PSB_C_CLKGATECTL_USE_CLKG_MASK		(0x3 << 20)
+-#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT	(16)
+-#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK		(0x3 << 16)
+-#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT		(12)
+-#define _PSB_C_CLKGATECTL_TA_CLKG_MASK		(0x3 << 12)
+-#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT	(8)
+-#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK		(0x3 << 8)
+-#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT	(4)
+-#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK		(0x3 << 4)
+-#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT		(0)
+-#define _PSB_C_CLKGATECTL_2D_CLKG_MASK		(0x3 << 0)
+-#define _PSB_C_CLKGATECTL_CLKG_ENABLED		(0)
+-#define _PSB_C_CLKGATECTL_CLKG_DISABLED		(1)
+-#define _PSB_C_CLKGATECTL_CLKG_AUTO		(2)
+-
+-#define PSB_CR_CORE_ID			0x0010
+-#define _PSB_CC_ID_ID_SHIFT			(16)
+-#define _PSB_CC_ID_ID_MASK			(0xFFFF << 16)
+-#define _PSB_CC_ID_CONFIG_SHIFT			(0)
+-#define _PSB_CC_ID_CONFIG_MASK			(0xFFFF << 0)
+-
+-#define PSB_CR_CORE_REVISION		0x0014
+-#define _PSB_CC_REVISION_DESIGNER_SHIFT		(24)
+-#define _PSB_CC_REVISION_DESIGNER_MASK		(0xFF << 24)
+-#define _PSB_CC_REVISION_MAJOR_SHIFT		(16)
+-#define _PSB_CC_REVISION_MAJOR_MASK		(0xFF << 16)
+-#define _PSB_CC_REVISION_MINOR_SHIFT		(8)
+-#define _PSB_CC_REVISION_MINOR_MASK		(0xFF << 8)
+-#define _PSB_CC_REVISION_MAINTENANCE_SHIFT	(0)
+-#define _PSB_CC_REVISION_MAINTENANCE_MASK	(0xFF << 0)
+-
+-#define PSB_CR_DESIGNER_REV_FIELD1	0x0018
+-
+-#define PSB_CR_SOFT_RESET		0x0080
+-#define _PSB_CS_RESET_TSP_RESET		(1 << 6)
+-#define _PSB_CS_RESET_ISP_RESET		(1 << 5)
+-#define _PSB_CS_RESET_USE_RESET		(1 << 4)
+-#define _PSB_CS_RESET_TA_RESET		(1 << 3)
+-#define _PSB_CS_RESET_DPM_RESET		(1 << 2)
+-#define _PSB_CS_RESET_TWOD_RESET	(1 << 1)
+-#define _PSB_CS_RESET_BIF_RESET			(1 << 0)
+-
+-#define PSB_CR_DESIGNER_REV_FIELD2	0x001C
+-
+-#define PSB_CR_EVENT_HOST_ENABLE2	0x0110
+-
+-#define PSB_CR_EVENT_STATUS2		0x0118
+-
+-#define PSB_CR_EVENT_HOST_CLEAR2	0x0114
+-#define _PSB_CE2_BIF_REQUESTER_FAULT		(1 << 4)
+-
+-#define PSB_CR_EVENT_STATUS		0x012C
+-
+-#define PSB_CR_EVENT_HOST_ENABLE	0x0130
+-
+-#define PSB_CR_EVENT_HOST_CLEAR		0x0134
+-#define _PSB_CE_MASTER_INTERRUPT		(1 << 31)
+-#define _PSB_CE_TA_DPM_FAULT			(1 << 28)
+-#define _PSB_CE_TWOD_COMPLETE			(1 << 27)
+-#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS		(1 << 25)
+-#define _PSB_CE_DPM_TA_MEM_FREE			(1 << 24)
+-#define _PSB_CE_PIXELBE_END_RENDER		(1 << 18)
+-#define _PSB_CE_SW_EVENT			(1 << 14)
+-#define _PSB_CE_TA_FINISHED			(1 << 13)
+-#define _PSB_CE_TA_TERMINATE			(1 << 12)
+-#define _PSB_CE_DPM_REACHED_MEM_THRESH		(1 << 3)
+-#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL		(1 << 2)
+-#define _PSB_CE_DPM_OUT_OF_MEMORY_MT		(1 << 1)
+-#define _PSB_CE_DPM_3D_MEM_FREE			(1 << 0)
+-
+-
+-#define PSB_USE_OFFSET_MASK		0x0007FFFF
+-#define PSB_USE_OFFSET_SIZE		(PSB_USE_OFFSET_MASK + 1)
+-#define PSB_CR_USE_CODE_BASE0		0x0A0C
+-#define PSB_CR_USE_CODE_BASE1		0x0A10
+-#define PSB_CR_USE_CODE_BASE2		0x0A14
+-#define PSB_CR_USE_CODE_BASE3		0x0A18
+-#define PSB_CR_USE_CODE_BASE4		0x0A1C
+-#define PSB_CR_USE_CODE_BASE5		0x0A20
+-#define PSB_CR_USE_CODE_BASE6		0x0A24
+-#define PSB_CR_USE_CODE_BASE7		0x0A28
+-#define PSB_CR_USE_CODE_BASE8		0x0A2C
+-#define PSB_CR_USE_CODE_BASE9		0x0A30
+-#define PSB_CR_USE_CODE_BASE10		0x0A34
+-#define PSB_CR_USE_CODE_BASE11		0x0A38
+-#define PSB_CR_USE_CODE_BASE12		0x0A3C
+-#define PSB_CR_USE_CODE_BASE13		0x0A40
+-#define PSB_CR_USE_CODE_BASE14		0x0A44
+-#define PSB_CR_USE_CODE_BASE15		0x0A48
+-#define PSB_CR_USE_CODE_BASE(_i)	(0x0A0C + ((_i) << 2))
+-#define _PSB_CUC_BASE_DM_SHIFT			(25)
+-#define _PSB_CUC_BASE_DM_MASK			(0x3 << 25)
+-#define _PSB_CUC_BASE_ADDR_SHIFT		(0)	/* 1024-bit aligned address? */
+-#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT		(7)
+-#define _PSB_CUC_BASE_ADDR_MASK			(0x1FFFFFF << 0)
+-#define _PSB_CUC_DM_VERTEX			(0)
+-#define _PSB_CUC_DM_PIXEL			(1)
+-#define _PSB_CUC_DM_RESERVED			(2)
+-#define _PSB_CUC_DM_EDM				(3)
+-
+-#define PSB_CR_PDS_EXEC_BASE		0x0AB8
+-#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT	(20)	/* 1MB aligned address */
+-#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT	(20)
+-
+-#define PSB_CR_EVENT_KICKER		0x0AC4
+-#define _PSB_CE_KICKER_ADDRESS_SHIFT		(4)	/* 128-bit aligned address */
+-
+-#define PSB_CR_EVENT_KICK		0x0AC8
+-#define _PSB_CE_KICK_NOW			(1 << 0)
+-
+-#define PSB_CR_BIF_DIR_LIST_BASE1	0x0C38
+-
+-#define PSB_CR_BIF_CTRL			0x0C00
+-#define _PSB_CB_CTRL_CLEAR_FAULT		(1 << 4)
+-#define _PSB_CB_CTRL_INVALDC			(1 << 3)
+-#define _PSB_CB_CTRL_FLUSH			(1 << 2)
+-
+-#define PSB_CR_BIF_INT_STAT		0x0C04
+-
+-#define PSB_CR_BIF_FAULT		0x0C08
+-#define _PSB_CBI_STAT_PF_N_RW			(1 << 14)
+-#define _PSB_CBI_STAT_FAULT_SHIFT		(0)
+-#define _PSB_CBI_STAT_FAULT_MASK		(0x3FFF << 0)
+-#define _PSB_CBI_STAT_FAULT_CACHE		(1 << 1)
+-#define _PSB_CBI_STAT_FAULT_TA			(1 << 2)
+-#define _PSB_CBI_STAT_FAULT_VDM			(1 << 3)
+-#define _PSB_CBI_STAT_FAULT_2D			(1 << 4)
+-#define _PSB_CBI_STAT_FAULT_PBE			(1 << 5)
+-#define _PSB_CBI_STAT_FAULT_TSP			(1 << 6)
+-#define _PSB_CBI_STAT_FAULT_ISP			(1 << 7)
+-#define _PSB_CBI_STAT_FAULT_USSEPDS		(1 << 8)
+-#define _PSB_CBI_STAT_FAULT_HOST		(1 << 9)
+-
+-#define PSB_CR_BIF_BANK0		0x0C78
+-#define PSB_CR_BIF_BANK1		0x0C7C
+-#define PSB_CR_BIF_DIR_LIST_BASE0	0x0C84
+-#define PSB_CR_BIF_TWOD_REQ_BASE	0x0C88
+-#define PSB_CR_BIF_3D_REQ_BASE		0x0CAC
+-
+-#define PSB_CR_2D_SOCIF			0x0E18
+-#define _PSB_C2_SOCIF_FREESPACE_SHIFT		(0)
+-#define _PSB_C2_SOCIF_FREESPACE_MASK		(0xFF << 0)
+-#define _PSB_C2_SOCIF_EMPTY			(0x80 << 0)
+-
+-#define PSB_CR_2D_BLIT_STATUS		0x0E04
+-#define _PSB_C2B_STATUS_BUSY			(1 << 24)
+-#define _PSB_C2B_STATUS_COMPLETE_SHIFT		(0)
+-#define _PSB_C2B_STATUS_COMPLETE_MASK		(0xFFFFFF << 0)
+-
+-/*
+- * 2D defs.
+- */
+-
+-/*
+- * 2D Slave Port Data : Block Header's Object Type
+- */
+-
+-#define	PSB_2D_CLIP_BH			(0x00000000)
+-#define	PSB_2D_PAT_BH			(0x10000000)
+-#define	PSB_2D_CTRL_BH			(0x20000000)
+-#define	PSB_2D_SRC_OFF_BH		(0x30000000)
+-#define	PSB_2D_MASK_OFF_BH		(0x40000000)
+-#define	PSB_2D_RESERVED1_BH		(0x50000000)
+-#define	PSB_2D_RESERVED2_BH		(0x60000000)
+-#define	PSB_2D_FENCE_BH			(0x70000000)
+-#define	PSB_2D_BLIT_BH			(0x80000000)
+-#define	PSB_2D_SRC_SURF_BH		(0x90000000)
+-#define	PSB_2D_DST_SURF_BH		(0xA0000000)
+-#define	PSB_2D_PAT_SURF_BH		(0xB0000000)
+-#define	PSB_2D_SRC_PAL_BH		(0xC0000000)
+-#define	PSB_2D_PAT_PAL_BH		(0xD0000000)
+-#define	PSB_2D_MASK_SURF_BH		(0xE0000000)
+-#define	PSB_2D_FLUSH_BH			(0xF0000000)
+-
+-/*
+- * Clip Definition block (PSB_2D_CLIP_BH)
+- */
+-#define PSB_2D_CLIPCOUNT_MAX		(1)
+-#define PSB_2D_CLIPCOUNT_MASK		(0x00000000)
+-#define PSB_2D_CLIPCOUNT_CLRMASK	(0xFFFFFFFF)
+-#define PSB_2D_CLIPCOUNT_SHIFT		(0)
+-/* clip rectangle min & max */
+-#define PSB_2D_CLIP_XMAX_MASK		(0x00FFF000)
+-#define PSB_2D_CLIP_XMAX_CLRMASK	(0xFF000FFF)
+-#define PSB_2D_CLIP_XMAX_SHIFT		(12)
+-#define PSB_2D_CLIP_XMIN_MASK		(0x00000FFF)
+-#define PSB_2D_CLIP_XMIN_CLRMASK	(0x00FFF000)
+-#define PSB_2D_CLIP_XMIN_SHIFT		(0)
+-/* clip rectangle offset */
+-#define PSB_2D_CLIP_YMAX_MASK		(0x00FFF000)
+-#define PSB_2D_CLIP_YMAX_CLRMASK	(0xFF000FFF)
+-#define PSB_2D_CLIP_YMAX_SHIFT		(12)
+-#define PSB_2D_CLIP_YMIN_MASK		(0x00000FFF)
+-#define PSB_2D_CLIP_YMIN_CLRMASK	(0x00FFF000)
+-#define PSB_2D_CLIP_YMIN_SHIFT		(0)
+-
+-/*
+- * Pattern Control (PSB_2D_PAT_BH)
+- */
+-#define PSB_2D_PAT_HEIGHT_MASK		(0x0000001F)
+-#define PSB_2D_PAT_HEIGHT_SHIFT		(0)
+-#define PSB_2D_PAT_WIDTH_MASK		(0x000003E0)
+-#define PSB_2D_PAT_WIDTH_SHIFT		(5)
+-#define PSB_2D_PAT_YSTART_MASK		(0x00007C00)
+-#define PSB_2D_PAT_YSTART_SHIFT		(10)
+-#define PSB_2D_PAT_XSTART_MASK		(0x000F8000)
+-#define PSB_2D_PAT_XSTART_SHIFT		(15)
+-
+-/*
+- * 2D Control block (PSB_2D_CTRL_BH)
+- */
+-/* Present Flags */
+-#define PSB_2D_SRCCK_CTRL		(0x00000001)
+-#define PSB_2D_DSTCK_CTRL		(0x00000002)
+-#define PSB_2D_ALPHA_CTRL		(0x00000004)
+-/* Colour Key Colour (SRC/DST)*/
+-#define PSB_2D_CK_COL_MASK		(0xFFFFFFFF)
+-#define PSB_2D_CK_COL_CLRMASK		(0x00000000)
+-#define PSB_2D_CK_COL_SHIFT		(0)
+-/* Colour Key Mask (SRC/DST)*/
+-#define PSB_2D_CK_MASK_MASK		(0xFFFFFFFF)
+-#define PSB_2D_CK_MASK_CLRMASK		(0x00000000)
+-#define PSB_2D_CK_MASK_SHIFT		(0)
+-/* Alpha Control (Alpha/RGB)*/
+-#define PSB_2D_GBLALPHA_MASK		(0x000FF000)
+-#define PSB_2D_GBLALPHA_CLRMASK		(0xFFF00FFF)
+-#define PSB_2D_GBLALPHA_SHIFT		(12)
+-#define PSB_2D_SRCALPHA_OP_MASK		(0x00700000)
+-#define PSB_2D_SRCALPHA_OP_CLRMASK	(0xFF8FFFFF)
+-#define PSB_2D_SRCALPHA_OP_SHIFT	(20)
+-#define PSB_2D_SRCALPHA_OP_ONE		(0x00000000)
+-#define PSB_2D_SRCALPHA_OP_SRC		(0x00100000)
+-#define PSB_2D_SRCALPHA_OP_DST		(0x00200000)
+-#define PSB_2D_SRCALPHA_OP_SG		(0x00300000)
+-#define PSB_2D_SRCALPHA_OP_DG		(0x00400000)
+-#define PSB_2D_SRCALPHA_OP_GBL		(0x00500000)
+-#define PSB_2D_SRCALPHA_OP_ZERO		(0x00600000)
+-#define PSB_2D_SRCALPHA_INVERT		(0x00800000)
+-#define PSB_2D_SRCALPHA_INVERT_CLR	(0xFF7FFFFF)
+-#define PSB_2D_DSTALPHA_OP_MASK		(0x07000000)
+-#define PSB_2D_DSTALPHA_OP_CLRMASK	(0xF8FFFFFF)
+-#define PSB_2D_DSTALPHA_OP_SHIFT	(24)
+-#define PSB_2D_DSTALPHA_OP_ONE		(0x00000000)
+-#define PSB_2D_DSTALPHA_OP_SRC		(0x01000000)
+-#define PSB_2D_DSTALPHA_OP_DST		(0x02000000)
+-#define PSB_2D_DSTALPHA_OP_SG		(0x03000000)
+-#define PSB_2D_DSTALPHA_OP_DG		(0x04000000)
+-#define PSB_2D_DSTALPHA_OP_GBL		(0x05000000)
+-#define PSB_2D_DSTALPHA_OP_ZERO		(0x06000000)
+-#define PSB_2D_DSTALPHA_INVERT		(0x08000000)
+-#define PSB_2D_DSTALPHA_INVERT_CLR	(0xF7FFFFFF)
+-
+-#define PSB_2D_PRE_MULTIPLICATION_ENABLE	(0x10000000)
+-#define PSB_2D_PRE_MULTIPLICATION_CLRMASK	(0xEFFFFFFF)
+-#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE		(0x20000000)
+-#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK	(0xDFFFFFFF)
+-
+-/*
+- *Source Offset (PSB_2D_SRC_OFF_BH)
+- */
+-#define PSB_2D_SRCOFF_XSTART_MASK	((0x00000FFF) << 12)
+-#define PSB_2D_SRCOFF_XSTART_SHIFT	(12)
+-#define PSB_2D_SRCOFF_YSTART_MASK	(0x00000FFF)
+-#define PSB_2D_SRCOFF_YSTART_SHIFT	(0)
+-
+-/*
+- * Mask Offset (PSB_2D_MASK_OFF_BH)
+- */
+-#define PSB_2D_MASKOFF_XSTART_MASK	((0x00000FFF) << 12)
+-#define PSB_2D_MASKOFF_XSTART_SHIFT	(12)
+-#define PSB_2D_MASKOFF_YSTART_MASK	(0x00000FFF)
+-#define PSB_2D_MASKOFF_YSTART_SHIFT	(0)
+-
+-/*
+- * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
+- */
+-
+-/*
+- *Blit Rectangle (PSB_2D_BLIT_BH)
+- */
+-
+-#define PSB_2D_ROT_MASK			(3 << 25)
+-#define PSB_2D_ROT_CLRMASK		(~PSB_2D_ROT_MASK)
+-#define PSB_2D_ROT_NONE			(0 << 25)
+-#define PSB_2D_ROT_90DEGS		(1 << 25)
+-#define PSB_2D_ROT_180DEGS		(2 << 25)
+-#define PSB_2D_ROT_270DEGS		(3 << 25)
+-
+-#define PSB_2D_COPYORDER_MASK		(3 << 23)
+-#define PSB_2D_COPYORDER_CLRMASK	(~PSB_2D_COPYORDER_MASK)
+-#define PSB_2D_COPYORDER_TL2BR		(0 << 23)
+-#define PSB_2D_COPYORDER_BR2TL		(1 << 23)
+-#define PSB_2D_COPYORDER_TR2BL		(2 << 23)
+-#define PSB_2D_COPYORDER_BL2TR		(3 << 23)
+-
+-#define PSB_2D_DSTCK_CLRMASK		(0xFF9FFFFF)
+-#define PSB_2D_DSTCK_DISABLE		(0x00000000)
+-#define PSB_2D_DSTCK_PASS		(0x00200000)
+-#define PSB_2D_DSTCK_REJECT		(0x00400000)
+-
+-#define PSB_2D_SRCCK_CLRMASK		(0xFFE7FFFF)
+-#define PSB_2D_SRCCK_DISABLE		(0x00000000)
+-#define PSB_2D_SRCCK_PASS		(0x00080000)
+-#define PSB_2D_SRCCK_REJECT		(0x00100000)
+-
+-#define PSB_2D_CLIP_ENABLE		(0x00040000)
+-
+-#define PSB_2D_ALPHA_ENABLE		(0x00020000)
+-
+-#define PSB_2D_PAT_CLRMASK		(0xFFFEFFFF)
+-#define PSB_2D_PAT_MASK			(0x00010000)
+-#define PSB_2D_USE_PAT			(0x00010000)
+-#define PSB_2D_USE_FILL			(0x00000000)
+-/*
+- * Tungsten Graphics note on rop codes: If rop A and rop B are
+- * identical, the mask surface will not be read and need not be
+- * set up.
+- */
+-
+-#define PSB_2D_ROP3B_MASK		(0x0000FF00)
+-#define PSB_2D_ROP3B_CLRMASK		(0xFFFF00FF)
+-#define PSB_2D_ROP3B_SHIFT		(8)
+-/* rop code A */
+-#define PSB_2D_ROP3A_MASK		(0x000000FF)
+-#define PSB_2D_ROP3A_CLRMASK		(0xFFFFFF00)
+-#define PSB_2D_ROP3A_SHIFT		(0)
+-
+-#define PSB_2D_ROP4_MASK		(0x0000FFFF)
+-/*
+- *	DWORD0:	(Only pass if Pattern control == Use Fill Colour)
+- *	Fill Colour RGBA8888
+- */
+-#define PSB_2D_FILLCOLOUR_MASK		(0xFFFFFFFF)
+-#define PSB_2D_FILLCOLOUR_SHIFT		(0)
+-/*
+- *	DWORD1: (Always Present)
+- *	X Start (Dest)
+- *	Y Start (Dest)
+- */
+-#define PSB_2D_DST_XSTART_MASK		(0x00FFF000)
+-#define PSB_2D_DST_XSTART_CLRMASK	(0xFF000FFF)
+-#define PSB_2D_DST_XSTART_SHIFT		(12)
+-#define PSB_2D_DST_YSTART_MASK		(0x00000FFF)
+-#define PSB_2D_DST_YSTART_CLRMASK	(0xFFFFF000)
+-#define PSB_2D_DST_YSTART_SHIFT		(0)
+-/*
+- *	DWORD2: (Always Present)
+- *	X Size (Dest)
+- *	Y Size (Dest)
+- */
+-#define PSB_2D_DST_XSIZE_MASK		(0x00FFF000)
+-#define PSB_2D_DST_XSIZE_CLRMASK	(0xFF000FFF)
+-#define PSB_2D_DST_XSIZE_SHIFT		(12)
+-#define PSB_2D_DST_YSIZE_MASK		(0x00000FFF)
+-#define PSB_2D_DST_YSIZE_CLRMASK	(0xFFFFF000)
+-#define PSB_2D_DST_YSIZE_SHIFT		(0)
+-
+-/*
+- * Source Surface (PSB_2D_SRC_SURF_BH)
+- */
+-/*
+- * WORD 0
+- */
+-
+-#define PSB_2D_SRC_FORMAT_MASK		(0x00078000)
+-#define PSB_2D_SRC_1_PAL		(0x00000000)
+-#define PSB_2D_SRC_2_PAL		(0x00008000)
+-#define PSB_2D_SRC_4_PAL		(0x00010000)
+-#define PSB_2D_SRC_8_PAL		(0x00018000)
+-#define PSB_2D_SRC_8_ALPHA		(0x00020000)
+-#define PSB_2D_SRC_4_ALPHA		(0x00028000)
+-#define PSB_2D_SRC_332RGB		(0x00030000)
+-#define PSB_2D_SRC_4444ARGB		(0x00038000)
+-#define PSB_2D_SRC_555RGB		(0x00040000)
+-#define PSB_2D_SRC_1555ARGB		(0x00048000)
+-#define PSB_2D_SRC_565RGB		(0x00050000)
+-#define PSB_2D_SRC_0888ARGB		(0x00058000)
+-#define PSB_2D_SRC_8888ARGB		(0x00060000)
+-#define PSB_2D_SRC_8888UYVY		(0x00068000)
+-#define PSB_2D_SRC_RESERVED		(0x00070000)
+-#define PSB_2D_SRC_1555ARGB_LOOKUP	(0x00078000)
+-
+-
+-#define PSB_2D_SRC_STRIDE_MASK		(0x00007FFF)
+-#define PSB_2D_SRC_STRIDE_CLRMASK	(0xFFFF8000)
+-#define PSB_2D_SRC_STRIDE_SHIFT		(0)
+-/*
+- *  WORD 1 - Base Address
+- */
+-#define PSB_2D_SRC_ADDR_MASK		(0x0FFFFFFC)
+-#define PSB_2D_SRC_ADDR_CLRMASK		(0x00000003)
+-#define PSB_2D_SRC_ADDR_SHIFT		(2)
+-#define PSB_2D_SRC_ADDR_ALIGNSHIFT	(2)
+-
+-/*
+- * Pattern Surface (PSB_2D_PAT_SURF_BH)
+- */
+-/*
+- *  WORD 0
+- */
+-
+-#define PSB_2D_PAT_FORMAT_MASK		(0x00078000)
+-#define PSB_2D_PAT_1_PAL		(0x00000000)
+-#define PSB_2D_PAT_2_PAL		(0x00008000)
+-#define PSB_2D_PAT_4_PAL		(0x00010000)
+-#define PSB_2D_PAT_8_PAL		(0x00018000)
+-#define PSB_2D_PAT_8_ALPHA		(0x00020000)
+-#define PSB_2D_PAT_4_ALPHA		(0x00028000)
+-#define PSB_2D_PAT_332RGB		(0x00030000)
+-#define PSB_2D_PAT_4444ARGB		(0x00038000)
+-#define PSB_2D_PAT_555RGB		(0x00040000)
+-#define PSB_2D_PAT_1555ARGB		(0x00048000)
+-#define PSB_2D_PAT_565RGB		(0x00050000)
+-#define PSB_2D_PAT_0888ARGB		(0x00058000)
+-#define PSB_2D_PAT_8888ARGB		(0x00060000)
+-
+-#define PSB_2D_PAT_STRIDE_MASK		(0x00007FFF)
+-#define PSB_2D_PAT_STRIDE_CLRMASK	(0xFFFF8000)
+-#define PSB_2D_PAT_STRIDE_SHIFT		(0)
+-/*
+- *  WORD 1 - Base Address
+- */
+-#define PSB_2D_PAT_ADDR_MASK		(0x0FFFFFFC)
+-#define PSB_2D_PAT_ADDR_CLRMASK		(0x00000003)
+-#define PSB_2D_PAT_ADDR_SHIFT		(2)
+-#define PSB_2D_PAT_ADDR_ALIGNSHIFT	(2)
+-
+-/*
+- * Destination Surface (PSB_2D_DST_SURF_BH)
+- */
+-/*
+- * WORD 0
+- */
+-
+-#define PSB_2D_DST_FORMAT_MASK		(0x00078000)
+-#define PSB_2D_DST_332RGB		(0x00030000)
+-#define PSB_2D_DST_4444ARGB		(0x00038000)
+-#define PSB_2D_DST_555RGB		(0x00040000)
+-#define PSB_2D_DST_1555ARGB		(0x00048000)
+-#define PSB_2D_DST_565RGB		(0x00050000)
+-#define PSB_2D_DST_0888ARGB		(0x00058000)
+-#define PSB_2D_DST_8888ARGB		(0x00060000)
+-#define PSB_2D_DST_8888AYUV		(0x00070000)
+-
+-#define PSB_2D_DST_STRIDE_MASK		(0x00007FFF)
+-#define PSB_2D_DST_STRIDE_CLRMASK	(0xFFFF8000)
+-#define PSB_2D_DST_STRIDE_SHIFT		(0)
+-/*
+- * WORD 1 - Base Address
+- */
+-#define PSB_2D_DST_ADDR_MASK		(0x0FFFFFFC)
+-#define PSB_2D_DST_ADDR_CLRMASK		(0x00000003)
+-#define PSB_2D_DST_ADDR_SHIFT		(2)
+-#define PSB_2D_DST_ADDR_ALIGNSHIFT	(2)
+-
+-/*
+- * Mask Surface (PSB_2D_MASK_SURF_BH)
+- */
+-/*
+- * WORD 0
+- */
+-#define PSB_2D_MASK_STRIDE_MASK		(0x00007FFF)
+-#define PSB_2D_MASK_STRIDE_CLRMASK	(0xFFFF8000)
+-#define PSB_2D_MASK_STRIDE_SHIFT	(0)
+-/*
+- *  WORD 1 - Base Address
+- */
+-#define PSB_2D_MASK_ADDR_MASK		(0x0FFFFFFC)
+-#define PSB_2D_MASK_ADDR_CLRMASK	(0x00000003)
+-#define PSB_2D_MASK_ADDR_SHIFT		(2)
+-#define PSB_2D_MASK_ADDR_ALIGNSHIFT	(2)
+-
+-/*
+- * Source Palette (PSB_2D_SRC_PAL_BH)
+- */
+-
+-#define PSB_2D_SRCPAL_ADDR_SHIFT	(0)
+-#define PSB_2D_SRCPAL_ADDR_CLRMASK	(0xF0000007)
+-#define PSB_2D_SRCPAL_ADDR_MASK		(0x0FFFFFF8)
+-#define PSB_2D_SRCPAL_BYTEALIGN		(1024)
+-
+-/*
+- * Pattern Palette (PSB_2D_PAT_PAL_BH)
+- */
+-
+-#define PSB_2D_PATPAL_ADDR_SHIFT	(0)
+-#define PSB_2D_PATPAL_ADDR_CLRMASK	(0xF0000007)
+-#define PSB_2D_PATPAL_ADDR_MASK		(0x0FFFFFF8)
+-#define PSB_2D_PATPAL_BYTEALIGN		(1024)
+-
+-/*
+- * Rop3 Codes (2 LS bytes)
+- */
+-
+-#define PSB_2D_ROP3_SRCCOPY		(0xCCCC)
+-#define PSB_2D_ROP3_PATCOPY		(0xF0F0)
+-#define PSB_2D_ROP3_WHITENESS		(0xFFFF)
+-#define PSB_2D_ROP3_BLACKNESS		(0x0000)
+-#define PSB_2D_ROP3_SRC			(0xCC)
+-#define PSB_2D_ROP3_PAT			(0xF0)
+-#define PSB_2D_ROP3_DST			(0xAA)
+-
+-/*
+- * Sizes.
+- */
+-
+-#define PSB_SCENE_HW_COOKIE_SIZE	16
+-#define PSB_TA_MEM_HW_COOKIE_SIZE	16
+-
+-/*
+- * Scene stuff.
+- */
+-
+-#define PSB_NUM_HW_SCENES		2
+-
+-/*
+- * Scheduler completion actions.
+- */
+-
+-#define PSB_RASTER_BLOCK		0
+-#define PSB_RASTER			1
+-#define PSB_RETURN			2
+-#define PSB_TA				3
+-
+-/* Power management */
+-#define PSB_PUNIT_PORT			0x04
+-#define PSB_OSPMBA			0x78
+-#define PSB_APMBA			0x7a
+-#define PSB_APM_CMD			0x0
+-#define PSB_APM_STS			0x04
+-#define PSB_PWRGT_VID_ENC_MASK		0x30
+-#define PSB_PWRGT_VID_DEC_MASK		0xc
+-#define PSB_PWRGT_GL3_MASK		0xc0
+-
+-#define PSB_PM_SSC			0x20
+-#define PSB_PM_SSS			0x30
+-#define PSB_PWRGT_DISPLAY_MASK		0xc /*on a different BA than video/gfx*/
+-#define MDFLD_PWRGT_DISPLAY_A_CNTR	0x0000000c
+-#define MDFLD_PWRGT_DISPLAY_B_CNTR	0x0000c000
+-#define MDFLD_PWRGT_DISPLAY_C_CNTR	0x00030000
+-#define MDFLD_PWRGT_DISP_MIPI_CNTR	0x000c0000
+-#define MDFLD_PWRGT_DISPLAY_CNTR    (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */
+-/* Display SSS register bits are different in A0 vs. B0 */
+-#define PSB_PWRGT_GFX_MASK		0x3
+-#define MDFLD_PWRGT_DISPLAY_A_STS	0x000000c0
+-#define MDFLD_PWRGT_DISPLAY_B_STS	0x00000300
+-#define MDFLD_PWRGT_DISPLAY_C_STS	0x00000c00
+-#define PSB_PWRGT_GFX_MASK_B0		0xc3
+-#define MDFLD_PWRGT_DISPLAY_A_STS_B0	0x0000000c
+-#define MDFLD_PWRGT_DISPLAY_B_STS_B0	0x0000c000
+-#define MDFLD_PWRGT_DISPLAY_C_STS_B0	0x00030000
+-#define MDFLD_PWRGT_DISP_MIPI_STS	0x000c0000
+-#define MDFLD_PWRGT_DISPLAY_STS_A0    (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
+-#define MDFLD_PWRGT_DISPLAY_STS_B0    (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
+-#endif
+-- 
+1.7.10.4
+

Added: dists/sid/linux/debian/patches/features/all/drm/Revert-VM-add-vm_mmap-helper-function.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/features/all/drm/Revert-VM-add-vm_mmap-helper-function.patch	Wed Feb  6 03:52:57 2013	(r19786)
@@ -0,0 +1,100 @@
+From 97d4bb2582eba9bbb9d0a283f5be6fa358421914 Mon Sep 17 00:00:00 2001
+From: Julien Cristau <jcristau at debian.org>
+Date: Wed, 22 Aug 2012 18:56:22 +0200
+Subject: [PATCH 2/7] Revert "VM: add "vm_mmap()" helper function"
+
+This reverts commit 6be5ceb02e98eaf6cfc4f8b12a896d04023f340d.
+
+Conflicts:
+	include/linux/mm.h
+---
+ drivers/gpu/drm/drm_bufs.c              |   12 ++++++++----
+ drivers/gpu/drm/exynos/exynos_drm_gem.c |    4 +++-
+ drivers/gpu/drm/i810/i810_dma.c         |    1 -
+ drivers/gpu/drm/i915/i915_gem.c         |    4 +++-
+ 4 files changed, 14 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
+index 348b367..30372f7 100644
+--- a/drivers/gpu/drm/drm_bufs.c
++++ b/drivers/gpu/drm/drm_bufs.c
+@@ -1510,8 +1510,8 @@ int drm_freebufs(struct drm_device *dev, void *data,
+  * \param arg pointer to a drm_buf_map structure.
+  * \return zero on success or a negative number on failure.
+  *
+- * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
+- * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
++ * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
++ * about each buffer into user space. For PCI buffers, it calls do_mmap() with
+  * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls
+  * drm_mmap_dma().
+  */
+@@ -1553,14 +1553,18 @@ int drm_mapbufs(struct drm_device *dev, void *data,
+ 				retcode = -EINVAL;
+ 				goto done;
+ 			}
+-			virtual = vm_mmap(file_priv->filp, 0, map->size,
++			down_write(&current->mm->mmap_sem);
++			virtual = do_mmap(file_priv->filp, 0, map->size,
+ 					  PROT_READ | PROT_WRITE,
+ 					  MAP_SHARED,
+ 					  token);
++			up_write(&current->mm->mmap_sem);
+ 		} else {
+-			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
++			down_write(&current->mm->mmap_sem);
++			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
+ 					  PROT_READ | PROT_WRITE,
+ 					  MAP_SHARED, 0);
++			up_write(&current->mm->mmap_sem);
+ 		}
+ 		if (virtual > -1024UL) {
+ 			/* Real error */
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
+index 1dffa83..01139c8 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
+@@ -573,8 +573,10 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ 	obj->filp->f_op = &exynos_drm_gem_fops;
+ 	obj->filp->private_data = obj;
+ 
+-	addr = vm_mmap(obj->filp, 0, args->size,
++	down_write(&current->mm->mmap_sem);
++	addr = do_mmap(obj->filp, 0, args->size,
+ 			PROT_READ | PROT_WRITE, MAP_SHARED, 0);
++	up_write(&current->mm->mmap_sem);
+ 
+ 	drm_gem_object_unreference_unlocked(obj);
+ 
+diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
+index f920fb5..877a498 100644
+--- a/drivers/gpu/drm/i810/i810_dma.c
++++ b/drivers/gpu/drm/i810/i810_dma.c
+@@ -129,7 +129,6 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
+ 	if (buf_priv->currently_mapped == I810_BUF_MAPPED)
+ 		return -EINVAL;
+ 
+-	/* This is all entirely broken */
+ 	down_write(&current->mm->mmap_sem);
+ 	old_fops = file_priv->filp->f_op;
+ 	file_priv->filp->f_op = &i810_buffer_fops;
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 0d1e4b7..0e3c6ac 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1087,9 +1087,11 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ 	if (obj == NULL)
+ 		return -ENOENT;
+ 
+-	addr = vm_mmap(obj->filp, 0, args->size,
++	down_write(&current->mm->mmap_sem);
++	addr = do_mmap(obj->filp, 0, args->size,
+ 		       PROT_READ | PROT_WRITE, MAP_SHARED,
+ 		       args->offset);
++	up_write(&current->mm->mmap_sem);
+ 	drm_gem_object_unreference_unlocked(obj);
+ 	if (IS_ERR((void *)addr))
+ 		return addr;
+-- 
+1.7.10.4
+

Added: dists/sid/linux/debian/patches/features/all/drm/Revert-drm-base-prime-dma-buf-support-v5.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/features/all/drm/Revert-drm-base-prime-dma-buf-support-v5.patch	Wed Feb  6 03:52:57 2013	(r19786)
@@ -0,0 +1,591 @@
+From f19ae4812408f2c8b3f79396e28a6a994cf713ef Mon Sep 17 00:00:00 2001
+From: Julien Cristau <jcristau at debian.org>
+Date: Wed, 22 Aug 2012 19:17:05 +0200
+Subject: [PATCH 7/7] Revert "drm: base prime/dma-buf support (v5)"
+
+This reverts commit 3248877ea1796915419fba7c89315fdbf00cb56a.
+---
+ drivers/gpu/drm/Kconfig     |    1 -
+ drivers/gpu/drm/Makefile    |    2 +-
+ drivers/gpu/drm/drm_drv.c   |    4 -
+ drivers/gpu/drm/drm_fops.c  |    7 -
+ drivers/gpu/drm/drm_gem.c   |    9 --
+ drivers/gpu/drm/drm_prime.c |  304 -------------------------------------------
+ include/drm/drm.h           |   14 +-
+ include/drm/drmP.h          |   62 ---------
+ 8 files changed, 2 insertions(+), 401 deletions(-)
+ delete mode 100644 drivers/gpu/drm/drm_prime.c
+
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index e354bc0..cc11488 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -9,7 +9,6 @@ menuconfig DRM
+ 	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
+ 	select I2C
+ 	select I2C_ALGOBIT
+-	select DMA_SHARED_BUFFER
+ 	help
+ 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
+ 	  introduced in XFree86 4.0. If you say Y here, you need to select
+diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+index c20da5b..a858532 100644
+--- a/drivers/gpu/drm/Makefile
++++ b/drivers/gpu/drm/Makefile
+@@ -12,7 +12,7 @@ drm-y       :=	drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
+ 		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
+ 		drm_crtc.o drm_modes.o drm_edid.o \
+ 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
+-		drm_trace_points.o drm_global.o drm_prime.o
++		drm_trace_points.o drm_global.o
+ 
+ drm-$(CONFIG_COMPAT) += drm_ioc32.o
+ 
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 6116e3b..0b65fbc 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -136,10 +136,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
+ 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ 
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+-
+-	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
+-	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+-
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
+index 123de28..0db8d56 100644
+--- a/drivers/gpu/drm/drm_fops.c
++++ b/drivers/gpu/drm/drm_fops.c
+@@ -271,9 +271,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
+ 	if (dev->driver->driver_features & DRIVER_GEM)
+ 		drm_gem_open(dev, priv);
+ 
+-	if (drm_core_check_feature(dev, DRIVER_PRIME))
+-		drm_prime_init_file_private(&priv->prime);
+-
+ 	if (dev->driver->open) {
+ 		ret = dev->driver->open(dev, priv);
+ 		if (ret < 0)
+@@ -574,10 +571,6 @@ int drm_release(struct inode *inode, struct file *filp)
+ 
+ 	if (dev->driver->postclose)
+ 		dev->driver->postclose(dev, file_priv);
+-
+-	if (drm_core_check_feature(dev, DRIVER_PRIME))
+-		drm_prime_destroy_file_private(&file_priv->prime);
+-
+ 	kfree(file_priv);
+ 
+ 	/* ========================================================
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 83114b5..0ef358e 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -35,7 +35,6 @@
+ #include <linux/mman.h>
+ #include <linux/pagemap.h>
+ #include <linux/shmem_fs.h>
+-#include <linux/dma-buf.h>
+ #include "drmP.h"
+ 
+ /** @file drm_gem.c
+@@ -233,10 +232,6 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
+ 	idr_remove(&filp->object_idr, handle);
+ 	spin_unlock(&filp->table_lock);
+ 
+-	if (obj->import_attach)
+-		drm_prime_remove_imported_buf_handle(&filp->prime,
+-				obj->import_attach->dmabuf);
+-
+ 	if (dev->driver->gem_close_object)
+ 		dev->driver->gem_close_object(obj, filp);
+ 	drm_gem_object_handle_unreference_unlocked(obj);
+@@ -532,10 +527,6 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
+ 	struct drm_gem_object *obj = ptr;
+ 	struct drm_device *dev = obj->dev;
+ 
+-	if (obj->import_attach)
+-		drm_prime_remove_imported_buf_handle(&file_priv->prime,
+-				obj->import_attach->dmabuf);
+-
+ 	if (dev->driver->gem_close_object)
+ 		dev->driver->gem_close_object(obj, file_priv);
+ 
+diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
+deleted file mode 100644
+index 1bdf2b5..0000000
+--- a/drivers/gpu/drm/drm_prime.c
++++ /dev/null
+@@ -1,304 +0,0 @@
+-/*
+- * Copyright © 2012 Red Hat
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the next
+- * paragraph) shall be included in all copies or substantial portions of the
+- * Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+- * IN THE SOFTWARE.
+- *
+- * Authors:
+- *      Dave Airlie <airlied at redhat.com>
+- *      Rob Clark <rob.clark at linaro.org>
+- *
+- */
+-
+-#include <linux/export.h>
+-#include <linux/dma-buf.h>
+-#include "drmP.h"
+-
+-/*
+- * DMA-BUF/GEM Object references and lifetime overview:
+- *
+- * On the export the dma_buf holds a reference to the exporting GEM
+- * object. It takes this reference in handle_to_fd_ioctl, when it
+- * first calls .prime_export and stores the exporting GEM object in
+- * the dma_buf priv. This reference is released when the dma_buf
+- * object goes away in the driver .release function.
+- *
+- * On the import the importing GEM object holds a reference to the
+- * dma_buf (which in turn holds a ref to the exporting GEM object).
+- * It takes that reference in the fd_to_handle ioctl.
+- * It calls dma_buf_get, creates an attachment to it and stores the
+- * attachment in the GEM object. When this attachment is destroyed
+- * when the imported object is destroyed, we remove the attachment
+- * and drop the reference to the dma_buf.
+- *
+- * Thus the chain of references always flows in one direction
+- * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
+- *
+- * Self-importing: if userspace is using PRIME as a replacement for flink
+- * then it will get a fd->handle request for a GEM object that it created.
+- * Drivers should detect this situation and return back the gem object
+- * from the dma-buf private.
+- */
+-
+-struct drm_prime_member {
+-	struct list_head entry;
+-	struct dma_buf *dma_buf;
+-	uint32_t handle;
+-};
+-
+-int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+-		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+-		int *prime_fd)
+-{
+-	struct drm_gem_object *obj;
+-	void *buf;
+-
+-	obj = drm_gem_object_lookup(dev, file_priv, handle);
+-	if (!obj)
+-		return -ENOENT;
+-
+-	mutex_lock(&file_priv->prime.lock);
+-	/* re-export the original imported object */
+-	if (obj->import_attach) {
+-		get_dma_buf(obj->import_attach->dmabuf);
+-		*prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
+-		drm_gem_object_unreference_unlocked(obj);
+-		mutex_unlock(&file_priv->prime.lock);
+-		return 0;
+-	}
+-
+-	if (obj->export_dma_buf) {
+-		get_dma_buf(obj->export_dma_buf);
+-		*prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
+-		drm_gem_object_unreference_unlocked(obj);
+-	} else {
+-		buf = dev->driver->gem_prime_export(dev, obj, flags);
+-		if (IS_ERR(buf)) {
+-			/* normally the created dma-buf takes ownership of the ref,
+-			 * but if that fails then drop the ref
+-			 */
+-			drm_gem_object_unreference_unlocked(obj);
+-			mutex_unlock(&file_priv->prime.lock);
+-			return PTR_ERR(buf);
+-		}
+-		obj->export_dma_buf = buf;
+-		*prime_fd = dma_buf_fd(buf, flags);
+-	}
+-	mutex_unlock(&file_priv->prime.lock);
+-	return 0;
+-}
+-EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+-
+-int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+-		struct drm_file *file_priv, int prime_fd, uint32_t *handle)
+-{
+-	struct dma_buf *dma_buf;
+-	struct drm_gem_object *obj;
+-	int ret;
+-
+-	dma_buf = dma_buf_get(prime_fd);
+-	if (IS_ERR(dma_buf))
+-		return PTR_ERR(dma_buf);
+-
+-	mutex_lock(&file_priv->prime.lock);
+-
+-	ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
+-			dma_buf, handle);
+-	if (!ret) {
+-		ret = 0;
+-		goto out_put;
+-	}
+-
+-	/* never seen this one, need to import */
+-	obj = dev->driver->gem_prime_import(dev, dma_buf);
+-	if (IS_ERR(obj)) {
+-		ret = PTR_ERR(obj);
+-		goto out_put;
+-	}
+-
+-	ret = drm_gem_handle_create(file_priv, obj, handle);
+-	drm_gem_object_unreference_unlocked(obj);
+-	if (ret)
+-		goto out_put;
+-
+-	ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+-			dma_buf, *handle);
+-	if (ret)
+-		goto fail;
+-
+-	mutex_unlock(&file_priv->prime.lock);
+-	return 0;
+-
+-fail:
+-	/* hmm, if driver attached, we are relying on the free-object path
+-	 * to detach.. which seems ok..
+-	 */
+-	drm_gem_object_handle_unreference_unlocked(obj);
+-out_put:
+-	dma_buf_put(dma_buf);
+-	mutex_unlock(&file_priv->prime.lock);
+-	return ret;
+-}
+-EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
+-
+-int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+-				 struct drm_file *file_priv)
+-{
+-	struct drm_prime_handle *args = data;
+-	uint32_t flags;
+-
+-	if (!drm_core_check_feature(dev, DRIVER_PRIME))
+-		return -EINVAL;
+-
+-	if (!dev->driver->prime_handle_to_fd)
+-		return -ENOSYS;
+-
+-	/* check flags are valid */
+-	if (args->flags & ~DRM_CLOEXEC)
+-		return -EINVAL;
+-
+-	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
+-	flags = args->flags & DRM_CLOEXEC;
+-
+-	return dev->driver->prime_handle_to_fd(dev, file_priv,
+-			args->handle, flags, &args->fd);
+-}
+-
+-int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+-				 struct drm_file *file_priv)
+-{
+-	struct drm_prime_handle *args = data;
+-
+-	if (!drm_core_check_feature(dev, DRIVER_PRIME))
+-		return -EINVAL;
+-
+-	if (!dev->driver->prime_fd_to_handle)
+-		return -ENOSYS;
+-
+-	return dev->driver->prime_fd_to_handle(dev, file_priv,
+-			args->fd, &args->handle);
+-}
+-
+-/*
+- * drm_prime_pages_to_sg
+- *
+- * this helper creates an sg table object from a set of pages
+- * the driver is responsible for mapping the pages into the
+- * importers address space
+- */
+-struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
+-{
+-	struct sg_table *sg = NULL;
+-	struct scatterlist *iter;
+-	int i;
+-	int ret;
+-
+-	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+-	if (!sg)
+-		goto out;
+-
+-	ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL);
+-	if (ret)
+-		goto out;
+-
+-	for_each_sg(sg->sgl, iter, nr_pages, i)
+-		sg_set_page(iter, pages[i], PAGE_SIZE, 0);
+-
+-	return sg;
+-out:
+-	kfree(sg);
+-	return NULL;
+-}
+-EXPORT_SYMBOL(drm_prime_pages_to_sg);
+-
+-/* helper function to cleanup a GEM/prime object */
+-void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
+-{
+-	struct dma_buf_attachment *attach;
+-	struct dma_buf *dma_buf;
+-	attach = obj->import_attach;
+-	if (sg)
+-		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+-	dma_buf = attach->dmabuf;
+-	dma_buf_detach(attach->dmabuf, attach);
+-	/* remove the reference */
+-	dma_buf_put(dma_buf);
+-}
+-EXPORT_SYMBOL(drm_prime_gem_destroy);
+-
+-void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
+-{
+-	INIT_LIST_HEAD(&prime_fpriv->head);
+-	mutex_init(&prime_fpriv->lock);
+-}
+-EXPORT_SYMBOL(drm_prime_init_file_private);
+-
+-void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
+-{
+-	struct drm_prime_member *member, *safe;
+-	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+-		list_del(&member->entry);
+-		kfree(member);
+-	}
+-}
+-EXPORT_SYMBOL(drm_prime_destroy_file_private);
+-
+-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+-{
+-	struct drm_prime_member *member;
+-
+-	member = kmalloc(sizeof(*member), GFP_KERNEL);
+-	if (!member)
+-		return -ENOMEM;
+-
+-	member->dma_buf = dma_buf;
+-	member->handle = handle;
+-	list_add(&member->entry, &prime_fpriv->head);
+-	return 0;
+-}
+-EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
+-
+-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
+-{
+-	struct drm_prime_member *member;
+-
+-	list_for_each_entry(member, &prime_fpriv->head, entry) {
+-		if (member->dma_buf == dma_buf) {
+-			*handle = member->handle;
+-			return 0;
+-		}
+-	}
+-	return -ENOENT;
+-}
+-EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
+-
+-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
+-{
+-	struct drm_prime_member *member, *safe;
+-
+-	mutex_lock(&prime_fpriv->lock);
+-	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+-		if (member->dma_buf == dma_buf) {
+-			list_del(&member->entry);
+-			kfree(member);
+-		}
+-	}
+-	mutex_unlock(&prime_fpriv->lock);
+-}
+-EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
+diff --git a/include/drm/drm.h b/include/drm/drm.h
+index 64ff02d..34a7b89 100644
+--- a/include/drm/drm.h
++++ b/include/drm/drm.h
+@@ -617,17 +617,6 @@ struct drm_get_cap {
+ 	__u64 value;
+ };
+ 
+-#define DRM_CLOEXEC O_CLOEXEC
+-struct drm_prime_handle {
+-	__u32 handle;
+-
+-	/** Flags.. only applicable for handle->fd */
+-	__u32 flags;
+-
+-	/** Returned dmabuf file descriptor */
+-	__s32 fd;
+-};
+-
+ #include "drm_mode.h"
+ 
+ #define DRM_IOCTL_BASE			'd'
+@@ -684,8 +673,7 @@ struct drm_prime_handle {
+ #define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
+ #define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)
+ 
+-#define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
+-#define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)
++#define DRM_IOCTL_GEM_PRIME_OPEN        DRM_IOWR(0x2e, struct drm_gem_open)
+ 
+ #define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
+ #define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index dd73104..574bd1c 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -91,7 +91,6 @@ struct drm_device;
+ #define DRM_UT_CORE 		0x01
+ #define DRM_UT_DRIVER		0x02
+ #define DRM_UT_KMS		0x04
+-#define DRM_UT_PRIME		0x08
+ /*
+  * Three debug levels are defined.
+  * drm_core, drm_driver, drm_kms
+@@ -151,7 +150,6 @@ int drm_err(const char *func, const char *format, ...);
+ #define DRIVER_IRQ_VBL2    0x800
+ #define DRIVER_GEM         0x1000
+ #define DRIVER_MODESET     0x2000
+-#define DRIVER_PRIME       0x4000
+ 
+ #define DRIVER_BUS_PCI 0x1
+ #define DRIVER_BUS_PLATFORM 0x2
+@@ -217,11 +215,6 @@ int drm_err(const char *func, const char *format, ...);
+ 		drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME, 		\
+ 					 __func__, fmt, ##args);	\
+ 	} while (0)
+-#define DRM_DEBUG_PRIME(fmt, args...)					\
+-	do {								\
+-		drm_ut_debug_printk(DRM_UT_PRIME, DRM_NAME,		\
+-					__func__, fmt, ##args);		\
+-	} while (0)
+ #define DRM_LOG(fmt, args...)						\
+ 	do {								\
+ 		drm_ut_debug_printk(DRM_UT_CORE, NULL,			\
+@@ -245,7 +238,6 @@ int drm_err(const char *func, const char *format, ...);
+ #else
+ #define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
+ #define DRM_DEBUG_KMS(fmt, args...)	do { } while (0)
+-#define DRM_DEBUG_PRIME(fmt, args...)	do { } while (0)
+ #define DRM_DEBUG(fmt, arg...)		 do { } while (0)
+ #define DRM_LOG(fmt, arg...)		do { } while (0)
+ #define DRM_LOG_KMS(fmt, args...) do { } while (0)
+@@ -418,12 +410,6 @@ struct drm_pending_event {
+ 	void (*destroy)(struct drm_pending_event *event);
+ };
+ 
+-/* initial implementaton using a linked list - todo hashtab */
+-struct drm_prime_file_private {
+-	struct list_head head;
+-	struct mutex lock;
+-};
+-
+ /** File private data */
+ struct drm_file {
+ 	int authenticated;
+@@ -451,8 +437,6 @@ struct drm_file {
+ 	wait_queue_head_t event_wait;
+ 	struct list_head event_list;
+ 	int event_space;
+-
+-	struct drm_prime_file_private prime;
+ };
+ 
+ /** Wait queue */
+@@ -668,12 +652,6 @@ struct drm_gem_object {
+ 	uint32_t pending_write_domain;
+ 
+ 	void *driver_private;
+-
+-	/* dma buf exported from this GEM object */
+-	struct dma_buf *export_dma_buf;
+-
+-	/* dma buf attachment backing this object */
+-	struct dma_buf_attachment *import_attach;
+ };
+ 
+ #include "drm_crtc.h"
+@@ -912,20 +890,6 @@ struct drm_driver {
+ 	int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
+ 	void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
+ 
+-	/* prime: */
+-	/* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
+-	int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
+-				uint32_t handle, uint32_t flags, int *prime_fd);
+-	/* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
+-	int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
+-				int prime_fd, uint32_t *handle);
+-	/* export GEM -> dmabuf */
+-	struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+-				struct drm_gem_object *obj, int flags);
+-	/* import dmabuf -> GEM */
+-	struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
+-				struct dma_buf *dma_buf);
+-
+ 	/* vga arb irq handler */
+ 	void (*vgaarb_irq)(struct drm_device *dev, bool state);
+ 
+@@ -1545,32 +1509,6 @@ extern int drm_vblank_info(struct seq_file *m, void *data);
+ extern int drm_clients_info(struct seq_file *m, void* data);
+ extern int drm_gem_name_info(struct seq_file *m, void *data);
+ 
+-
+-extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+-		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+-		int *prime_fd);
+-extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+-		struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+-
+-extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+-					struct drm_file *file_priv);
+-extern int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+-					struct drm_file *file_priv);
+-
+-extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
+-extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
+-
+-
+-void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
+-void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
+-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
+-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
+-
+-int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
+-int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
+-			 struct drm_gem_object **obj);
+-
+ #if DRM_DEBUG_CODE
+ extern int drm_vma_info(struct seq_file *m, void *data);
+ #endif
+-- 
+1.7.10.4
+

Added: dists/sid/linux/debian/patches/features/all/drm/drm-3.4.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/features/all/drm/drm-3.4.patch	Wed Feb  6 03:52:57 2013	(r19786)
@@ -0,0 +1,107312 @@
+diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
+index 780498d..444f8b6 100644
+--- a/drivers/char/agp/amd64-agp.c
++++ b/drivers/char/agp/amd64-agp.c
+@@ -33,7 +33,7 @@
+ #define ULI_X86_64_ENU_SCR_REG		0x54
+ 
+ static struct resource *aperture_resource;
+-static int __initdata agp_try_unsupported = 1;
++static bool __initdata agp_try_unsupported = 1;
+ static int agp_bridges_found;
+ 
+ static void amd64_tlbflush(struct agp_memory *temp)
+diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
+index 4b71647..317c28c 100644
+--- a/drivers/char/agp/backend.c
++++ b/drivers/char/agp/backend.c
+@@ -194,10 +194,10 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
+ 
+ err_out:
+ 	if (bridge->driver->needs_scratch_page) {
+-		void *va = page_address(bridge->scratch_page_page);
++		struct page *page = bridge->scratch_page_page;
+ 
+-		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
+-		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
++		bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
++		bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE);
+ 	}
+ 	if (got_gatt)
+ 		bridge->driver->free_gatt_table(bridge);
+@@ -221,10 +221,10 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
+ 
+ 	if (bridge->driver->agp_destroy_page &&
+ 	    bridge->driver->needs_scratch_page) {
+-		void *va = page_address(bridge->scratch_page_page);
++		struct page *page = bridge->scratch_page_page;
+ 
+-		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
+-		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
++		bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
++		bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE);
+ 	}
+ }
+ 
+diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
+index b072648..17e05d1 100644
+--- a/drivers/char/agp/generic.c
++++ b/drivers/char/agp/generic.c
+@@ -514,12 +514,12 @@ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
+ 	switch (*bridge_agpstat & 7) {
+ 	case 4:
+ 		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
+-		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
++		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
+ 			"Fixing up support for x2 & x1\n");
+ 		break;
+ 	case 2:
+ 		*bridge_agpstat |= AGPSTAT2_1X;
+-		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
++		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
+ 			"Fixing up support for x1\n");
+ 		break;
+ 	default:
+@@ -693,7 +693,7 @@ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
+ 			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+ 			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+ 		} else {
+-			printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
++			printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
+ 			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
+ 				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
+ 					*bridge_agpstat, origbridge);
+@@ -956,7 +956,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
+ 	bridge->driver->cache_flush();
+ #ifdef CONFIG_X86
+ 	if (set_memory_uc((unsigned long)table, 1 << page_order))
+-		printk(KERN_WARNING "Could not set GATT table memory to UC!");
++		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
+ 
+ 	bridge->gatt_table = (void *)table;
+ #else
+diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
+index 58b49d1..4293c48 100644
+--- a/drivers/char/agp/intel-agp.c
++++ b/drivers/char/agp/intel-agp.c
+@@ -850,6 +850,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
+ 	.subvendor	= PCI_ANY_ID,			\
+ 	.subdevice	= PCI_ANY_ID,			\
+ 	}
++	ID(PCI_DEVICE_ID_INTEL_82441), /* for HAS2 support */
+ 	ID(PCI_DEVICE_ID_INTEL_82443LX_0),
+ 	ID(PCI_DEVICE_ID_INTEL_82443BX_0),
+ 	ID(PCI_DEVICE_ID_INTEL_82443GX_0),
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index 43c4ec3..7f025fb 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -76,7 +76,6 @@ static struct _intel_private {
+ 	struct resource ifp_resource;
+ 	int resource_valid;
+ 	struct page *scratch_page;
+-	dma_addr_t scratch_page_dma;
+ } intel_private;
+ 
+ #define INTEL_GTT_GEN	intel_private.driver->gen
+@@ -306,9 +305,9 @@ static int intel_gtt_setup_scratch_page(void)
+ 		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
+ 			return -EINVAL;
+ 
+-		intel_private.scratch_page_dma = dma_addr;
++		intel_private.base.scratch_page_dma = dma_addr;
+ 	} else
+-		intel_private.scratch_page_dma = page_to_phys(page);
++		intel_private.base.scratch_page_dma = page_to_phys(page);
+ 
+ 	intel_private.scratch_page = page;
+ 
+@@ -631,7 +630,7 @@ static unsigned int intel_gtt_mappable_entries(void)
+ static void intel_gtt_teardown_scratch_page(void)
+ {
+ 	set_pages_wb(intel_private.scratch_page, 1);
+-	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
++	pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
+ 		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ 	put_page(intel_private.scratch_page);
+ 	__free_page(intel_private.scratch_page);
+@@ -681,6 +680,7 @@ static int intel_gtt_init(void)
+ 		iounmap(intel_private.registers);
+ 		return -ENOMEM;
+ 	}
++	intel_private.base.gtt = intel_private.gtt;
+ 
+ 	global_cache_flush();   /* FIXME: ? */
+ 
+@@ -975,7 +975,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
+ 	unsigned int i;
+ 
+ 	for (i = first_entry; i < (first_entry + num_entries); i++) {
+-		intel_private.driver->write_entry(intel_private.scratch_page_dma,
++		intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
+ 						  i, 0);
+ 	}
+ 	readl(intel_private.gtt+i-1);
+@@ -1190,7 +1190,6 @@ static inline int needs_idle_maps(void)
+ {
+ #ifdef CONFIG_INTEL_IOMMU
+ 	const unsigned short gpu_devid = intel_private.pcidev->device;
+-	extern int intel_iommu_gfx_mapped;
+ 
+ 	/* Query intel_iommu to see if we need the workaround. Presumably that
+ 	 * was loaded first.
+diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
+index 29aacd8..08704ae 100644
+--- a/drivers/char/agp/sis-agp.c
++++ b/drivers/char/agp/sis-agp.c
+@@ -17,7 +17,7 @@
+ #define PCI_DEVICE_ID_SI_662	0x0662
+ #define PCI_DEVICE_ID_SI_671	0x0671
+ 
+-static int __devinitdata agp_sis_force_delay = 0;
++static bool __devinitdata agp_sis_force_delay = 0;
+ static int __devinitdata agp_sis_agp_spec = -1;
+ 
+ static int sis_fetch_size(void)
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 1368826..e354bc0 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -9,6 +9,7 @@ menuconfig DRM
+ 	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
+ 	select I2C
+ 	select I2C_ALGOBIT
++	select DMA_SHARED_BUFFER
+ 	help
+ 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
+ 	  introduced in XFree86 4.0. If you say Y here, you need to select
+@@ -18,6 +19,11 @@ menuconfig DRM
+ 	  details.  You should also select and configure AGP
+ 	  (/dev/agpgart) support if it is available for your platform.
+ 
++config DRM_USB
++	tristate
++	depends on DRM
++	select USB
++
+ config DRM_KMS_HELPER
+ 	tristate
+ 	depends on DRM
+@@ -27,6 +33,18 @@ config DRM_KMS_HELPER
+ 	help
+ 	  FB and CRTC helpers for KMS drivers.
+ 
++config DRM_LOAD_EDID_FIRMWARE
++	bool "Allow to specify an EDID data set instead of probing for it"
++	depends on DRM_KMS_HELPER
++	help
++	  Say Y here, if you want to use EDID data to be loaded from the
++	  /lib/firmware directory or one of the provided built-in
++	  data sets. This may be necessary, if the graphics adapter or
++	  monitor are unable to provide appropriate EDID data. Since this
++	  feature is provided as a workaround for broken hardware, the
++	  default case is N. Details and instructions how to build your own
++	  EDID data are given in Documentation/EDID/HOWTO.txt.
++
+ config DRM_TTM
+ 	tristate
+ 	depends on DRM
+@@ -71,6 +89,8 @@ config DRM_RADEON
+ 
+ source "drivers/gpu/drm/radeon/Kconfig"
+ 
++source "drivers/gpu/drm/nouveau/Kconfig"
++
+ config DRM_I810
+ 	tristate "Intel I810"
+ 	# !PREEMPT because of missing ioctl locking
+@@ -162,3 +182,7 @@ config DRM_SAVAGE
+ source "drivers/gpu/drm/exynos/Kconfig"
+ 
+ source "drivers/gpu/drm/vmwgfx/Kconfig"
++
++source "drivers/gpu/drm/gma500/Kconfig"
++
++source "drivers/gpu/drm/udl/Kconfig"
+diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+index c0496f6..c20da5b 100644
+--- a/drivers/gpu/drm/Makefile
++++ b/drivers/gpu/drm/Makefile
+@@ -9,20 +9,24 @@ drm-y       :=	drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
+ 		drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
+ 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
+ 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+-		drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
++		drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
+ 		drm_crtc.o drm_modes.o drm_edid.o \
+ 		drm_info.o drm_debugfs.o drm_encoder_slave.o \
+-		drm_trace_points.o drm_global.o drm_usb.o
++		drm_trace_points.o drm_global.o drm_prime.o
+ 
+ drm-$(CONFIG_COMPAT) += drm_ioc32.o
+ 
++drm-usb-y   := drm_usb.o
++
+ drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
++drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+ 
+ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
+ 
+ CFLAGS_drm_trace_points.o := -I$(src)
+ 
+ obj-$(CONFIG_DRM)	+= drm.o
++obj-$(CONFIG_DRM_USB)   += drm_usb.o
+ obj-$(CONFIG_DRM_TTM)	+= ttm/
+ obj-$(CONFIG_DRM_TDFX)	+= tdfx/
+ obj-$(CONFIG_DRM_R128)	+= r128/
+@@ -36,4 +40,6 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
+ obj-$(CONFIG_DRM_VIA)	+=via/
+ obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+ obj-$(CONFIG_DRM_EXYNOS) +=exynos/
++obj-$(CONFIG_DRM_GMA500) += gma500/
++obj-$(CONFIG_DRM_UDL) += udl/
+ obj-y			+= i2c/
+diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
+index 30372f7..348b367 100644
+--- a/drivers/gpu/drm/drm_bufs.c
++++ b/drivers/gpu/drm/drm_bufs.c
+@@ -1510,8 +1510,8 @@ int drm_freebufs(struct drm_device *dev, void *data,
+  * \param arg pointer to a drm_buf_map structure.
+  * \return zero on success or a negative number on failure.
+  *
+- * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
+- * about each buffer into user space. For PCI buffers, it calls do_mmap() with
++ * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
++ * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
+  * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls
+  * drm_mmap_dma().
+  */
+@@ -1553,18 +1553,14 @@ int drm_mapbufs(struct drm_device *dev, void *data,
+ 				retcode = -EINVAL;
+ 				goto done;
+ 			}
+-			down_write(&current->mm->mmap_sem);
+-			virtual = do_mmap(file_priv->filp, 0, map->size,
++			virtual = vm_mmap(file_priv->filp, 0, map->size,
+ 					  PROT_READ | PROT_WRITE,
+ 					  MAP_SHARED,
+ 					  token);
+-			up_write(&current->mm->mmap_sem);
+ 		} else {
+-			down_write(&current->mm->mmap_sem);
+-			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
++			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
+ 					  PROT_READ | PROT_WRITE,
+ 					  MAP_SHARED, 0);
+-			up_write(&current->mm->mmap_sem);
+ 		}
+ 		if (virtual > -1024UL) {
+ 			/* Real error */
+diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
+index 5928653..4b8653b 100644
+--- a/drivers/gpu/drm/drm_cache.c
++++ b/drivers/gpu/drm/drm_cache.c
+@@ -41,10 +41,10 @@ drm_clflush_page(struct page *page)
+ 	if (unlikely(page == NULL))
+ 		return;
+ 
+-	page_virtual = kmap_atomic(page, KM_USER0);
++	page_virtual = kmap_atomic(page);
+ 	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+ 		clflush(page_virtual + i);
+-	kunmap_atomic(page_virtual, KM_USER0);
++	kunmap_atomic(page_virtual);
+ }
+ 
+ static void drm_cache_flush_clflush(struct page *pages[],
+@@ -87,10 +87,10 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
+ 		if (unlikely(page == NULL))
+ 			continue;
+ 
+-		page_virtual = kmap_atomic(page, KM_USER0);
++		page_virtual = kmap_atomic(page);
+ 		flush_dcache_range((unsigned long)page_virtual,
+ 				   (unsigned long)page_virtual + PAGE_SIZE);
+-		kunmap_atomic(page_virtual, KM_USER0);
++		kunmap_atomic(page_virtual);
+ 	}
+ #else
+ 	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
+index 6d440fb..325365f 100644
+--- a/drivers/gpu/drm/drm_context.c
++++ b/drivers/gpu/drm/drm_context.c
+@@ -154,8 +154,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
+ 		return -EINVAL;
+ 	}
+ 
+-	mutex_unlock(&dev->struct_mutex);
+-
+ 	request->handle = NULL;
+ 	list_for_each_entry(_entry, &dev->maplist, head) {
+ 		if (_entry->map == map) {
+@@ -164,6 +162,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
+ 			break;
+ 		}
+ 	}
++
++	mutex_unlock(&dev->struct_mutex);
++
+ 	if (request->handle == NULL)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index 3f1799b..4fd363f 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -36,11 +36,7 @@
+ #include "drmP.h"
+ #include "drm_crtc.h"
+ #include "drm_edid.h"
+-
+-struct drm_prop_enum_list {
+-	int type;
+-	char *name;
+-};
++#include "drm_fourcc.h"
+ 
+ /* Avoid boilerplate.  I'm tired of typing. */
+ #define DRM_ENUM_NAME_FN(fnname, list)				\
+@@ -297,9 +293,8 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+ 	int ret;
+ 
+ 	ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
+-	if (ret) {
++	if (ret)
+ 		return ret;
+-	}
+ 
+ 	fb->dev = dev;
+ 	fb->funcs = funcs;
+@@ -324,6 +319,7 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+ {
+ 	struct drm_device *dev = fb->dev;
+ 	struct drm_crtc *crtc;
++	struct drm_plane *plane;
+ 	struct drm_mode_set set;
+ 	int ret;
+ 
+@@ -340,6 +336,18 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
+ 		}
+ 	}
+ 
++	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
++		if (plane->fb == fb) {
++			/* should turn off the crtc */
++			ret = plane->funcs->disable_plane(plane);
++			if (ret)
++				DRM_ERROR("failed to disable plane with busy fb\n");
++			/* disconnect the plane from the fb and crtc: */
++			plane->fb = NULL;
++			plane->crtc = NULL;
++		}
++	}
++
+ 	drm_mode_object_put(dev, &fb->base);
+ 	list_del(&fb->head);
+ 	dev->mode_config.num_fb--;
+@@ -356,19 +364,31 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup);
+  * Caller must hold mode config lock.
+  *
+  * Inits a new object created as base part of an driver crtc object.
++ *
++ * RETURNS:
++ * Zero on success, error code on failure.
+  */
+-void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
++int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ 		   const struct drm_crtc_funcs *funcs)
+ {
++	int ret;
++
+ 	crtc->dev = dev;
+ 	crtc->funcs = funcs;
+ 
+ 	mutex_lock(&dev->mode_config.mutex);
+-	drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
++
++	ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
++	if (ret)
++		goto out;
+ 
+ 	list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
+ 	dev->mode_config.num_crtc++;
++
++ out:
+ 	mutex_unlock(&dev->mode_config.mutex);
++
++	return ret;
+ }
+ EXPORT_SYMBOL(drm_crtc_init);
+ 
+@@ -428,7 +448,7 @@ void drm_mode_remove(struct drm_connector *connector,
+ 		     struct drm_display_mode *mode)
+ {
+ 	list_del(&mode->head);
+-	kfree(mode);
++	drm_mode_destroy(connector->dev, mode);
+ }
+ EXPORT_SYMBOL(drm_mode_remove);
+ 
+@@ -440,21 +460,29 @@ EXPORT_SYMBOL(drm_mode_remove);
+  * @name: user visible name of the connector
+  *
+  * LOCKING:
+- * Caller must hold @dev's mode_config lock.
++ * Takes mode config lock.
+  *
+  * Initialises a preallocated connector. Connectors should be
+  * subclassed as part of driver connector objects.
++ *
++ * RETURNS:
++ * Zero on success, error code on failure.
+  */
+-void drm_connector_init(struct drm_device *dev,
+-		     struct drm_connector *connector,
+-		     const struct drm_connector_funcs *funcs,
+-		     int connector_type)
++int drm_connector_init(struct drm_device *dev,
++		       struct drm_connector *connector,
++		       const struct drm_connector_funcs *funcs,
++		       int connector_type)
+ {
++	int ret;
++
+ 	mutex_lock(&dev->mode_config.mutex);
+ 
++	ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
++	if (ret)
++		goto out;
++
+ 	connector->dev = dev;
+ 	connector->funcs = funcs;
+-	drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+ 	connector->connector_type = connector_type;
+ 	connector->connector_type_id =
+ 		++drm_connector_enum_list[connector_type].count; /* TODO */
+@@ -474,7 +502,10 @@ void drm_connector_init(struct drm_device *dev,
+ 	drm_connector_attach_property(connector,
+ 				      dev->mode_config.dpms_property, 0);
+ 
++ out:
+ 	mutex_unlock(&dev->mode_config.mutex);
++
++	return ret;
+ }
+ EXPORT_SYMBOL(drm_connector_init);
+ 
+@@ -483,7 +514,7 @@ EXPORT_SYMBOL(drm_connector_init);
+  * @connector: connector to cleanup
+  *
+  * LOCKING:
+- * Caller must hold @dev's mode_config lock.
++ * Takes mode config lock.
+  *
+  * Cleans up the connector but doesn't free the object.
+  */
+@@ -509,23 +540,41 @@ void drm_connector_cleanup(struct drm_connector *connector)
+ }
+ EXPORT_SYMBOL(drm_connector_cleanup);
+ 
+-void drm_encoder_init(struct drm_device *dev,
++void drm_connector_unplug_all(struct drm_device *dev)
++{
++	struct drm_connector *connector;
++
++	/* taking the mode config mutex ends up in a clash with sysfs */
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
++		drm_sysfs_connector_remove(connector);
++
++}
++EXPORT_SYMBOL(drm_connector_unplug_all);
++
++int drm_encoder_init(struct drm_device *dev,
+ 		      struct drm_encoder *encoder,
+ 		      const struct drm_encoder_funcs *funcs,
+ 		      int encoder_type)
+ {
++	int ret;
++
+ 	mutex_lock(&dev->mode_config.mutex);
+ 
+-	encoder->dev = dev;
++	ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
++	if (ret)
++		goto out;
+ 
+-	drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
++	encoder->dev = dev;
+ 	encoder->encoder_type = encoder_type;
+ 	encoder->funcs = funcs;
+ 
+ 	list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
+ 	dev->mode_config.num_encoder++;
+ 
++ out:
+ 	mutex_unlock(&dev->mode_config.mutex);
++
++	return ret;
+ }
+ EXPORT_SYMBOL(drm_encoder_init);
+ 
+@@ -540,6 +589,69 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
+ }
+ EXPORT_SYMBOL(drm_encoder_cleanup);
+ 
++int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
++		   unsigned long possible_crtcs,
++		   const struct drm_plane_funcs *funcs,
++		   const uint32_t *formats, uint32_t format_count,
++		   bool priv)
++{
++	int ret;
++
++	mutex_lock(&dev->mode_config.mutex);
++
++	ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
++	if (ret)
++		goto out;
++
++	plane->dev = dev;
++	plane->funcs = funcs;
++	plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
++				      GFP_KERNEL);
++	if (!plane->format_types) {
++		DRM_DEBUG_KMS("out of memory when allocating plane\n");
++		drm_mode_object_put(dev, &plane->base);
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
++	plane->format_count = format_count;
++	plane->possible_crtcs = possible_crtcs;
++
++	/* private planes are not exposed to userspace, but depending on
++	 * display hardware, might be convenient to allow sharing programming
++	 * for the scanout engine with the crtc implementation.
++	 */
++	if (!priv) {
++		list_add_tail(&plane->head, &dev->mode_config.plane_list);
++		dev->mode_config.num_plane++;
++	} else {
++		INIT_LIST_HEAD(&plane->head);
++	}
++
++ out:
++	mutex_unlock(&dev->mode_config.mutex);
++
++	return ret;
++}
++EXPORT_SYMBOL(drm_plane_init);
++
++void drm_plane_cleanup(struct drm_plane *plane)
++{
++	struct drm_device *dev = plane->dev;
++
++	mutex_lock(&dev->mode_config.mutex);
++	kfree(plane->format_types);
++	drm_mode_object_put(dev, &plane->base);
++	/* if not added to a list, it must be a private plane */
++	if (!list_empty(&plane->head)) {
++		list_del(&plane->head);
++		dev->mode_config.num_plane--;
++	}
++	mutex_unlock(&dev->mode_config.mutex);
++}
++EXPORT_SYMBOL(drm_plane_cleanup);
++
+ /**
+  * drm_mode_create - create a new display mode
+  * @dev: DRM device
+@@ -560,7 +672,11 @@ struct drm_display_mode *drm_mode_create(struct drm_device *dev)
+ 	if (!nmode)
+ 		return NULL;
+ 
+-	drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
++	if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
++		kfree(nmode);
++		return NULL;
++	}
++
+ 	return nmode;
+ }
+ EXPORT_SYMBOL(drm_mode_create);
+@@ -577,6 +693,9 @@ EXPORT_SYMBOL(drm_mode_create);
+  */
+ void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
+ {
++	if (!mode)
++		return;
++
+ 	drm_mode_object_put(dev, &mode->base);
+ 
+ 	kfree(mode);
+@@ -587,7 +706,6 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
+ {
+ 	struct drm_property *edid;
+ 	struct drm_property *dpms;
+-	int i;
+ 
+ 	/*
+ 	 * Standard properties (apply to all connectors)
+@@ -597,11 +715,9 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
+ 				   "EDID", 0);
+ 	dev->mode_config.edid_property = edid;
+ 
+-	dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+-				   "DPMS", ARRAY_SIZE(drm_dpms_enum_list));
+-	for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
+-		drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
+-				      drm_dpms_enum_list[i].name);
++	dpms = drm_property_create_enum(dev, 0,
++				   "DPMS", drm_dpms_enum_list,
++				   ARRAY_SIZE(drm_dpms_enum_list));
+ 	dev->mode_config.dpms_property = dpms;
+ 
+ 	return 0;
+@@ -617,30 +733,21 @@ int drm_mode_create_dvi_i_properties(struct drm_device *dev)
+ {
+ 	struct drm_property *dvi_i_selector;
+ 	struct drm_property *dvi_i_subconnector;
+-	int i;
+ 
+ 	if (dev->mode_config.dvi_i_select_subconnector_property)
+ 		return 0;
+ 
+ 	dvi_i_selector =
+-		drm_property_create(dev, DRM_MODE_PROP_ENUM,
++		drm_property_create_enum(dev, 0,
+ 				    "select subconnector",
++				    drm_dvi_i_select_enum_list,
+ 				    ARRAY_SIZE(drm_dvi_i_select_enum_list));
+-	for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
+-		drm_property_add_enum(dvi_i_selector, i,
+-				      drm_dvi_i_select_enum_list[i].type,
+-				      drm_dvi_i_select_enum_list[i].name);
+ 	dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
+ 
+-	dvi_i_subconnector =
+-		drm_property_create(dev, DRM_MODE_PROP_ENUM |
+-				    DRM_MODE_PROP_IMMUTABLE,
++	dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ 				    "subconnector",
++				    drm_dvi_i_subconnector_enum_list,
+ 				    ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
+-	for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
+-		drm_property_add_enum(dvi_i_subconnector, i,
+-				      drm_dvi_i_subconnector_enum_list[i].type,
+-				      drm_dvi_i_subconnector_enum_list[i].name);
+ 	dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
+ 
+ 	return 0;
+@@ -671,51 +778,33 @@ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+ 	/*
+ 	 * Basic connector properties
+ 	 */
+-	tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
++	tv_selector = drm_property_create_enum(dev, 0,
+ 					  "select subconnector",
++					  drm_tv_select_enum_list,
+ 					  ARRAY_SIZE(drm_tv_select_enum_list));
+-	for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
+-		drm_property_add_enum(tv_selector, i,
+-				      drm_tv_select_enum_list[i].type,
+-				      drm_tv_select_enum_list[i].name);
+ 	dev->mode_config.tv_select_subconnector_property = tv_selector;
+ 
+ 	tv_subconnector =
+-		drm_property_create(dev, DRM_MODE_PROP_ENUM |
+-				    DRM_MODE_PROP_IMMUTABLE, "subconnector",
++		drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
++				    "subconnector",
++				    drm_tv_subconnector_enum_list,
+ 				    ARRAY_SIZE(drm_tv_subconnector_enum_list));
+-	for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
+-		drm_property_add_enum(tv_subconnector, i,
+-				      drm_tv_subconnector_enum_list[i].type,
+-				      drm_tv_subconnector_enum_list[i].name);
+ 	dev->mode_config.tv_subconnector_property = tv_subconnector;
+ 
+ 	/*
+ 	 * Other, TV specific properties: margins & TV modes.
+ 	 */
+ 	dev->mode_config.tv_left_margin_property =
+-		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-				    "left margin", 2);
+-	dev->mode_config.tv_left_margin_property->values[0] = 0;
+-	dev->mode_config.tv_left_margin_property->values[1] = 100;
++		drm_property_create_range(dev, 0, "left margin", 0, 100);
+ 
+ 	dev->mode_config.tv_right_margin_property =
+-		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-				    "right margin", 2);
+-	dev->mode_config.tv_right_margin_property->values[0] = 0;
+-	dev->mode_config.tv_right_margin_property->values[1] = 100;
++		drm_property_create_range(dev, 0, "right margin", 0, 100);
+ 
+ 	dev->mode_config.tv_top_margin_property =
+-		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-				    "top margin", 2);
+-	dev->mode_config.tv_top_margin_property->values[0] = 0;
+-	dev->mode_config.tv_top_margin_property->values[1] = 100;
++		drm_property_create_range(dev, 0, "top margin", 0, 100);
+ 
+ 	dev->mode_config.tv_bottom_margin_property =
+-		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-				    "bottom margin", 2);
+-	dev->mode_config.tv_bottom_margin_property->values[0] = 0;
+-	dev->mode_config.tv_bottom_margin_property->values[1] = 100;
++		drm_property_create_range(dev, 0, "bottom margin", 0, 100);
+ 
+ 	dev->mode_config.tv_mode_property =
+ 		drm_property_create(dev, DRM_MODE_PROP_ENUM,
+@@ -725,40 +814,22 @@ int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes,
+ 				      i, modes[i]);
+ 
+ 	dev->mode_config.tv_brightness_property =
+-		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-				    "brightness", 2);
+-	dev->mode_config.tv_brightness_property->values[0] = 0;
+-	dev->mode_config.tv_brightness_property->values[1] = 100;
++		drm_property_create_range(dev, 0, "brightness", 0, 100);
+ 
+ 	dev->mode_config.tv_contrast_property =
+-		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-				    "contrast", 2);
+-	dev->mode_config.tv_contrast_property->values[0] = 0;
+-	dev->mode_config.tv_contrast_property->values[1] = 100;
++		drm_property_create_range(dev, 0, "contrast", 0, 100);
+ 
+ 	dev->mode_config.tv_flicker_reduction_property =
+-		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-				    "flicker reduction", 2);
+-	dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
+-	dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
++		drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
+ 
+ 	dev->mode_config.tv_overscan_property =
+-		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-				    "overscan", 2);
+-	dev->mode_config.tv_overscan_property->values[0] = 0;
+-	dev->mode_config.tv_overscan_property->values[1] = 100;
++		drm_property_create_range(dev, 0, "overscan", 0, 100);
+ 
+ 	dev->mode_config.tv_saturation_property =
+-		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-				    "saturation", 2);
+-	dev->mode_config.tv_saturation_property->values[0] = 0;
+-	dev->mode_config.tv_saturation_property->values[1] = 100;
++		drm_property_create_range(dev, 0, "saturation", 0, 100);
+ 
+ 	dev->mode_config.tv_hue_property =
+-		drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-				    "hue", 2);
+-	dev->mode_config.tv_hue_property->values[0] = 0;
+-	dev->mode_config.tv_hue_property->values[1] = 100;
++		drm_property_create_range(dev, 0, "hue", 0, 100);
+ 
+ 	return 0;
+ }
+@@ -774,18 +845,14 @@ EXPORT_SYMBOL(drm_mode_create_tv_properties);
+ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
+ {
+ 	struct drm_property *scaling_mode;
+-	int i;
+ 
+ 	if (dev->mode_config.scaling_mode_property)
+ 		return 0;
+ 
+ 	scaling_mode =
+-		drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
++		drm_property_create_enum(dev, 0, "scaling mode",
++				drm_scaling_mode_enum_list,
+ 				    ARRAY_SIZE(drm_scaling_mode_enum_list));
+-	for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
+-		drm_property_add_enum(scaling_mode, i,
+-				      drm_scaling_mode_enum_list[i].type,
+-				      drm_scaling_mode_enum_list[i].name);
+ 
+ 	dev->mode_config.scaling_mode_property = scaling_mode;
+ 
+@@ -803,18 +870,14 @@ EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
+ int drm_mode_create_dithering_property(struct drm_device *dev)
+ {
+ 	struct drm_property *dithering_mode;
+-	int i;
+ 
+ 	if (dev->mode_config.dithering_mode_property)
+ 		return 0;
+ 
+ 	dithering_mode =
+-		drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
++		drm_property_create_enum(dev, 0, "dithering",
++				drm_dithering_mode_enum_list,
+ 				    ARRAY_SIZE(drm_dithering_mode_enum_list));
+-	for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
+-		drm_property_add_enum(dithering_mode, i,
+-				      drm_dithering_mode_enum_list[i].type,
+-				      drm_dithering_mode_enum_list[i].name);
+ 	dev->mode_config.dithering_mode_property = dithering_mode;
+ 
+ 	return 0;
+@@ -831,20 +894,15 @@ EXPORT_SYMBOL(drm_mode_create_dithering_property);
+ int drm_mode_create_dirty_info_property(struct drm_device *dev)
+ {
+ 	struct drm_property *dirty_info;
+-	int i;
+ 
+ 	if (dev->mode_config.dirty_info_property)
+ 		return 0;
+ 
+ 	dirty_info =
+-		drm_property_create(dev, DRM_MODE_PROP_ENUM |
+-				    DRM_MODE_PROP_IMMUTABLE,
++		drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
+ 				    "dirty",
++				    drm_dirty_info_enum_list,
+ 				    ARRAY_SIZE(drm_dirty_info_enum_list));
+-	for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
+-		drm_property_add_enum(dirty_info, i,
+-				      drm_dirty_info_enum_list[i].type,
+-				      drm_dirty_info_enum_list[i].name);
+ 	dev->mode_config.dirty_info_property = dirty_info;
+ 
+ 	return 0;
+@@ -871,6 +929,7 @@ void drm_mode_config_init(struct drm_device *dev)
+ 	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
+ 	INIT_LIST_HEAD(&dev->mode_config.property_list);
+ 	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
++	INIT_LIST_HEAD(&dev->mode_config.plane_list);
+ 	idr_init(&dev->mode_config.crtc_idr);
+ 
+ 	mutex_lock(&dev->mode_config.mutex);
+@@ -927,6 +986,7 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
+ 
+ /**
+  * drm_mode_config_cleanup - free up DRM mode_config info
+@@ -947,6 +1007,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
+ 	struct drm_encoder *encoder, *enct;
+ 	struct drm_framebuffer *fb, *fbt;
+ 	struct drm_property *property, *pt;
++	struct drm_plane *plane, *plt;
+ 
+ 	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
+ 				 head) {
+@@ -967,10 +1028,17 @@ void drm_mode_config_cleanup(struct drm_device *dev)
+ 		fb->funcs->destroy(fb);
+ 	}
+ 
++	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
++				 head) {
++		plane->funcs->destroy(plane);
++	}
++
+ 	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+ 		crtc->funcs->destroy(crtc);
+ 	}
+ 
++	idr_remove_all(&dev->mode_config.crtc_idr);
++	idr_destroy(&dev->mode_config.crtc_idr);
+ }
+ EXPORT_SYMBOL(drm_mode_config_cleanup);
+ 
+@@ -985,9 +1053,16 @@ EXPORT_SYMBOL(drm_mode_config_cleanup);
+  * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
+  * the user.
+  */
+-void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
+-			       struct drm_display_mode *in)
++static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
++				      const struct drm_display_mode *in)
+ {
++	WARN(in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX ||
++	     in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
++	     in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
++	     in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
++	     in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX,
++	     "timing values too large for mode info\n");
++
+ 	out->clock = in->clock;
+ 	out->hdisplay = in->hdisplay;
+ 	out->hsync_start = in->hsync_start;
+@@ -1016,10 +1091,16 @@ void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
+  *
+  * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
+  * the caller.
++ *
++ * RETURNS:
++ * Zero on success, errno on failure.
+  */
+-void drm_crtc_convert_umode(struct drm_display_mode *out,
+-			    struct drm_mode_modeinfo *in)
++static int drm_crtc_convert_umode(struct drm_display_mode *out,
++				  const struct drm_mode_modeinfo *in)
+ {
++	if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
++		return -ERANGE;
++
+ 	out->clock = in->clock;
+ 	out->hdisplay = in->hdisplay;
+ 	out->hsync_start = in->hsync_start;
+@@ -1036,6 +1117,8 @@ void drm_crtc_convert_umode(struct drm_display_mode *out,
+ 	out->type = in->type;
+ 	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
+ 	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
++
++	return 0;
+ }
+ 
+ /**
+@@ -1234,7 +1317,7 @@ out:
+  * @arg: arg from ioctl
+  *
+  * LOCKING:
+- * Caller? (FIXME)
++ * Takes mode config lock.
+  *
+  * Construct a CRTC configuration structure to return to the user.
+  *
+@@ -1294,7 +1377,7 @@ out:
+  * @arg: arg from ioctl
+  *
+  * LOCKING:
+- * Caller? (FIXME)
++ * Takes mode config lock.
+  *
+  * Construct a connector configuration structure to return to the user.
+  *
+@@ -1379,7 +1462,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
+ 	 */
+ 	if ((out_resp->count_modes >= mode_count) && mode_count) {
+ 		copied = 0;
+-		mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
++		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
+ 		list_for_each_entry(mode, &connector->modes, head) {
+ 			drm_crtc_convert_to_umode(&u_mode, mode);
+ 			if (copy_to_user(mode_ptr + copied,
+@@ -1394,8 +1477,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
+ 
+ 	if ((out_resp->count_props >= props_count) && props_count) {
+ 		copied = 0;
+-		prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
+-		prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
++		prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
++		prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
+ 		for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
+ 			if (connector->property_ids[i] != 0) {
+ 				if (put_user(connector->property_ids[i],
+@@ -1417,7 +1500,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
+ 
+ 	if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
+ 		copied = 0;
+-		encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
++		encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
+ 		for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ 			if (connector->encoder_ids[i] != 0) {
+ 				if (put_user(connector->encoder_ids[i],
+@@ -1471,6 +1554,254 @@ out:
+ }
+ 
+ /**
++ * drm_mode_getplane_res - get plane info
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * LOCKING:
++ * Takes mode config lock.
++ *
++ * Return an plane count and set of IDs.
++ */
++int drm_mode_getplane_res(struct drm_device *dev, void *data,
++			    struct drm_file *file_priv)
++{
++	struct drm_mode_get_plane_res *plane_resp = data;
++	struct drm_mode_config *config;
++	struct drm_plane *plane;
++	uint32_t __user *plane_ptr;
++	int copied = 0, ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
++	mutex_lock(&dev->mode_config.mutex);
++	config = &dev->mode_config;
++
++	/*
++	 * This ioctl is called twice, once to determine how much space is
++	 * needed, and the 2nd time to fill it.
++	 */
++	if (config->num_plane &&
++	    (plane_resp->count_planes >= config->num_plane)) {
++		plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
++
++		list_for_each_entry(plane, &config->plane_list, head) {
++			if (put_user(plane->base.id, plane_ptr + copied)) {
++				ret = -EFAULT;
++				goto out;
++			}
++			copied++;
++		}
++	}
++	plane_resp->count_planes = config->num_plane;
++
++out:
++	mutex_unlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++/**
++ * drm_mode_getplane - get plane info
++ * @dev: DRM device
++ * @data: ioctl data
++ * @file_priv: DRM file info
++ *
++ * LOCKING:
++ * Takes mode config lock.
++ *
++ * Return plane info, including formats supported, gamma size, any
++ * current fb, etc.
++ */
++int drm_mode_getplane(struct drm_device *dev, void *data,
++			struct drm_file *file_priv)
++{
++	struct drm_mode_get_plane *plane_resp = data;
++	struct drm_mode_object *obj;
++	struct drm_plane *plane;
++	uint32_t __user *format_ptr;
++	int ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
++	mutex_lock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, plane_resp->plane_id,
++				   DRM_MODE_OBJECT_PLANE);
++	if (!obj) {
++		ret = -ENOENT;
++		goto out;
++	}
++	plane = obj_to_plane(obj);
++
++	if (plane->crtc)
++		plane_resp->crtc_id = plane->crtc->base.id;
++	else
++		plane_resp->crtc_id = 0;
++
++	if (plane->fb)
++		plane_resp->fb_id = plane->fb->base.id;
++	else
++		plane_resp->fb_id = 0;
++
++	plane_resp->plane_id = plane->base.id;
++	plane_resp->possible_crtcs = plane->possible_crtcs;
++	plane_resp->gamma_size = plane->gamma_size;
++
++	/*
++	 * This ioctl is called twice, once to determine how much space is
++	 * needed, and the 2nd time to fill it.
++	 */
++	if (plane->format_count &&
++	    (plane_resp->count_format_types >= plane->format_count)) {
++		format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
++		if (copy_to_user(format_ptr,
++				 plane->format_types,
++				 sizeof(uint32_t) * plane->format_count)) {
++			ret = -EFAULT;
++			goto out;
++		}
++	}
++	plane_resp->count_format_types = plane->format_count;
++
++out:
++	mutex_unlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++/**
++ * drm_mode_setplane - set up or tear down an plane
++ * @dev: DRM device
++ * @data: ioctl data*
++ * @file_prive: DRM file info
++ *
++ * LOCKING:
++ * Takes mode config lock.
++ *
++ * Set plane info, including placement, fb, scaling, and other factors.
++ * Or pass a NULL fb to disable.
++ */
++int drm_mode_setplane(struct drm_device *dev, void *data,
++			struct drm_file *file_priv)
++{
++	struct drm_mode_set_plane *plane_req = data;
++	struct drm_mode_object *obj;
++	struct drm_plane *plane;
++	struct drm_crtc *crtc;
++	struct drm_framebuffer *fb;
++	int ret = 0;
++	unsigned int fb_width, fb_height;
++	int i;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
++	mutex_lock(&dev->mode_config.mutex);
++
++	/*
++	 * First, find the plane, crtc, and fb objects.  If not available,
++	 * we don't bother to call the driver.
++	 */
++	obj = drm_mode_object_find(dev, plane_req->plane_id,
++				   DRM_MODE_OBJECT_PLANE);
++	if (!obj) {
++		DRM_DEBUG_KMS("Unknown plane ID %d\n",
++			      plane_req->plane_id);
++		ret = -ENOENT;
++		goto out;
++	}
++	plane = obj_to_plane(obj);
++
++	/* No fb means shut it down */
++	if (!plane_req->fb_id) {
++		plane->funcs->disable_plane(plane);
++		plane->crtc = NULL;
++		plane->fb = NULL;
++		goto out;
++	}
++
++	obj = drm_mode_object_find(dev, plane_req->crtc_id,
++				   DRM_MODE_OBJECT_CRTC);
++	if (!obj) {
++		DRM_DEBUG_KMS("Unknown crtc ID %d\n",
++			      plane_req->crtc_id);
++		ret = -ENOENT;
++		goto out;
++	}
++	crtc = obj_to_crtc(obj);
++
++	obj = drm_mode_object_find(dev, plane_req->fb_id,
++				   DRM_MODE_OBJECT_FB);
++	if (!obj) {
++		DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
++			      plane_req->fb_id);
++		ret = -ENOENT;
++		goto out;
++	}
++	fb = obj_to_fb(obj);
++
++	/* Check whether this plane supports the fb pixel format. */
++	for (i = 0; i < plane->format_count; i++)
++		if (fb->pixel_format == plane->format_types[i])
++			break;
++	if (i == plane->format_count) {
++		DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
++		ret = -EINVAL;
++		goto out;
++	}
++
++	fb_width = fb->width << 16;
++	fb_height = fb->height << 16;
++
++	/* Make sure source coordinates are inside the fb. */
++	if (plane_req->src_w > fb_width ||
++	    plane_req->src_x > fb_width - plane_req->src_w ||
++	    plane_req->src_h > fb_height ||
++	    plane_req->src_y > fb_height - plane_req->src_h) {
++		DRM_DEBUG_KMS("Invalid source coordinates "
++			      "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
++			      plane_req->src_w >> 16,
++			      ((plane_req->src_w & 0xffff) * 15625) >> 10,
++			      plane_req->src_h >> 16,
++			      ((plane_req->src_h & 0xffff) * 15625) >> 10,
++			      plane_req->src_x >> 16,
++			      ((plane_req->src_x & 0xffff) * 15625) >> 10,
++			      plane_req->src_y >> 16,
++			      ((plane_req->src_y & 0xffff) * 15625) >> 10);
++		ret = -ENOSPC;
++		goto out;
++	}
++
++	/* Give drivers some help against integer overflows */
++	if (plane_req->crtc_w > INT_MAX ||
++	    plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
++	    plane_req->crtc_h > INT_MAX ||
++	    plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
++		DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
++			      plane_req->crtc_w, plane_req->crtc_h,
++			      plane_req->crtc_x, plane_req->crtc_y);
++		ret = -ERANGE;
++		goto out;
++	}
++
++	ret = plane->funcs->update_plane(plane, crtc, fb,
++					 plane_req->crtc_x, plane_req->crtc_y,
++					 plane_req->crtc_w, plane_req->crtc_h,
++					 plane_req->src_x, plane_req->src_y,
++					 plane_req->src_w, plane_req->src_h);
++	if (!ret) {
++		plane->crtc = crtc;
++		plane->fb = fb;
++	}
++
++out:
++	mutex_unlock(&dev->mode_config.mutex);
++
++	return ret;
++}
++
++/**
+  * drm_mode_setcrtc - set CRTC configuration
+  * @inode: inode from the ioctl
+  * @filp: file * from the ioctl
+@@ -1478,7 +1809,7 @@ out:
+  * @arg: arg from ioctl
+  *
+  * LOCKING:
+- * Caller? (FIXME)
++ * Takes mode config lock.
+  *
+  * Build a new CRTC configuration based on user request.
+  *
+@@ -1493,7 +1824,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ 	struct drm_mode_config *config = &dev->mode_config;
+ 	struct drm_mode_crtc *crtc_req = data;
+ 	struct drm_mode_object *obj;
+-	struct drm_crtc *crtc, *crtcfb;
++	struct drm_crtc *crtc;
+ 	struct drm_connector **connector_set = NULL, *connector;
+ 	struct drm_framebuffer *fb = NULL;
+ 	struct drm_display_mode *mode = NULL;
+@@ -1505,6 +1836,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ 		return -EINVAL;
+ 
++	/* For some reason crtc x/y offsets are signed internally. */
++	if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
++		return -ERANGE;
++
+ 	mutex_lock(&dev->mode_config.mutex);
+ 	obj = drm_mode_object_find(dev, crtc_req->crtc_id,
+ 				   DRM_MODE_OBJECT_CRTC);
+@@ -1520,14 +1855,12 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ 		/* If we have a mode we need a framebuffer. */
+ 		/* If we pass -1, set the mode with the currently bound fb */
+ 		if (crtc_req->fb_id == -1) {
+-			list_for_each_entry(crtcfb,
+-					    &dev->mode_config.crtc_list, head) {
+-				if (crtcfb == crtc) {
+-					DRM_DEBUG_KMS("Using current fb for "
+-							"setmode\n");
+-					fb = crtc->fb;
+-				}
++			if (!crtc->fb) {
++				DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
++				ret = -EINVAL;
++				goto out;
+ 			}
++			fb = crtc->fb;
+ 		} else {
+ 			obj = drm_mode_object_find(dev, crtc_req->fb_id,
+ 						   DRM_MODE_OBJECT_FB);
+@@ -1541,8 +1874,30 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ 		}
+ 
+ 		mode = drm_mode_create(dev);
+-		drm_crtc_convert_umode(mode, &crtc_req->mode);
++		if (!mode) {
++			ret = -ENOMEM;
++			goto out;
++		}
++
++		ret = drm_crtc_convert_umode(mode, &crtc_req->mode);
++		if (ret) {
++			DRM_DEBUG_KMS("Invalid mode\n");
++			goto out;
++		}
++
+ 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
++
++		if (mode->hdisplay > fb->width ||
++		    mode->vdisplay > fb->height ||
++		    crtc_req->x > fb->width - mode->hdisplay ||
++		    crtc_req->y > fb->height - mode->vdisplay) {
++			DRM_DEBUG_KMS("Invalid CRTC viewport %ux%u+%u+%u for fb size %ux%u.\n",
++				      mode->hdisplay, mode->vdisplay,
++				      crtc_req->x, crtc_req->y,
++				      fb->width, fb->height);
++			ret = -ENOSPC;
++			goto out;
++		}
+ 	}
+ 
+ 	if (crtc_req->count_connectors == 0 && mode) {
+@@ -1576,7 +1931,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ 		}
+ 
+ 		for (i = 0; i < crtc_req->count_connectors; i++) {
+-			set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
++			set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
+ 			if (get_user(out_id, &set_connectors_ptr[i])) {
+ 				ret = -EFAULT;
+ 				goto out;
+@@ -1610,6 +1965,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ 
+ out:
+ 	kfree(connector_set);
++	drm_mode_destroy(dev, mode);
+ 	mutex_unlock(&dev->mode_config.mutex);
+ 	return ret;
+ }
+@@ -1660,6 +2016,42 @@ out:
+ 	return ret;
+ }
+ 
++/* Original addfb only supported RGB formats, so figure out which one */
++uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
++{
++	uint32_t fmt;
++
++	switch (bpp) {
++	case 8:
++		fmt = DRM_FORMAT_RGB332;
++		break;
++	case 16:
++		if (depth == 15)
++			fmt = DRM_FORMAT_XRGB1555;
++		else
++			fmt = DRM_FORMAT_RGB565;
++		break;
++	case 24:
++		fmt = DRM_FORMAT_RGB888;
++		break;
++	case 32:
++		if (depth == 24)
++			fmt = DRM_FORMAT_XRGB8888;
++		else if (depth == 30)
++			fmt = DRM_FORMAT_XRGB2101010;
++		else
++			fmt = DRM_FORMAT_ARGB8888;
++		break;
++	default:
++		DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
++		fmt = DRM_FORMAT_XRGB8888;
++		break;
++	}
++
++	return fmt;
++}
++EXPORT_SYMBOL(drm_mode_legacy_fb_format);
++
+ /**
+  * drm_mode_addfb - add an FB to the graphics configuration
+  * @inode: inode from the ioctl
+@@ -1680,18 +2072,27 @@ out:
+ int drm_mode_addfb(struct drm_device *dev,
+ 		   void *data, struct drm_file *file_priv)
+ {
+-	struct drm_mode_fb_cmd *r = data;
++	struct drm_mode_fb_cmd *or = data;
++	struct drm_mode_fb_cmd2 r = {};
+ 	struct drm_mode_config *config = &dev->mode_config;
+ 	struct drm_framebuffer *fb;
+ 	int ret = 0;
+ 
++	/* Use new struct with format internally */
++	r.fb_id = or->fb_id;
++	r.width = or->width;
++	r.height = or->height;
++	r.pitches[0] = or->pitch;
++	r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
++	r.handles[0] = or->handle;
++
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ 		return -EINVAL;
+ 
+-	if ((config->min_width > r->width) || (r->width > config->max_width))
++	if ((config->min_width > r.width) || (r.width > config->max_width))
+ 		return -EINVAL;
+ 
+-	if ((config->min_height > r->height) || (r->height > config->max_height))
++	if ((config->min_height > r.height) || (r.height > config->max_height))
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&dev->mode_config.mutex);
+@@ -1699,6 +2100,138 @@ int drm_mode_addfb(struct drm_device *dev,
+ 	/* TODO check buffer is sufficiently large */
+ 	/* TODO setup destructor callback */
+ 
++	fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
++	if (IS_ERR(fb)) {
++		DRM_ERROR("could not create framebuffer\n");
++		ret = PTR_ERR(fb);
++		goto out;
++	}
++
++	or->fb_id = fb->base.id;
++	list_add(&fb->filp_head, &file_priv->fbs);
++	DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
++
++out:
++	mutex_unlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++static int format_check(struct drm_mode_fb_cmd2 *r)
++{
++	uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
++
++	switch (format) {
++	case DRM_FORMAT_C8:
++	case DRM_FORMAT_RGB332:
++	case DRM_FORMAT_BGR233:
++	case DRM_FORMAT_XRGB4444:
++	case DRM_FORMAT_XBGR4444:
++	case DRM_FORMAT_RGBX4444:
++	case DRM_FORMAT_BGRX4444:
++	case DRM_FORMAT_ARGB4444:
++	case DRM_FORMAT_ABGR4444:
++	case DRM_FORMAT_RGBA4444:
++	case DRM_FORMAT_BGRA4444:
++	case DRM_FORMAT_XRGB1555:
++	case DRM_FORMAT_XBGR1555:
++	case DRM_FORMAT_RGBX5551:
++	case DRM_FORMAT_BGRX5551:
++	case DRM_FORMAT_ARGB1555:
++	case DRM_FORMAT_ABGR1555:
++	case DRM_FORMAT_RGBA5551:
++	case DRM_FORMAT_BGRA5551:
++	case DRM_FORMAT_RGB565:
++	case DRM_FORMAT_BGR565:
++	case DRM_FORMAT_RGB888:
++	case DRM_FORMAT_BGR888:
++	case DRM_FORMAT_XRGB8888:
++	case DRM_FORMAT_XBGR8888:
++	case DRM_FORMAT_RGBX8888:
++	case DRM_FORMAT_BGRX8888:
++	case DRM_FORMAT_ARGB8888:
++	case DRM_FORMAT_ABGR8888:
++	case DRM_FORMAT_RGBA8888:
++	case DRM_FORMAT_BGRA8888:
++	case DRM_FORMAT_XRGB2101010:
++	case DRM_FORMAT_XBGR2101010:
++	case DRM_FORMAT_RGBX1010102:
++	case DRM_FORMAT_BGRX1010102:
++	case DRM_FORMAT_ARGB2101010:
++	case DRM_FORMAT_ABGR2101010:
++	case DRM_FORMAT_RGBA1010102:
++	case DRM_FORMAT_BGRA1010102:
++	case DRM_FORMAT_YUYV:
++	case DRM_FORMAT_YVYU:
++	case DRM_FORMAT_UYVY:
++	case DRM_FORMAT_VYUY:
++	case DRM_FORMAT_AYUV:
++	case DRM_FORMAT_NV12:
++	case DRM_FORMAT_NV21:
++	case DRM_FORMAT_NV16:
++	case DRM_FORMAT_NV61:
++	case DRM_FORMAT_YUV410:
++	case DRM_FORMAT_YVU410:
++	case DRM_FORMAT_YUV411:
++	case DRM_FORMAT_YVU411:
++	case DRM_FORMAT_YUV420:
++	case DRM_FORMAT_YVU420:
++	case DRM_FORMAT_YUV422:
++	case DRM_FORMAT_YVU422:
++	case DRM_FORMAT_YUV444:
++	case DRM_FORMAT_YVU444:
++		return 0;
++	default:
++		return -EINVAL;
++	}
++}
++
++/**
++ * drm_mode_addfb2 - add an FB to the graphics configuration
++ * @inode: inode from the ioctl
++ * @filp: file * from the ioctl
++ * @cmd: cmd from ioctl
++ * @arg: arg from ioctl
++ *
++ * LOCKING:
++ * Takes mode config lock.
++ *
++ * Add a new FB to the specified CRTC, given a user request with format.
++ *
++ * Called by the user via ioctl.
++ *
++ * RETURNS:
++ * Zero on success, errno on failure.
++ */
++int drm_mode_addfb2(struct drm_device *dev,
++		    void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_fb_cmd2 *r = data;
++	struct drm_mode_config *config = &dev->mode_config;
++	struct drm_framebuffer *fb;
++	int ret = 0;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
++	if ((config->min_width > r->width) || (r->width > config->max_width)) {
++		DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
++			  r->width, config->min_width, config->max_width);
++		return -EINVAL;
++	}
++	if ((config->min_height > r->height) || (r->height > config->max_height)) {
++		DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
++			  r->height, config->min_height, config->max_height);
++		return -EINVAL;
++	}
++
++	ret = format_check(r);
++	if (ret) {
++		DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
++		return ret;
++	}
++
++	mutex_lock(&dev->mode_config.mutex);
++
+ 	fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
+ 	if (IS_ERR(fb)) {
+ 		DRM_ERROR("could not create framebuffer\n");
+@@ -1782,7 +2315,7 @@ out:
+  * @arg: arg from ioctl
+  *
+  * LOCKING:
+- * Caller? (FIXME)
++ * Takes mode config lock.
+  *
+  * Lookup the FB given its ID and return info about it.
+  *
+@@ -1814,7 +2347,7 @@ int drm_mode_getfb(struct drm_device *dev,
+ 	r->width = fb->width;
+ 	r->depth = fb->depth;
+ 	r->bpp = fb->bits_per_pixel;
+-	r->pitch = fb->pitch;
++	r->pitch = fb->pitches[0];
+ 	fb->funcs->create_handle(fb, file_priv, &r->handle);
+ 
+ out:
+@@ -1846,7 +2379,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+ 	fb = obj_to_fb(obj);
+ 
+ 	num_clips = r->num_clips;
+-	clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
++	clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
+ 
+ 	if (!num_clips != !clips_ptr) {
+ 		ret = -EINVAL;
+@@ -1931,38 +2464,48 @@ void drm_fb_release(struct drm_file *priv)
+  *
+  * Add @mode to @connector's user mode list.
+  */
+-static int drm_mode_attachmode(struct drm_device *dev,
+-			       struct drm_connector *connector,
+-			       struct drm_display_mode *mode)
++static void drm_mode_attachmode(struct drm_device *dev,
++				struct drm_connector *connector,
++				struct drm_display_mode *mode)
+ {
+-	int ret = 0;
+-
+ 	list_add_tail(&mode->head, &connector->user_modes);
+-	return ret;
+ }
+ 
+ int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
+-			     struct drm_display_mode *mode)
++			     const struct drm_display_mode *mode)
+ {
+ 	struct drm_connector *connector;
+ 	int ret = 0;
+-	struct drm_display_mode *dup_mode;
+-	int need_dup = 0;
++	struct drm_display_mode *dup_mode, *next;
++	LIST_HEAD(list);
++
+ 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ 		if (!connector->encoder)
+-			break;
++			continue;
+ 		if (connector->encoder->crtc == crtc) {
+-			if (need_dup)
+-				dup_mode = drm_mode_duplicate(dev, mode);
+-			else
+-				dup_mode = mode;
+-			ret = drm_mode_attachmode(dev, connector, dup_mode);
+-			if (ret)
+-				return ret;
+-			need_dup = 1;
++			dup_mode = drm_mode_duplicate(dev, mode);
++			if (!dup_mode) {
++				ret = -ENOMEM;
++				goto out;
++			}
++			list_add_tail(&dup_mode->head, &list);
+ 		}
+ 	}
+-	return 0;
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		if (!connector->encoder)
++			continue;
++		if (connector->encoder->crtc == crtc)
++			list_move_tail(list.next, &connector->user_modes);
++	}
++
++	WARN_ON(!list_empty(&list));
++
++ out:
++	list_for_each_entry_safe(dup_mode, next, &list, head)
++		drm_mode_destroy(dev, dup_mode);
++
++	return ret;
+ }
+ EXPORT_SYMBOL(drm_mode_attachmode_crtc);
+ 
+@@ -2041,9 +2584,14 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
+ 		goto out;
+ 	}
+ 
+-	drm_crtc_convert_umode(mode, umode);
++	ret = drm_crtc_convert_umode(mode, umode);
++	if (ret) {
++		DRM_DEBUG_KMS("Invalid mode\n");
++		drm_mode_destroy(dev, mode);
++		goto out;
++	}
+ 
+-	ret = drm_mode_attachmode(dev, connector, mode);
++	drm_mode_attachmode(dev, connector, mode);
+ out:
+ 	mutex_unlock(&dev->mode_config.mutex);
+ 	return ret;
+@@ -2084,7 +2632,12 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
+ 	}
+ 	connector = obj_to_connector(obj);
+ 
+-	drm_crtc_convert_umode(&mode, umode);
++	ret = drm_crtc_convert_umode(&mode, umode);
++	if (ret) {
++		DRM_DEBUG_KMS("Invalid mode\n");
++		goto out;
++	}
++
+ 	ret = drm_mode_detachmode(dev, connector, &mode);
+ out:
+ 	mutex_unlock(&dev->mode_config.mutex);
+@@ -2095,6 +2648,7 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ 					 const char *name, int num_values)
+ {
+ 	struct drm_property *property = NULL;
++	int ret;
+ 
+ 	property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
+ 	if (!property)
+@@ -2106,7 +2660,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ 			goto fail;
+ 	}
+ 
+-	drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
++	ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
++	if (ret)
++		goto fail;
++
+ 	property->flags = flags;
+ 	property->num_values = num_values;
+ 	INIT_LIST_HEAD(&property->enum_blob_list);
+@@ -2119,11 +2676,59 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ 	list_add_tail(&property->head, &dev->mode_config.property_list);
+ 	return property;
+ fail:
++	kfree(property->values);
+ 	kfree(property);
+ 	return NULL;
+ }
+ EXPORT_SYMBOL(drm_property_create);
+ 
++struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
++					 const char *name,
++					 const struct drm_prop_enum_list *props,
++					 int num_values)
++{
++	struct drm_property *property;
++	int i, ret;
++
++	flags |= DRM_MODE_PROP_ENUM;
++
++	property = drm_property_create(dev, flags, name, num_values);
++	if (!property)
++		return NULL;
++
++	for (i = 0; i < num_values; i++) {
++		ret = drm_property_add_enum(property, i,
++				      props[i].type,
++				      props[i].name);
++		if (ret) {
++			drm_property_destroy(dev, property);
++			return NULL;
++		}
++	}
++
++	return property;
++}
++EXPORT_SYMBOL(drm_property_create_enum);
++
++struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
++					 const char *name,
++					 uint64_t min, uint64_t max)
++{
++	struct drm_property *property;
++
++	flags |= DRM_MODE_PROP_RANGE;
++
++	property = drm_property_create(dev, flags, name, 2);
++	if (!property)
++		return NULL;
++
++	property->values[0] = min;
++	property->values[1] = max;
++
++	return property;
++}
++EXPORT_SYMBOL(drm_property_create_range);
++
+ int drm_property_add_enum(struct drm_property *property, int index,
+ 			  uint64_t value, const char *name)
+ {
+@@ -2242,7 +2847,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ 	struct drm_property_enum *prop_enum;
+ 	struct drm_mode_property_enum __user *enum_ptr;
+ 	struct drm_property_blob *prop_blob;
+-	uint32_t *blob_id_ptr;
++	uint32_t __user *blob_id_ptr;
+ 	uint64_t __user *values_ptr;
+ 	uint32_t __user *blob_length_ptr;
+ 
+@@ -2272,7 +2877,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ 	out_resp->flags = property->flags;
+ 
+ 	if ((out_resp->count_values >= value_count) && value_count) {
+-		values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
++		values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
+ 		for (i = 0; i < value_count; i++) {
+ 			if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
+ 				ret = -EFAULT;
+@@ -2285,7 +2890,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ 	if (property->flags & DRM_MODE_PROP_ENUM) {
+ 		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
+ 			copied = 0;
+-			enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
++			enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
+ 			list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
+ 
+ 				if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
+@@ -2307,8 +2912,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+ 	if (property->flags & DRM_MODE_PROP_BLOB) {
+ 		if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
+ 			copied = 0;
+-			blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
+-			blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
++			blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
++			blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
+ 
+ 			list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
+ 				if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
+@@ -2335,6 +2940,7 @@ static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev
+ 							  void *data)
+ {
+ 	struct drm_property_blob *blob;
++	int ret;
+ 
+ 	if (!length || !data)
+ 		return NULL;
+@@ -2343,13 +2949,16 @@ static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev
+ 	if (!blob)
+ 		return NULL;
+ 
+-	blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
++	ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
++	if (ret) {
++		kfree(blob);
++		return NULL;
++	}
++
+ 	blob->length = length;
+ 
+ 	memcpy(blob->data, data, length);
+ 
+-	drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+-
+ 	list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
+ 	return blob;
+ }
+@@ -2369,7 +2978,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
+ 	struct drm_mode_get_blob *out_resp = data;
+ 	struct drm_property_blob *blob;
+ 	int ret = 0;
+-	void *blob_ptr;
++	void __user *blob_ptr;
+ 
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ 		return -EINVAL;
+@@ -2383,7 +2992,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
+ 	blob = obj_to_blob(obj);
+ 
+ 	if (out_resp->length == blob->length) {
+-		blob_ptr = (void *)(unsigned long)out_resp->data;
++		blob_ptr = (void __user *)(unsigned long)out_resp->data;
+ 		if (copy_to_user(blob_ptr, blob->data, blob->length)){
+ 			ret = -EFAULT;
+ 			goto done;
+@@ -2528,7 +3137,7 @@ void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+ }
+ EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
+ 
+-bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
++int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ 				  int gamma_size)
+ {
+ 	crtc->gamma_size = gamma_size;
+@@ -2536,10 +3145,10 @@ bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ 	crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
+ 	if (!crtc->gamma_store) {
+ 		crtc->gamma_size = 0;
+-		return false;
++		return -ENOMEM;
+ 	}
+ 
+-	return true;
++	return 0;
+ }
+ EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
+ 
+@@ -2685,6 +3294,18 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ 		goto out;
+ 	fb = obj_to_fb(obj);
+ 
++	if (crtc->mode.hdisplay > fb->width ||
++	    crtc->mode.vdisplay > fb->height ||
++	    crtc->x > fb->width - crtc->mode.hdisplay ||
++	    crtc->y > fb->height - crtc->mode.vdisplay) {
++		DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d.\n",
++			      fb->width, fb->height,
++			      crtc->mode.hdisplay, crtc->mode.vdisplay,
++			      crtc->x, crtc->y);
++		ret = -ENOSPC;
++		goto out;
++	}
++
+ 	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+ 		ret = -ENOMEM;
+ 		spin_lock_irqsave(&dev->event_lock, flags);
+@@ -2714,10 +3335,12 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ 
+ 	ret = crtc->funcs->page_flip(crtc, fb, e);
+ 	if (ret) {
+-		spin_lock_irqsave(&dev->event_lock, flags);
+-		file_priv->event_space += sizeof e->event;
+-		spin_unlock_irqrestore(&dev->event_lock, flags);
+-		kfree(e);
++		if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
++			spin_lock_irqsave(&dev->event_lock, flags);
++			file_priv->event_space += sizeof e->event;
++			spin_unlock_irqrestore(&dev->event_lock, flags);
++			kfree(e);
++		}
+ 	}
+ 
+ out:
+@@ -2777,3 +3400,71 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ 
+ 	return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+ }
++
++/*
++ * Just need to support RGB formats here for compat with code that doesn't
++ * use pixel formats directly yet.
++ */
++void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
++			  int *bpp)
++{
++	switch (format) {
++	case DRM_FORMAT_RGB332:
++	case DRM_FORMAT_BGR233:
++		*depth = 8;
++		*bpp = 8;
++		break;
++	case DRM_FORMAT_XRGB1555:
++	case DRM_FORMAT_XBGR1555:
++	case DRM_FORMAT_RGBX5551:
++	case DRM_FORMAT_BGRX5551:
++	case DRM_FORMAT_ARGB1555:
++	case DRM_FORMAT_ABGR1555:
++	case DRM_FORMAT_RGBA5551:
++	case DRM_FORMAT_BGRA5551:
++		*depth = 15;
++		*bpp = 16;
++		break;
++	case DRM_FORMAT_RGB565:
++	case DRM_FORMAT_BGR565:
++		*depth = 16;
++		*bpp = 16;
++		break;
++	case DRM_FORMAT_RGB888:
++	case DRM_FORMAT_BGR888:
++		*depth = 24;
++		*bpp = 24;
++		break;
++	case DRM_FORMAT_XRGB8888:
++	case DRM_FORMAT_XBGR8888:
++	case DRM_FORMAT_RGBX8888:
++	case DRM_FORMAT_BGRX8888:
++		*depth = 24;
++		*bpp = 32;
++		break;
++	case DRM_FORMAT_XRGB2101010:
++	case DRM_FORMAT_XBGR2101010:
++	case DRM_FORMAT_RGBX1010102:
++	case DRM_FORMAT_BGRX1010102:
++	case DRM_FORMAT_ARGB2101010:
++	case DRM_FORMAT_ABGR2101010:
++	case DRM_FORMAT_RGBA1010102:
++	case DRM_FORMAT_BGRA1010102:
++		*depth = 30;
++		*bpp = 32;
++		break;
++	case DRM_FORMAT_ARGB8888:
++	case DRM_FORMAT_ABGR8888:
++	case DRM_FORMAT_RGBA8888:
++	case DRM_FORMAT_BGRA8888:
++		*depth = 32;
++		*bpp = 32;
++		break;
++	default:
++		DRM_DEBUG_KMS("unsupported pixel format\n");
++		*depth = 0;
++		*bpp = 0;
++		break;
++	}
++}
++EXPORT_SYMBOL(drm_fb_get_bpp_depth);
+diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
+index 11788f7..8111889 100644
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -34,8 +34,10 @@
+ 
+ #include "drmP.h"
+ #include "drm_crtc.h"
++#include "drm_fourcc.h"
+ #include "drm_crtc_helper.h"
+ #include "drm_fb_helper.h"
++#include "drm_edid.h"
+ 
+ static bool drm_kms_helper_poll = true;
+ module_param_named(poll, drm_kms_helper_poll, bool, 0600);
+@@ -43,12 +45,12 @@ module_param_named(poll, drm_kms_helper_poll, bool, 0600);
+ static void drm_mode_validate_flag(struct drm_connector *connector,
+ 				   int flags)
+ {
+-	struct drm_display_mode *mode, *t;
++	struct drm_display_mode *mode;
+ 
+ 	if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+ 		return;
+ 
+-	list_for_each_entry_safe(mode, t, &connector->modes, head) {
++	list_for_each_entry(mode, &connector->modes, head) {
+ 		if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+ 				!(flags & DRM_MODE_FLAG_INTERLACE))
+ 			mode->status = MODE_NO_INTERLACE;
+@@ -86,7 +88,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ 					    uint32_t maxX, uint32_t maxY)
+ {
+ 	struct drm_device *dev = connector->dev;
+-	struct drm_display_mode *mode, *t;
++	struct drm_display_mode *mode;
+ 	struct drm_connector_helper_funcs *connector_funcs =
+ 		connector->helper_private;
+ 	int count = 0;
+@@ -95,7 +97,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
+ 			drm_get_connector_name(connector));
+ 	/* set all modes to the unverified state */
+-	list_for_each_entry_safe(mode, t, &connector->modes, head)
++	list_for_each_entry(mode, &connector->modes, head)
+ 		mode->status = MODE_UNVERIFIED;
+ 
+ 	if (connector->force) {
+@@ -117,7 +119,12 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ 		goto prune;
+ 	}
+ 
+-	count = (*connector_funcs->get_modes)(connector);
++#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
++	count = drm_load_edid_firmware(connector);
++	if (count == 0)
++#endif
++		count = (*connector_funcs->get_modes)(connector);
++
+ 	if (count == 0 && connector->status == connector_status_connected)
+ 		count = drm_add_modes_noedid(connector, 1024, 768);
+ 	if (count == 0)
+@@ -135,7 +142,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ 		mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+ 	drm_mode_validate_flag(connector, mode_flags);
+ 
+-	list_for_each_entry_safe(mode, t, &connector->modes, head) {
++	list_for_each_entry(mode, &connector->modes, head) {
+ 		if (mode->status == MODE_OK)
+ 			mode->status = connector_funcs->mode_valid(connector,
+ 								   mode);
+@@ -151,7 +158,7 @@ prune:
+ 
+ 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
+ 			drm_get_connector_name(connector));
+-	list_for_each_entry_safe(mode, t, &connector->modes, head) {
++	list_for_each_entry(mode, &connector->modes, head) {
+ 		mode->vrefresh = drm_mode_vrefresh(mode);
+ 
+ 		drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+@@ -321,8 +328,8 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
+  * drm_crtc_set_mode - set a mode
+  * @crtc: CRTC to program
+  * @mode: mode to use
+- * @x: horizontal offset into the surface
+- * @y: vertical offset into the surface
++ * @x: width of mode
++ * @y: height of mode
+  *
+  * LOCKING:
+  * Caller must hold mode config lock.
+@@ -351,6 +358,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ 		return true;
+ 
+ 	adjusted_mode = drm_mode_duplicate(dev, mode);
++	if (!adjusted_mode)
++		return false;
+ 
+ 	saved_hwmode = crtc->hwmode;
+ 	saved_mode = crtc->mode;
+@@ -710,7 +719,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
+ 			for (i = 0; i < set->num_connectors; i++) {
+ 				DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ 					      drm_get_connector_name(set->connectors[i]));
+-				set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
++				set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
+ 			}
+ 		}
+ 		drm_helper_disable_unused_functions(dev);
+@@ -847,13 +856,19 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+ EXPORT_SYMBOL(drm_helper_connector_dpms);
+ 
+ int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+-				   struct drm_mode_fb_cmd *mode_cmd)
++				   struct drm_mode_fb_cmd2 *mode_cmd)
+ {
++	int i;
++
+ 	fb->width = mode_cmd->width;
+ 	fb->height = mode_cmd->height;
+-	fb->pitch = mode_cmd->pitch;
+-	fb->bits_per_pixel = mode_cmd->bpp;
+-	fb->depth = mode_cmd->depth;
++	for (i = 0; i < 4; i++) {
++		fb->pitches[i] = mode_cmd->pitches[i];
++		fb->offsets[i] = mode_cmd->offsets[i];
++	}
++	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
++				    &fb->bits_per_pixel);
++	fb->pixel_format = mode_cmd->pixel_format;
+ 
+ 	return 0;
+ }
+@@ -1008,3 +1023,36 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
+ 		queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+ }
+ EXPORT_SYMBOL(drm_helper_hpd_irq_event);
++
++
++/**
++ * drm_format_num_planes - get the number of planes for format
++ * @format: pixel format (DRM_FORMAT_*)
++ *
++ * RETURNS:
++ * The number of planes used by the specified pixel format.
++ */
++int drm_format_num_planes(uint32_t format)
++{
++	switch (format) {
++	case DRM_FORMAT_YUV410:
++	case DRM_FORMAT_YVU410:
++	case DRM_FORMAT_YUV411:
++	case DRM_FORMAT_YVU411:
++	case DRM_FORMAT_YUV420:
++	case DRM_FORMAT_YVU420:
++	case DRM_FORMAT_YUV422:
++	case DRM_FORMAT_YVU422:
++	case DRM_FORMAT_YUV444:
++	case DRM_FORMAT_YVU444:
++		return 3;
++	case DRM_FORMAT_NV12:
++	case DRM_FORMAT_NV21:
++	case DRM_FORMAT_NV16:
++	case DRM_FORMAT_NV61:
++		return 2;
++	default:
++		return 1;
++	}
++}
++EXPORT_SYMBOL(drm_format_num_planes);
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 40c187c..6116e3b 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -61,14 +61,14 @@ static int drm_version(struct drm_device *dev, void *data,
+ 
+ /** Ioctl table */
+ static struct drm_ioctl_desc drm_ioctls[] = {
+-	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
++	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+-	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
+-	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
+-	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+-	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, 0),
++	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
+ 
+ 	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+@@ -135,21 +135,29 @@ static struct drm_ioctl_desc drm_ioctls[] = {
+ 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ 
+-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++
++	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
++
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+@@ -386,6 +394,10 @@ long drm_ioctl(struct file *filp,
+ 	unsigned int usize, asize;
+ 
+ 	dev = file_priv->minor->dev;
++
++	if (drm_device_is_unplugged(dev))
++		return -ENODEV;
++
+ 	atomic_inc(&dev->ioctl_count);
+ 	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
+ 	++file_priv->ioctl_count;
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index bb95d59..9d9835a 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -87,9 +87,6 @@ static struct edid_quirk {
+ 	int product_id;
+ 	u32 quirks;
+ } edid_quirk_list[] = {
+-	/* ASUS VW222S */
+-	{ "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+-
+ 	/* Acer AL1706 */
+ 	{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+ 	/* Acer F51 */
+@@ -157,8 +154,7 @@ EXPORT_SYMBOL(drm_edid_header_is_valid);
+  * Sanity check the EDID block (base or extension).  Return 0 if the block
+  * doesn't check out, or 1 if it's valid.
+  */
+-static bool
+-drm_edid_block_valid(u8 *raw_edid)
++bool drm_edid_block_valid(u8 *raw_edid)
+ {
+ 	int i;
+ 	u8 csum = 0;
+@@ -211,6 +207,7 @@ bad:
+ 	}
+ 	return 0;
+ }
++EXPORT_SYMBOL(drm_edid_block_valid);
+ 
+ /**
+  * drm_edid_is_valid - sanity check EDID data
+@@ -234,7 +231,6 @@ bool drm_edid_is_valid(struct edid *edid)
+ }
+ EXPORT_SYMBOL(drm_edid_is_valid);
+ 
+-#define DDC_ADDR 0x50
+ #define DDC_SEGMENT_ADDR 0x30
+ /**
+  * Get EDID information via I2C.
+@@ -521,25 +517,10 @@ static void
+ cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+ {
+ 	int i, n = 0;
+-	u8 rev = ext[0x01], d = ext[0x02];
++	u8 d = ext[0x02];
+ 	u8 *det_base = ext + d;
+ 
+-	switch (rev) {
+-	case 0:
+-		/* can't happen */
+-		return;
+-	case 1:
+-		/* have to infer how many blocks we have, check pixel clock */
+-		for (i = 0; i < 6; i++)
+-			if (det_base[18*i] || det_base[18*i+1])
+-				n++;
+-		break;
+-	default:
+-		/* explicit count */
+-		n = min(ext[0x03] & 0x0f, 6);
+-		break;
+-	}
+-
++	n = (127 - d) / 18;
+ 	for (i = 0; i < n; i++)
+ 		cb((struct detailed_timing *)(det_base + 18 * i), closure);
+ }
+@@ -773,7 +754,7 @@ drm_mode_std(struct drm_connector *connector, struct edid *edid,
+ 		 */
+ 		mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
+ 		if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
+-			kfree(mode);
++			drm_mode_destroy(dev, mode);
+ 			mode = drm_gtf_mode_complex(dev, hsize, vsize,
+ 						    vrefresh_rate, 0, 0,
+ 						    drm_gtf2_m(edid),
+@@ -1341,6 +1322,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
+ 
+ #define HDMI_IDENTIFIER 0x000C03
+ #define AUDIO_BLOCK	0x01
++#define VIDEO_BLOCK     0x02
+ #define VENDOR_BLOCK    0x03
+ #define SPEAKER_BLOCK	0x04
+ #define EDID_BASIC_AUDIO	(1 << 6)
+@@ -1371,6 +1353,47 @@ u8 *drm_find_cea_extension(struct edid *edid)
+ }
+ EXPORT_SYMBOL(drm_find_cea_extension);
+ 
++static int
++do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
++{
++	struct drm_device *dev = connector->dev;
++	u8 * mode, cea_mode;
++	int modes = 0;
++
++	for (mode = db; mode < db + len; mode++) {
++		cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
++		if (cea_mode < drm_num_cea_modes) {
++			struct drm_display_mode *newmode;
++			newmode = drm_mode_duplicate(dev,
++						     &edid_cea_modes[cea_mode]);
++			if (newmode) {
++				drm_mode_probed_add(connector, newmode);
++				modes++;
++			}
++		}
++	}
++
++	return modes;
++}
++
++static int
++add_cea_modes(struct drm_connector *connector, struct edid *edid)
++{
++	u8 * cea = drm_find_cea_extension(edid);
++	u8 * db, dbl;
++	int modes = 0;
++
++	if (cea && cea[1] >= 3) {
++		for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
++			dbl = db[0] & 0x1f;
++			if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK)
++				modes += do_cea_modes (connector, db+1, dbl);
++		}
++	}
++
++	return modes;
++}
++
+ static void
+ parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
+ {
+@@ -1454,26 +1477,29 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+ 	eld[18] = edid->prod_code[0];
+ 	eld[19] = edid->prod_code[1];
+ 
+-	for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+-		dbl = db[0] & 0x1f;
+-
+-		switch ((db[0] & 0xe0) >> 5) {
+-		case AUDIO_BLOCK:	/* Audio Data Block, contains SADs */
+-			sad_count = dbl / 3;
+-			memcpy(eld + 20 + mnl, &db[1], dbl);
+-			break;
+-		case SPEAKER_BLOCK:	/* Speaker Allocation Data Block */
+-			eld[7] = db[1];
+-			break;
+-		case VENDOR_BLOCK:
+-			/* HDMI Vendor-Specific Data Block */
+-			if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+-				parse_hdmi_vsdb(connector, db);
+-			break;
+-		default:
+-			break;
++	if (cea[1] >= 3)
++		for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
++			dbl = db[0] & 0x1f;
++			
++			switch ((db[0] & 0xe0) >> 5) {
++			case AUDIO_BLOCK:
++				/* Audio Data Block, contains SADs */
++				sad_count = dbl / 3;
++				memcpy(eld + 20 + mnl, &db[1], dbl);
++				break;
++			case SPEAKER_BLOCK:
++                                /* Speaker Allocation Data Block */
++				eld[7] = db[1];
++				break;
++			case VENDOR_BLOCK:
++				/* HDMI Vendor-Specific Data Block */
++				if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
++					parse_hdmi_vsdb(connector, db);
++				break;
++			default:
++				break;
++			}
+ 		}
+-	}
+ 	eld[5] |= sad_count << 4;
+ 	eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
+ 
+@@ -1744,6 +1770,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
+ 	num_modes += add_standard_modes(connector, edid);
+ 	num_modes += add_established_modes(connector, edid);
+ 	num_modes += add_inferred_modes(connector, edid);
++	num_modes += add_cea_modes(connector, edid);
+ 
+ 	if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ 		edid_fixup_preferred(connector, quirks);
+diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
+new file mode 100644
+index 0000000..da9acba
+--- /dev/null
++++ b/drivers/gpu/drm/drm_edid_load.c
+@@ -0,0 +1,250 @@
++/*
++   drm_edid_load.c: use a built-in EDID data set or load it via the firmware
++		    interface
++
++   Copyright (C) 2012 Carsten Emde <C.Emde at osadl.org>
++
++   This program is free software; you can redistribute it and/or
++   modify it under the terms of the GNU General Public License
++   as published by the Free Software Foundation; either version 2
++   of the License, or (at your option) any later version.
++
++   This program is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with this program; if not, write to the Free Software
++   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
++*/
++
++#include <linux/module.h>
++#include <linux/firmware.h>
++#include "drmP.h"
++#include "drm_crtc.h"
++#include "drm_crtc_helper.h"
++#include "drm_edid.h"
++
++static char edid_firmware[PATH_MAX];
++module_param_string(edid_firmware, edid_firmware, sizeof(edid_firmware), 0644);
++MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
++	"from built-in data or /lib/firmware instead. ");
++
++#define GENERIC_EDIDS 4
++static char *generic_edid_name[GENERIC_EDIDS] = {
++	"edid/1024x768.bin",
++	"edid/1280x1024.bin",
++	"edid/1680x1050.bin",
++	"edid/1920x1080.bin",
++};
++
++static u8 generic_edid[GENERIC_EDIDS][128] = {
++	{
++	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
++	0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++	0x05, 0x16, 0x01, 0x03, 0x6d, 0x23, 0x1a, 0x78,
++	0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
++	0x20, 0x50, 0x54, 0x00, 0x08, 0x00, 0x61, 0x40,
++	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x64, 0x19,
++	0x00, 0x40, 0x41, 0x00, 0x26, 0x30, 0x08, 0x90,
++	0x36, 0x00, 0x63, 0x0a, 0x11, 0x00, 0x00, 0x18,
++	0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
++	0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
++	0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
++	0x3d, 0x2f, 0x31, 0x07, 0x00, 0x0a, 0x20, 0x20,
++	0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
++	0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x58,
++	0x47, 0x41, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x55,
++	},
++	{
++	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
++	0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++	0x05, 0x16, 0x01, 0x03, 0x6d, 0x2c, 0x23, 0x78,
++	0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
++	0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0x81, 0x80,
++	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x30, 0x2a,
++	0x00, 0x98, 0x51, 0x00, 0x2a, 0x40, 0x30, 0x70,
++	0x13, 0x00, 0xbc, 0x63, 0x11, 0x00, 0x00, 0x1e,
++	0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
++	0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
++	0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
++	0x3d, 0x3e, 0x40, 0x0b, 0x00, 0x0a, 0x20, 0x20,
++	0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
++	0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x53,
++	0x58, 0x47, 0x41, 0x0a, 0x20, 0x20, 0x00, 0xa0,
++	},
++	{
++	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
++	0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++	0x05, 0x16, 0x01, 0x03, 0x6d, 0x2b, 0x1b, 0x78,
++	0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
++	0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xb3, 0x00,
++	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x21, 0x39,
++	0x90, 0x30, 0x62, 0x1a, 0x27, 0x40, 0x68, 0xb0,
++	0x36, 0x00, 0xb5, 0x11, 0x11, 0x00, 0x00, 0x1e,
++	0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
++	0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
++	0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
++	0x3d, 0x40, 0x42, 0x0f, 0x00, 0x0a, 0x20, 0x20,
++	0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
++	0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x57,
++	0x53, 0x58, 0x47, 0x41, 0x0a, 0x20, 0x00, 0x26,
++	},
++	{
++	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
++	0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++	0x05, 0x16, 0x01, 0x03, 0x6d, 0x32, 0x1c, 0x78,
++	0xea, 0x5e, 0xc0, 0xa4, 0x59, 0x4a, 0x98, 0x25,
++	0x20, 0x50, 0x54, 0x00, 0x00, 0x00, 0xd1, 0xc0,
++	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a,
++	0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
++	0x45, 0x00, 0xf4, 0x19, 0x11, 0x00, 0x00, 0x1e,
++	0x00, 0x00, 0x00, 0xff, 0x00, 0x4c, 0x69, 0x6e,
++	0x75, 0x78, 0x20, 0x23, 0x30, 0x0a, 0x20, 0x20,
++	0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x3b,
++	0x3d, 0x42, 0x44, 0x0f, 0x00, 0x0a, 0x20, 0x20,
++	0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc,
++	0x00, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x20, 0x46,
++	0x48, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x05,
++	},
++};
++
++static int edid_load(struct drm_connector *connector, char *name,
++		     char *connector_name)
++{
++	const struct firmware *fw;
++	struct platform_device *pdev;
++	u8 *fwdata = NULL, *edid;
++	int fwsize, expected;
++	int builtin = 0, err = 0;
++	int i, valid_extensions = 0;
++
++	pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
++	if (IS_ERR(pdev)) {
++		DRM_ERROR("Failed to register EDID firmware platform device "
++		    "for connector \"%s\"\n", connector_name);
++		err = -EINVAL;
++		goto out;
++	}
++
++	err = request_firmware(&fw, name, &pdev->dev);
++	platform_device_unregister(pdev);
++
++	if (err) {
++		i = 0;
++		while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
++			i++;
++		if (i < GENERIC_EDIDS) {
++			err = 0;
++			builtin = 1;
++			fwdata = generic_edid[i];
++			fwsize = sizeof(generic_edid[i]);
++		}
++	}
++
++	if (err) {
++		DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
++		    name, err);
++		goto out;
++	}
++
++	if (fwdata == NULL) {
++		fwdata = (u8 *) fw->data;
++		fwsize = fw->size;
++	}
++
++	expected = (fwdata[0x7e] + 1) * EDID_LENGTH;
++	if (expected != fwsize) {
++		DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
++		    "(expected %d, got %d)\n", name, expected, (int) fwsize);
++		err = -EINVAL;
++		goto relfw_out;
++	}
++
++	edid = kmalloc(fwsize, GFP_KERNEL);
++	if (edid == NULL) {
++		err = -ENOMEM;
++		goto relfw_out;
++	}
++	memcpy(edid, fwdata, fwsize);
++
++	if (!drm_edid_block_valid(edid)) {
++		DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
++		    name);
++		kfree(edid);
++		err = -EINVAL;
++		goto relfw_out;
++	}
++
++	for (i = 1; i <= edid[0x7e]; i++) {
++		if (i != valid_extensions + 1)
++			memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
++			    edid + i * EDID_LENGTH, EDID_LENGTH);
++		if (drm_edid_block_valid(edid + i * EDID_LENGTH))
++			valid_extensions++;
++	}
++
++	if (valid_extensions != edid[0x7e]) {
++		edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
++		DRM_INFO("Found %d valid extensions instead of %d in EDID data "
++		    "\"%s\" for connector \"%s\"\n", valid_extensions,
++		    edid[0x7e], name, connector_name);
++		edid[0x7e] = valid_extensions;
++		edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
++		    GFP_KERNEL);
++		if (edid == NULL) {
++			err = -ENOMEM;
++			goto relfw_out;
++		}
++	}
++
++	connector->display_info.raw_edid = edid;
++	DRM_INFO("Got %s EDID base block and %d extension%s from "
++	    "\"%s\" for connector \"%s\"\n", builtin ? "built-in" :
++	    "external", valid_extensions, valid_extensions == 1 ? "" : "s",
++	    name, connector_name);
++
++relfw_out:
++	release_firmware(fw);
++
++out:
++	return err;
++}
++
++int drm_load_edid_firmware(struct drm_connector *connector)
++{
++	char *connector_name = drm_get_connector_name(connector);
++	char *edidname = edid_firmware, *last, *colon;
++	int ret = 0;
++
++	if (*edidname == '\0')
++		return ret;
++
++	colon = strchr(edidname, ':');
++	if (colon != NULL) {
++		if (strncmp(connector_name, edidname, colon - edidname))
++			return ret;
++		edidname = colon + 1;
++		if (*edidname == '\0')
++			return ret;
++	}
++
++	last = edidname + strlen(edidname) - 1;
++	if (*last == '\n')
++		*last = '\0';
++
++	ret = edid_load(connector, edidname, connector_name);
++	if (ret)
++		return 0;
++
++	drm_mode_connector_update_edid_property(connector,
++	    (struct edid *) connector->display_info.raw_edid);
++
++	return drm_add_edid_modes(connector, (struct edid *)
++	    connector->display_info.raw_edid);
++}
+diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
+index 5f20644..a91ffb1 100644
+--- a/drivers/gpu/drm/drm_edid_modes.h
++++ b/drivers/gpu/drm/drm_edid_modes.h
+@@ -378,3 +378,287 @@ static const struct {
+ 	{ 1920, 1440, 75, 0 },
+ };
+ static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
++
++/*
++ * Probably taken from CEA-861 spec.
++ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
++ */
++static const struct drm_display_mode edid_cea_modes[] = {
++	/* 640x480 at 60Hz */
++	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
++		   752, 800, 0, 480, 490, 492, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 720x480 at 60Hz */
++	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
++		   798, 858, 0, 480, 489, 495, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 720x480 at 60Hz */
++	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
++		   798, 858, 0, 480, 489, 495, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1280x720 at 60Hz */
++	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
++		   1430, 1650, 0, 720, 725, 730, 750, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1920x1080i at 60Hz */
++	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
++		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 1440x480i at 60Hz */
++	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
++		   1602, 1716, 0, 480, 488, 494, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 1440x480i at 60Hz */
++	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
++		   1602, 1716, 0, 480, 488, 494, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 1440x240 at 60Hz */
++	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
++		   1602, 1716, 0, 240, 244, 247, 262, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1440x240 at 60Hz */
++	{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
++		   1602, 1716, 0, 240, 244, 247, 262, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 2880x480i at 60Hz */
++	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
++		   3204, 3432, 0, 480, 488, 494, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 2880x480i at 60Hz */
++	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
++		   3204, 3432, 0, 480, 488, 494, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 2880x240 at 60Hz */
++	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
++		   3204, 3432, 0, 240, 244, 247, 262, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 2880x240 at 60Hz */
++	{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
++		   3204, 3432, 0, 240, 244, 247, 262, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1440x480 at 60Hz */
++	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
++		   1596, 1716, 0, 480, 489, 495, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1440x480 at 60Hz */
++	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
++		   1596, 1716, 0, 480, 489, 495, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1920x1080 at 60Hz */
++	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
++		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 720x576 at 50Hz */
++	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
++		   796, 864, 0, 576, 581, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 720x576 at 50Hz */
++	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
++		   796, 864, 0, 576, 581, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1280x720 at 50Hz */
++	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
++		   1760, 1980, 0, 720, 725, 730, 750, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1920x1080i at 50Hz */
++	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
++		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 1440x576i at 50Hz */
++	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
++		   1590, 1728, 0, 576, 580, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 1440x576i at 50Hz */
++	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
++		   1590, 1728, 0, 576, 580, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 1440x288 at 50Hz */
++	{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
++		   1590, 1728, 0, 288, 290, 293, 312, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1440x288 at 50Hz */
++	{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
++		   1590, 1728, 0, 288, 290, 293, 312, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 2880x576i at 50Hz */
++	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
++		   3180, 3456, 0, 576, 580, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 2880x576i at 50Hz */
++	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
++		   3180, 3456, 0, 576, 580, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 2880x288 at 50Hz */
++	{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
++		   3180, 3456, 0, 288, 290, 293, 312, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 2880x288 at 50Hz */
++	{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
++		   3180, 3456, 0, 288, 290, 293, 312, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1440x576 at 50Hz */
++	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
++		   1592, 1728, 0, 576, 581, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1440x576 at 50Hz */
++	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
++		   1592, 1728, 0, 576, 581, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1920x1080 at 50Hz */
++	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
++		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1920x1080 at 24Hz */
++	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
++		   2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1920x1080 at 25Hz */
++	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
++		   2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1920x1080 at 30Hz */
++	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
++		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 2880x480 at 60Hz */
++	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
++		   3192, 3432, 0, 480, 489, 495, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 2880x480 at 60Hz */
++	{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
++		   3192, 3432, 0, 480, 489, 495, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 2880x576 at 50Hz */
++	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
++		   3184, 3456, 0, 576, 581, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 2880x576 at 50Hz */
++	{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
++		   3184, 3456, 0, 576, 581, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1920x1080i at 50Hz */
++	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
++		   2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 1920x1080i at 100Hz */
++	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
++		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 1280x720 at 100Hz */
++	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
++		   1760, 1980, 0, 720, 725, 730, 750, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 720x576 at 100Hz */
++	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
++		   796, 864, 0, 576, 581, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 720x576 at 100Hz */
++	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
++		   796, 864, 0, 576, 581, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1440x576i at 100Hz */
++	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
++		   1590, 1728, 0, 576, 580, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1440x576i at 100Hz */
++	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
++		   1590, 1728, 0, 576, 580, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1920x1080i at 120Hz */
++	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
++		   2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 1280x720 at 120Hz */
++	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
++		   1430, 1650, 0, 720, 725, 730, 750, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 720x480 at 120Hz */
++	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
++		   798, 858, 0, 480, 489, 495, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 720x480 at 120Hz */
++	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
++		   798, 858, 0, 480, 489, 495, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1440x480i at 120Hz */
++	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
++		   1602, 1716, 0, 480, 488, 494, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 1440x480i at 120Hz */
++	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
++		   1602, 1716, 0, 480, 488, 494, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 720x576 at 200Hz */
++	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
++		   796, 864, 0, 576, 581, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 720x576 at 200Hz */
++	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
++		   796, 864, 0, 576, 581, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1440x576i at 200Hz */
++	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
++		   1590, 1728, 0, 576, 580, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 1440x576i at 200Hz */
++	{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
++		   1590, 1728, 0, 576, 580, 586, 625, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 720x480 at 240Hz */
++	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
++		   798, 858, 0, 480, 489, 495, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 720x480 at 240Hz */
++	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
++		   798, 858, 0, 480, 489, 495, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1440x480i at 240 */
++	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
++		   1602, 1716, 0, 480, 488, 494, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 1440x480i at 240 */
++	{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
++		   1602, 1716, 0, 480, 488, 494, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
++			DRM_MODE_FLAG_INTERLACE) },
++	/* 1280x720 at 24Hz */
++	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
++		   3080, 3300, 0, 720, 725, 730, 750, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1280x720 at 25Hz */
++	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
++		   3740, 3960, 0, 720, 725, 730, 750, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1280x720 at 30Hz */
++	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
++		   3080, 3300, 0, 720, 725, 730, 750, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1920x1080 at 120Hz */
++	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
++		   2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1920x1080 at 100Hz */
++	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
++		   2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++};
++static const int drm_num_cea_modes =
++	sizeof (edid_cea_modes) / sizeof (edid_cea_modes[0]);
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index dd58373..a0d6e89 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -255,6 +255,13 @@ bool drm_fb_helper_force_kernel_mode(void)
+ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
+ 			void *panic_str)
+ {
++	/*
++	 * It's a waste of time and effort to switch back to text console
++	 * if the kernel should reboot before panic messages can be seen.
++	 */
++	if (panic_timeout < 0)
++		return 0;
++
+ 	printk(KERN_ERR "panic occurred, switching back to text console\n");
+ 	return drm_fb_helper_force_kernel_mode();
+ }
+@@ -299,91 +306,31 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
+ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
+ #endif
+ 
+-static void drm_fb_helper_on(struct fb_info *info)
+-{
+-	struct drm_fb_helper *fb_helper = info->par;
+-	struct drm_device *dev = fb_helper->dev;
+-	struct drm_crtc *crtc;
+-	struct drm_crtc_helper_funcs *crtc_funcs;
+-	struct drm_connector *connector;
+-	struct drm_encoder *encoder;
+-	int i, j;
+-
+-	/*
+-	 * For each CRTC in this fb, turn the crtc on then,
+-	 * find all associated encoders and turn them on.
+-	 */
+-	mutex_lock(&dev->mode_config.mutex);
+-	for (i = 0; i < fb_helper->crtc_count; i++) {
+-		crtc = fb_helper->crtc_info[i].mode_set.crtc;
+-		crtc_funcs = crtc->helper_private;
+-
+-		if (!crtc->enabled)
+-			continue;
+-
+-		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+-
+-		/* Walk the connectors & encoders on this fb turning them on */
+-		for (j = 0; j < fb_helper->connector_count; j++) {
+-			connector = fb_helper->connector_info[j]->connector;
+-			connector->dpms = DRM_MODE_DPMS_ON;
+-			drm_connector_property_set_value(connector,
+-							 dev->mode_config.dpms_property,
+-							 DRM_MODE_DPMS_ON);
+-		}
+-		/* Found a CRTC on this fb, now find encoders */
+-		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+-			if (encoder->crtc == crtc) {
+-				struct drm_encoder_helper_funcs *encoder_funcs;
+-
+-				encoder_funcs = encoder->helper_private;
+-				encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+-			}
+-		}
+-	}
+-	mutex_unlock(&dev->mode_config.mutex);
+-}
+-
+-static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
++static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
+ {
+ 	struct drm_fb_helper *fb_helper = info->par;
+ 	struct drm_device *dev = fb_helper->dev;
+ 	struct drm_crtc *crtc;
+-	struct drm_crtc_helper_funcs *crtc_funcs;
+ 	struct drm_connector *connector;
+-	struct drm_encoder *encoder;
+ 	int i, j;
+ 
+ 	/*
+-	 * For each CRTC in this fb, find all associated encoders
+-	 * and turn them off, then turn off the CRTC.
++	 * For each CRTC in this fb, turn the connectors on/off.
+ 	 */
+ 	mutex_lock(&dev->mode_config.mutex);
+ 	for (i = 0; i < fb_helper->crtc_count; i++) {
+ 		crtc = fb_helper->crtc_info[i].mode_set.crtc;
+-		crtc_funcs = crtc->helper_private;
+ 
+ 		if (!crtc->enabled)
+ 			continue;
+ 
+-		/* Walk the connectors on this fb and mark them off */
++		/* Walk the connectors & encoders on this fb turning them on/off */
+ 		for (j = 0; j < fb_helper->connector_count; j++) {
+ 			connector = fb_helper->connector_info[j]->connector;
+-			connector->dpms = dpms_mode;
++			drm_helper_connector_dpms(connector, dpms_mode);
+ 			drm_connector_property_set_value(connector,
+-							 dev->mode_config.dpms_property,
+-							 dpms_mode);
++				dev->mode_config.dpms_property, dpms_mode);
+ 		}
+-		/* Found a CRTC on this fb, now find encoders */
+-		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+-			if (encoder->crtc == crtc) {
+-				struct drm_encoder_helper_funcs *encoder_funcs;
+-
+-				encoder_funcs = encoder->helper_private;
+-				encoder_funcs->dpms(encoder, dpms_mode);
+-			}
+-		}
+-		crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ 	}
+ 	mutex_unlock(&dev->mode_config.mutex);
+ }
+@@ -393,23 +340,23 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
+ 	switch (blank) {
+ 	/* Display: On; HSync: On, VSync: On */
+ 	case FB_BLANK_UNBLANK:
+-		drm_fb_helper_on(info);
++		drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON);
+ 		break;
+ 	/* Display: Off; HSync: On, VSync: On */
+ 	case FB_BLANK_NORMAL:
+-		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
++		drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
+ 		break;
+ 	/* Display: Off; HSync: Off, VSync: On */
+ 	case FB_BLANK_HSYNC_SUSPEND:
+-		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
++		drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
+ 		break;
+ 	/* Display: Off; HSync: On, VSync: Off */
+ 	case FB_BLANK_VSYNC_SUSPEND:
+-		drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
++		drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND);
+ 		break;
+ 	/* Display: Off; HSync: Off, VSync: Off */
+ 	case FB_BLANK_POWERDOWN:
+-		drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
++		drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF);
+ 		break;
+ 	}
+ 	return 0;
+@@ -423,8 +370,11 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
+ 	for (i = 0; i < helper->connector_count; i++)
+ 		kfree(helper->connector_info[i]);
+ 	kfree(helper->connector_info);
+-	for (i = 0; i < helper->crtc_count; i++)
++	for (i = 0; i < helper->crtc_count; i++) {
+ 		kfree(helper->crtc_info[i].mode_set.connectors);
++		if (helper->crtc_info[i].mode_set.mode)
++			drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
++	}
+ 	kfree(helper->crtc_info);
+ }
+ 
+@@ -467,11 +417,10 @@ int drm_fb_helper_init(struct drm_device *dev,
+ 
+ 	i = 0;
+ 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+-		fb_helper->crtc_info[i].crtc_id = crtc->base.id;
+ 		fb_helper->crtc_info[i].mode_set.crtc = crtc;
+ 		i++;
+ 	}
+-	fb_helper->conn_limit = max_conn_count;
++
+ 	return 0;
+ out_free:
+ 	drm_fb_helper_crtc_free(fb_helper);
+diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
+index 020b103..b90abff 100644
+--- a/drivers/gpu/drm/drm_fops.c
++++ b/drivers/gpu/drm/drm_fops.c
+@@ -133,6 +133,9 @@ int drm_open(struct inode *inode, struct file *filp)
+ 	if (!(dev = minor->dev))
+ 		return -ENODEV;
+ 
++	if (drm_device_is_unplugged(dev))
++		return -ENODEV;
++
+ 	retcode = drm_open_helper(inode, filp, dev);
+ 	if (!retcode) {
+ 		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+@@ -184,8 +187,11 @@ int drm_stub_open(struct inode *inode, struct file *filp)
+ 	if (!(dev = minor->dev))
+ 		goto out;
+ 
++	if (drm_device_is_unplugged(dev))
++		goto out;
++
+ 	old_fops = filp->f_op;
+-	filp->f_op = fops_get(&dev->driver->fops);
++	filp->f_op = fops_get(dev->driver->fops);
+ 	if (filp->f_op == NULL) {
+ 		filp->f_op = old_fops;
+ 		goto out;
+@@ -268,6 +274,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
+ 	if (dev->driver->driver_features & DRIVER_GEM)
+ 		drm_gem_open(dev, priv);
+ 
++	if (drm_core_check_feature(dev, DRIVER_PRIME))
++		drm_prime_init_file_private(&priv->prime);
++
+ 	if (dev->driver->open) {
+ 		ret = dev->driver->open(dev, priv);
+ 		if (ret < 0)
+@@ -501,12 +510,12 @@ int drm_release(struct inode *inode, struct file *filp)
+ 
+ 	drm_events_release(file_priv);
+ 
+-	if (dev->driver->driver_features & DRIVER_GEM)
+-		drm_gem_release(dev, file_priv);
+-
+ 	if (dev->driver->driver_features & DRIVER_MODESET)
+ 		drm_fb_release(file_priv);
+ 
++	if (dev->driver->driver_features & DRIVER_GEM)
++		drm_gem_release(dev, file_priv);
++
+ 	mutex_lock(&dev->ctxlist_mutex);
+ 	if (!list_empty(&dev->ctxlist)) {
+ 		struct drm_ctx_list *pos, *n;
+@@ -568,6 +577,10 @@ int drm_release(struct inode *inode, struct file *filp)
+ 
+ 	if (dev->driver->postclose)
+ 		dev->driver->postclose(dev, file_priv);
++
++	if (drm_core_check_feature(dev, DRIVER_PRIME))
++		drm_prime_destroy_file_private(&file_priv->prime);
++
+ 	kfree(file_priv);
+ 
+ 	/* ========================================================
+@@ -582,6 +595,8 @@ int drm_release(struct inode *inode, struct file *filp)
+ 			retcode = -EBUSY;
+ 		} else
+ 			retcode = drm_lastclose(dev);
++		if (drm_device_is_unplugged(dev))
++			drm_put_dev(dev);
+ 	}
+ 	mutex_unlock(&drm_global_mutex);
+ 
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 396e60c..83114b5 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -35,6 +35,7 @@
+ #include <linux/mman.h>
+ #include <linux/pagemap.h>
+ #include <linux/shmem_fs.h>
++#include <linux/dma-buf.h>
+ #include "drmP.h"
+ 
+ /** @file drm_gem.c
+@@ -140,7 +141,7 @@ int drm_gem_object_init(struct drm_device *dev,
+ 	obj->dev = dev;
+ 	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+ 	if (IS_ERR(obj->filp))
+-		return -ENOMEM;
++		return PTR_ERR(obj->filp);
+ 
+ 	kref_init(&obj->refcount);
+ 	atomic_set(&obj->handle_count, 0);
+@@ -232,6 +233,10 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
+ 	idr_remove(&filp->object_idr, handle);
+ 	spin_unlock(&filp->table_lock);
+ 
++	if (obj->import_attach)
++		drm_prime_remove_imported_buf_handle(&filp->prime,
++				obj->import_attach->dmabuf);
++
+ 	if (dev->driver->gem_close_object)
+ 		dev->driver->gem_close_object(obj, filp);
+ 	drm_gem_object_handle_unreference_unlocked(obj);
+@@ -527,6 +532,10 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
+ 	struct drm_gem_object *obj = ptr;
+ 	struct drm_device *dev = obj->dev;
+ 
++	if (obj->import_attach)
++		drm_prime_remove_imported_buf_handle(&file_priv->prime,
++				obj->import_attach->dmabuf);
++
+ 	if (dev->driver->gem_close_object)
+ 		dev->driver->gem_close_object(obj, file_priv);
+ 
+@@ -661,6 +670,9 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	struct drm_hash_item *hash;
+ 	int ret = 0;
+ 
++	if (drm_device_is_unplugged(dev))
++		return -ENODEV;
++
+ 	mutex_lock(&dev->struct_mutex);
+ 
+ 	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+@@ -700,7 +712,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	 */
+ 	drm_gem_object_reference(obj);
+ 
+-	vma->vm_file = filp;	/* Needed for drm_vm_open() */
+ 	drm_vm_open_locked(vma);
+ 
+ out_unlock:
+diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
+index ddd70db..637fcc3 100644
+--- a/drivers/gpu/drm/drm_ioc32.c
++++ b/drivers/gpu/drm/drm_ioc32.c
+@@ -315,7 +315,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
+ 	if (err)
+ 		return err;
+ 
+-	if (__get_user(c32.auth, &client->auth)
++	if (__get_user(c32.idx, &client->idx)
++	    || __get_user(c32.auth, &client->auth)
+ 	    || __get_user(c32.pid, &client->pid)
+ 	    || __get_user(c32.uid, &client->uid)
+ 	    || __get_user(c32.magic, &client->magic)
+diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
+index 904d7e9..cf85155 100644
+--- a/drivers/gpu/drm/drm_ioctl.c
++++ b/drivers/gpu/drm/drm_ioctl.c
+@@ -37,6 +37,7 @@
+ #include "drm_core.h"
+ 
+ #include "linux/pci.h"
++#include "linux/export.h"
+ 
+ /**
+  * Get the bus id.
+@@ -158,14 +159,11 @@ int drm_getmap(struct drm_device *dev, void *data,
+ 	int i;
+ 
+ 	idx = map->offset;
+-
+-	mutex_lock(&dev->struct_mutex);
+-	if (idx < 0) {
+-		mutex_unlock(&dev->struct_mutex);
++	if (idx < 0)
+ 		return -EINVAL;
+-	}
+ 
+ 	i = 0;
++	mutex_lock(&dev->struct_mutex);
+ 	list_for_each(list, &dev->maplist) {
+ 		if (i == idx) {
+ 			r_list = list_entry(list, struct drm_map_list, head);
+@@ -211,9 +209,9 @@ int drm_getclient(struct drm_device *dev, void *data,
+ 	int i;
+ 
+ 	idx = client->idx;
+-	mutex_lock(&dev->struct_mutex);
+-
+ 	i = 0;
++
++	mutex_lock(&dev->struct_mutex);
+ 	list_for_each_entry(pt, &dev->filelist, lhead) {
+ 		if (i++ >= idx) {
+ 			client->auth = pt->authenticated;
+@@ -249,8 +247,6 @@ int drm_getstats(struct drm_device *dev, void *data,
+ 
+ 	memset(stats, 0, sizeof(*stats));
+ 
+-	mutex_lock(&dev->struct_mutex);
+-
+ 	for (i = 0; i < dev->counters; i++) {
+ 		if (dev->types[i] == _DRM_STAT_LOCK)
+ 			stats->data[i].value =
+@@ -262,8 +258,6 @@ int drm_getstats(struct drm_device *dev, void *data,
+ 
+ 	stats->count = dev->counters;
+ 
+-	mutex_unlock(&dev->struct_mutex);
+-
+ 	return 0;
+ }
+ 
+@@ -283,6 +277,12 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ 	case DRM_CAP_VBLANK_HIGH_CRTC:
+ 		req->value = 1;
+ 		break;
++	case DRM_CAP_DUMB_PREFERRED_DEPTH:
++		req->value = dev->mode_config.preferred_depth;
++		break;
++	case DRM_CAP_DUMB_PREFER_SHADOW:
++		req->value = dev->mode_config.prefer_shadow;
++		break;
+ 	default:
+ 		return -EINVAL;
+ 	}
+@@ -353,3 +353,4 @@ int drm_noop(struct drm_device *dev, void *data,
+ 	DRM_DEBUG("\n");
+ 	return 0;
+ }
++EXPORT_SYMBOL(drm_noop);
+diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
+index 44a5d0a..c869436 100644
+--- a/drivers/gpu/drm/drm_irq.c
++++ b/drivers/gpu/drm/drm_irq.c
+@@ -305,7 +305,7 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state)
+  * \param dev DRM device.
+  *
+  * Initializes the IRQ related data. Installs the handler, calling the driver
+- * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
++ * \c irq_preinstall() and \c irq_postinstall() functions
+  * before and after the installation.
+  */
+ int drm_irq_install(struct drm_device *dev)
+@@ -385,7 +385,7 @@ EXPORT_SYMBOL(drm_irq_install);
+  *
+  * \param dev DRM device.
+  *
+- * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
++ * Calls the driver's \c irq_uninstall() function, and stops the irq.
+  */
+ int drm_irq_uninstall(struct drm_device *dev)
+ {
+diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
+index 632ae24..c79c713 100644
+--- a/drivers/gpu/drm/drm_lock.c
++++ b/drivers/gpu/drm/drm_lock.c
+@@ -33,6 +33,7 @@
+  * OTHER DEALINGS IN THE SOFTWARE.
+  */
+ 
++#include <linux/export.h>
+ #include "drmP.h"
+ 
+ static int drm_notifier(void *priv);
+@@ -345,6 +346,7 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
+ 	}
+ 	spin_unlock_bh(&lock_data->spinlock);
+ }
++EXPORT_SYMBOL(drm_idlelock_take);
+ 
+ void drm_idlelock_release(struct drm_lock_data *lock_data)
+ {
+@@ -364,6 +366,7 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
+ 	}
+ 	spin_unlock_bh(&lock_data->spinlock);
+ }
++EXPORT_SYMBOL(drm_idlelock_release);
+ 
+ int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
+ {
+diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
+index c8b6b66..c86a0f1 100644
+--- a/drivers/gpu/drm/drm_memory.c
++++ b/drivers/gpu/drm/drm_memory.c
+@@ -37,25 +37,6 @@
+ #include <linux/export.h>
+ #include "drmP.h"
+ 
+-/**
+- * Called when "/proc/dri/%dev%/mem" is read.
+- *
+- * \param buf output buffer.
+- * \param start start of output data.
+- * \param offset requested start offset.
+- * \param len requested number of bytes.
+- * \param eof whether there is no more data to return.
+- * \param data private data.
+- * \return number of written bytes.
+- *
+- * No-op.
+- */
+-int drm_mem_info(char *buf, char **start, off_t offset,
+-		 int len, int *eof, void *data)
+-{
+-	return 0;
+-}
+-
+ #if __OS_HAS_AGP
+ static void *agp_remap(unsigned long offset, unsigned long size,
+ 		       struct drm_device * dev)
+diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
+index fb8e46b..b7adb4a 100644
+--- a/drivers/gpu/drm/drm_modes.c
++++ b/drivers/gpu/drm/drm_modes.c
+@@ -686,8 +686,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
+ 			p->crtc_vsync_end /= 2;
+ 			p->crtc_vtotal /= 2;
+ 		}
+-
+-		p->crtc_vtotal |= 1;
+ 	}
+ 
+ 	if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+@@ -716,6 +714,27 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
+ 
+ 
+ /**
++ * drm_mode_copy - copy the mode
++ * @dst: mode to overwrite
++ * @src: mode to copy
++ *
++ * LOCKING:
++ * None.
++ *
++ * Copy an existing mode into another mode, preserving the object id
++ * of the destination mode.
++ */
++void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
++{
++	int id = dst->base.id;
++
++	*dst = *src;
++	dst->base.id = id;
++	INIT_LIST_HEAD(&dst->head);
++}
++EXPORT_SYMBOL(drm_mode_copy);
++
++/**
+  * drm_mode_duplicate - allocate and duplicate an existing mode
+  * @m: mode to duplicate
+  *
+@@ -729,16 +748,13 @@ struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+ 					    const struct drm_display_mode *mode)
+ {
+ 	struct drm_display_mode *nmode;
+-	int new_id;
+ 
+ 	nmode = drm_mode_create(dev);
+ 	if (!nmode)
+ 		return NULL;
+ 
+-	new_id = nmode->base.id;
+-	*nmode = *mode;
+-	nmode->base.id = new_id;
+-	INIT_LIST_HEAD(&nmode->head);
++	drm_mode_copy(nmode, mode);
++
+ 	return nmode;
+ }
+ EXPORT_SYMBOL(drm_mode_duplicate);
+diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
+index d4d10b7..13f3d93 100644
+--- a/drivers/gpu/drm/drm_pci.c
++++ b/drivers/gpu/drm/drm_pci.c
+@@ -324,8 +324,6 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+ 	if (ret)
+ 		goto err_g1;
+ 
+-	pci_set_master(pdev);
+-
+ 	dev->pdev = pdev;
+ 	dev->dev = &pdev->dev;
+ 
+diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
+index ae9db5e..82431dc 100644
+--- a/drivers/gpu/drm/drm_platform.c
++++ b/drivers/gpu/drm/drm_platform.c
+@@ -122,7 +122,7 @@ static const char *drm_platform_get_name(struct drm_device *dev)
+ 
+ static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
+ {
+-	int len, ret;
++	int len, ret, id;
+ 
+ 	master->unique_len = 13 + strlen(dev->platformdev->name);
+ 	master->unique_size = master->unique_len;
+@@ -131,8 +131,16 @@ static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *mas
+ 	if (master->unique == NULL)
+ 		return -ENOMEM;
+ 
++	id = dev->platformdev->id;
++
++	/* if only a single instance of the platform device, id will be
++	 * set to -1.. use 0 instead to avoid a funny looking bus-id:
++	 */
++	if (id == -1)
++		id = 0;
++
+ 	len = snprintf(master->unique, master->unique_len,
+-			"platform:%s:%02d", dev->platformdev->name, dev->platformdev->id);
++			"platform:%s:%02d", dev->platformdev->name, id);
+ 
+ 	if (len > master->unique_len) {
+ 		DRM_ERROR("Unique buffer overflowed\n");
+diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
+new file mode 100644
+index 0000000..1bdf2b5
+--- /dev/null
++++ b/drivers/gpu/drm/drm_prime.c
+@@ -0,0 +1,304 @@
++/*
++ * Copyright © 2012 Red Hat
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ *
++ * Authors:
++ *      Dave Airlie <airlied at redhat.com>
++ *      Rob Clark <rob.clark at linaro.org>
++ *
++ */
++
++#include <linux/export.h>
++#include <linux/dma-buf.h>
++#include "drmP.h"
++
++/*
++ * DMA-BUF/GEM Object references and lifetime overview:
++ *
++ * On the export the dma_buf holds a reference to the exporting GEM
++ * object. It takes this reference in handle_to_fd_ioctl, when it
++ * first calls .prime_export and stores the exporting GEM object in
++ * the dma_buf priv. This reference is released when the dma_buf
++ * object goes away in the driver .release function.
++ *
++ * On the import the importing GEM object holds a reference to the
++ * dma_buf (which in turn holds a ref to the exporting GEM object).
++ * It takes that reference in the fd_to_handle ioctl.
++ * It calls dma_buf_get, creates an attachment to it and stores the
++ * attachment in the GEM object. When this attachment is destroyed
++ * when the imported object is destroyed, we remove the attachment
++ * and drop the reference to the dma_buf.
++ *
++ * Thus the chain of references always flows in one direction
++ * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
++ *
++ * Self-importing: if userspace is using PRIME as a replacement for flink
++ * then it will get a fd->handle request for a GEM object that it created.
++ * Drivers should detect this situation and return back the gem object
++ * from the dma-buf private.
++ */
++
++struct drm_prime_member {
++	struct list_head entry;
++	struct dma_buf *dma_buf;
++	uint32_t handle;
++};
++
++int drm_gem_prime_handle_to_fd(struct drm_device *dev,
++		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
++		int *prime_fd)
++{
++	struct drm_gem_object *obj;
++	void *buf;
++
++	obj = drm_gem_object_lookup(dev, file_priv, handle);
++	if (!obj)
++		return -ENOENT;
++
++	mutex_lock(&file_priv->prime.lock);
++	/* re-export the original imported object */
++	if (obj->import_attach) {
++		get_dma_buf(obj->import_attach->dmabuf);
++		*prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
++		drm_gem_object_unreference_unlocked(obj);
++		mutex_unlock(&file_priv->prime.lock);
++		return 0;
++	}
++
++	if (obj->export_dma_buf) {
++		get_dma_buf(obj->export_dma_buf);
++		*prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
++		drm_gem_object_unreference_unlocked(obj);
++	} else {
++		buf = dev->driver->gem_prime_export(dev, obj, flags);
++		if (IS_ERR(buf)) {
++			/* normally the created dma-buf takes ownership of the ref,
++			 * but if that fails then drop the ref
++			 */
++			drm_gem_object_unreference_unlocked(obj);
++			mutex_unlock(&file_priv->prime.lock);
++			return PTR_ERR(buf);
++		}
++		obj->export_dma_buf = buf;
++		*prime_fd = dma_buf_fd(buf, flags);
++	}
++	mutex_unlock(&file_priv->prime.lock);
++	return 0;
++}
++EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
++
++int drm_gem_prime_fd_to_handle(struct drm_device *dev,
++		struct drm_file *file_priv, int prime_fd, uint32_t *handle)
++{
++	struct dma_buf *dma_buf;
++	struct drm_gem_object *obj;
++	int ret;
++
++	dma_buf = dma_buf_get(prime_fd);
++	if (IS_ERR(dma_buf))
++		return PTR_ERR(dma_buf);
++
++	mutex_lock(&file_priv->prime.lock);
++
++	ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
++			dma_buf, handle);
++	if (!ret) {
++		ret = 0;
++		goto out_put;
++	}
++
++	/* never seen this one, need to import */
++	obj = dev->driver->gem_prime_import(dev, dma_buf);
++	if (IS_ERR(obj)) {
++		ret = PTR_ERR(obj);
++		goto out_put;
++	}
++
++	ret = drm_gem_handle_create(file_priv, obj, handle);
++	drm_gem_object_unreference_unlocked(obj);
++	if (ret)
++		goto out_put;
++
++	ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
++			dma_buf, *handle);
++	if (ret)
++		goto fail;
++
++	mutex_unlock(&file_priv->prime.lock);
++	return 0;
++
++fail:
++	/* hmm, if driver attached, we are relying on the free-object path
++	 * to detach.. which seems ok..
++	 */
++	drm_gem_object_handle_unreference_unlocked(obj);
++out_put:
++	dma_buf_put(dma_buf);
++	mutex_unlock(&file_priv->prime.lock);
++	return ret;
++}
++EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
++
++int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
++				 struct drm_file *file_priv)
++{
++	struct drm_prime_handle *args = data;
++	uint32_t flags;
++
++	if (!drm_core_check_feature(dev, DRIVER_PRIME))
++		return -EINVAL;
++
++	if (!dev->driver->prime_handle_to_fd)
++		return -ENOSYS;
++
++	/* check flags are valid */
++	if (args->flags & ~DRM_CLOEXEC)
++		return -EINVAL;
++
++	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
++	flags = args->flags & DRM_CLOEXEC;
++
++	return dev->driver->prime_handle_to_fd(dev, file_priv,
++			args->handle, flags, &args->fd);
++}
++
++int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
++				 struct drm_file *file_priv)
++{
++	struct drm_prime_handle *args = data;
++
++	if (!drm_core_check_feature(dev, DRIVER_PRIME))
++		return -EINVAL;
++
++	if (!dev->driver->prime_fd_to_handle)
++		return -ENOSYS;
++
++	return dev->driver->prime_fd_to_handle(dev, file_priv,
++			args->fd, &args->handle);
++}
++
++/*
++ * drm_prime_pages_to_sg
++ *
++ * this helper creates an sg table object from a set of pages
++ * the driver is responsible for mapping the pages into the
++ * importers address space
++ */
++struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
++{
++	struct sg_table *sg = NULL;
++	struct scatterlist *iter;
++	int i;
++	int ret;
++
++	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
++	if (!sg)
++		goto out;
++
++	ret = sg_alloc_table(sg, nr_pages, GFP_KERNEL);
++	if (ret)
++		goto out;
++
++	for_each_sg(sg->sgl, iter, nr_pages, i)
++		sg_set_page(iter, pages[i], PAGE_SIZE, 0);
++
++	return sg;
++out:
++	kfree(sg);
++	return NULL;
++}
++EXPORT_SYMBOL(drm_prime_pages_to_sg);
++
++/* helper function to cleanup a GEM/prime object */
++void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
++{
++	struct dma_buf_attachment *attach;
++	struct dma_buf *dma_buf;
++	attach = obj->import_attach;
++	if (sg)
++		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
++	dma_buf = attach->dmabuf;
++	dma_buf_detach(attach->dmabuf, attach);
++	/* remove the reference */
++	dma_buf_put(dma_buf);
++}
++EXPORT_SYMBOL(drm_prime_gem_destroy);
++
++void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
++{
++	INIT_LIST_HEAD(&prime_fpriv->head);
++	mutex_init(&prime_fpriv->lock);
++}
++EXPORT_SYMBOL(drm_prime_init_file_private);
++
++void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
++{
++	struct drm_prime_member *member, *safe;
++	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
++		list_del(&member->entry);
++		kfree(member);
++	}
++}
++EXPORT_SYMBOL(drm_prime_destroy_file_private);
++
++int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
++{
++	struct drm_prime_member *member;
++
++	member = kmalloc(sizeof(*member), GFP_KERNEL);
++	if (!member)
++		return -ENOMEM;
++
++	member->dma_buf = dma_buf;
++	member->handle = handle;
++	list_add(&member->entry, &prime_fpriv->head);
++	return 0;
++}
++EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
++
++int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
++{
++	struct drm_prime_member *member;
++
++	list_for_each_entry(member, &prime_fpriv->head, entry) {
++		if (member->dma_buf == dma_buf) {
++			*handle = member->handle;
++			return 0;
++		}
++	}
++	return -ENOENT;
++}
++EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
++
++void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
++{
++	struct drm_prime_member *member, *safe;
++
++	mutex_lock(&prime_fpriv->lock);
++	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
++		if (member->dma_buf == dma_buf) {
++			list_del(&member->entry);
++			kfree(member);
++		}
++	}
++	mutex_unlock(&prime_fpriv->lock);
++}
++EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
+diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
+deleted file mode 100644
+index cebce45..0000000
+--- a/drivers/gpu/drm/drm_sman.c
++++ /dev/null
+@@ -1,351 +0,0 @@
+-/**************************************************************************
+- *
+- * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
+- * All Rights Reserved.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the
+- * "Software"), to deal in the Software without restriction, including
+- * without limitation the rights to use, copy, modify, merge, publish,
+- * distribute, sub license, and/or sell copies of the Software, and to
+- * permit persons to whom the Software is furnished to do so, subject to
+- * the following conditions:
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * The above copyright notice and this permission notice (including the
+- * next paragraph) shall be included in all copies or substantial portions
+- * of the Software.
+- *
+- *
+- **************************************************************************/
+-/*
+- * Simple memory manager interface that keeps track on allocate regions on a
+- * per "owner" basis. All regions associated with an "owner" can be released
+- * with a simple call. Typically if the "owner" exists. The owner is any
+- * "unsigned long" identifier. Can typically be a pointer to a file private
+- * struct or a context identifier.
+- *
+- * Authors:
+- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+- */
+-
+-#include <linux/export.h>
+-#include "drm_sman.h"
+-
+-struct drm_owner_item {
+-	struct drm_hash_item owner_hash;
+-	struct list_head sman_list;
+-	struct list_head mem_blocks;
+-};
+-
+-void drm_sman_takedown(struct drm_sman * sman)
+-{
+-	drm_ht_remove(&sman->user_hash_tab);
+-	drm_ht_remove(&sman->owner_hash_tab);
+-	kfree(sman->mm);
+-}
+-
+-EXPORT_SYMBOL(drm_sman_takedown);
+-
+-int
+-drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
+-	      unsigned int user_order, unsigned int owner_order)
+-{
+-	int ret = 0;
+-
+-	sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
+-	if (!sman->mm) {
+-		ret = -ENOMEM;
+-		goto out;
+-	}
+-	sman->num_managers = num_managers;
+-	INIT_LIST_HEAD(&sman->owner_items);
+-	ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
+-	if (ret)
+-		goto out1;
+-	ret = drm_ht_create(&sman->user_hash_tab, user_order);
+-	if (!ret)
+-		goto out;
+-
+-	drm_ht_remove(&sman->owner_hash_tab);
+-out1:
+-	kfree(sman->mm);
+-out:
+-	return ret;
+-}
+-
+-EXPORT_SYMBOL(drm_sman_init);
+-
+-static void *drm_sman_mm_allocate(void *private, unsigned long size,
+-				  unsigned alignment)
+-{
+-	struct drm_mm *mm = (struct drm_mm *) private;
+-	struct drm_mm_node *tmp;
+-
+-	tmp = drm_mm_search_free(mm, size, alignment, 1);
+-	if (!tmp) {
+-		return NULL;
+-	}
+-	tmp = drm_mm_get_block(tmp, size, alignment);
+-	return tmp;
+-}
+-
+-static void drm_sman_mm_free(void *private, void *ref)
+-{
+-	struct drm_mm_node *node = (struct drm_mm_node *) ref;
+-
+-	drm_mm_put_block(node);
+-}
+-
+-static void drm_sman_mm_destroy(void *private)
+-{
+-	struct drm_mm *mm = (struct drm_mm *) private;
+-	drm_mm_takedown(mm);
+-	kfree(mm);
+-}
+-
+-static unsigned long drm_sman_mm_offset(void *private, void *ref)
+-{
+-	struct drm_mm_node *node = (struct drm_mm_node *) ref;
+-	return node->start;
+-}
+-
+-int
+-drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
+-		   unsigned long start, unsigned long size)
+-{
+-	struct drm_sman_mm *sman_mm;
+-	struct drm_mm *mm;
+-	int ret;
+-
+-	BUG_ON(manager >= sman->num_managers);
+-
+-	sman_mm = &sman->mm[manager];
+-	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+-	if (!mm) {
+-		return -ENOMEM;
+-	}
+-	sman_mm->private = mm;
+-	ret = drm_mm_init(mm, start, size);
+-
+-	if (ret) {
+-		kfree(mm);
+-		return ret;
+-	}
+-
+-	sman_mm->allocate = drm_sman_mm_allocate;
+-	sman_mm->free = drm_sman_mm_free;
+-	sman_mm->destroy = drm_sman_mm_destroy;
+-	sman_mm->offset = drm_sman_mm_offset;
+-
+-	return 0;
+-}
+-
+-EXPORT_SYMBOL(drm_sman_set_range);
+-
+-int
+-drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
+-		     struct drm_sman_mm * allocator)
+-{
+-	BUG_ON(manager >= sman->num_managers);
+-	sman->mm[manager] = *allocator;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL(drm_sman_set_manager);
+-
+-static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
+-						 unsigned long owner)
+-{
+-	int ret;
+-	struct drm_hash_item *owner_hash_item;
+-	struct drm_owner_item *owner_item;
+-
+-	ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
+-	if (!ret) {
+-		return drm_hash_entry(owner_hash_item, struct drm_owner_item,
+-				      owner_hash);
+-	}
+-
+-	owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
+-	if (!owner_item)
+-		goto out;
+-
+-	INIT_LIST_HEAD(&owner_item->mem_blocks);
+-	owner_item->owner_hash.key = owner;
+-	if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
+-		goto out1;
+-
+-	list_add_tail(&owner_item->sman_list, &sman->owner_items);
+-	return owner_item;
+-
+-out1:
+-	kfree(owner_item);
+-out:
+-	return NULL;
+-}
+-
+-struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
+-				    unsigned long size, unsigned alignment,
+-				    unsigned long owner)
+-{
+-	void *tmp;
+-	struct drm_sman_mm *sman_mm;
+-	struct drm_owner_item *owner_item;
+-	struct drm_memblock_item *memblock;
+-
+-	BUG_ON(manager >= sman->num_managers);
+-
+-	sman_mm = &sman->mm[manager];
+-	tmp = sman_mm->allocate(sman_mm->private, size, alignment);
+-
+-	if (!tmp) {
+-		return NULL;
+-	}
+-
+-	memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
+-
+-	if (!memblock)
+-		goto out;
+-
+-	memblock->mm_info = tmp;
+-	memblock->mm = sman_mm;
+-	memblock->sman = sman;
+-
+-	if (drm_ht_just_insert_please
+-	    (&sman->user_hash_tab, &memblock->user_hash,
+-	     (unsigned long)memblock, 32, 0, 0))
+-		goto out1;
+-
+-	owner_item = drm_sman_get_owner_item(sman, owner);
+-	if (!owner_item)
+-		goto out2;
+-
+-	list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
+-
+-	return memblock;
+-
+-out2:
+-	drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
+-out1:
+-	kfree(memblock);
+-out:
+-	sman_mm->free(sman_mm->private, tmp);
+-
+-	return NULL;
+-}
+-
+-EXPORT_SYMBOL(drm_sman_alloc);
+-
+-static void drm_sman_free(struct drm_memblock_item *item)
+-{
+-	struct drm_sman *sman = item->sman;
+-
+-	list_del(&item->owner_list);
+-	drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
+-	item->mm->free(item->mm->private, item->mm_info);
+-	kfree(item);
+-}
+-
+-int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
+-{
+-	struct drm_hash_item *hash_item;
+-	struct drm_memblock_item *memblock_item;
+-
+-	if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
+-		return -EINVAL;
+-
+-	memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
+-				       user_hash);
+-	drm_sman_free(memblock_item);
+-	return 0;
+-}
+-
+-EXPORT_SYMBOL(drm_sman_free_key);
+-
+-static void drm_sman_remove_owner(struct drm_sman *sman,
+-				  struct drm_owner_item *owner_item)
+-{
+-	list_del(&owner_item->sman_list);
+-	drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
+-	kfree(owner_item);
+-}
+-
+-int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
+-{
+-
+-	struct drm_hash_item *hash_item;
+-	struct drm_owner_item *owner_item;
+-
+-	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
+-		return -1;
+-	}
+-
+-	owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
+-	if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
+-		drm_sman_remove_owner(sman, owner_item);
+-		return -1;
+-	}
+-
+-	return 0;
+-}
+-
+-EXPORT_SYMBOL(drm_sman_owner_clean);
+-
+-static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
+-				      struct drm_owner_item *owner_item)
+-{
+-	struct drm_memblock_item *entry, *next;
+-
+-	list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
+-				 owner_list) {
+-		drm_sman_free(entry);
+-	}
+-	drm_sman_remove_owner(sman, owner_item);
+-}
+-
+-void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
+-{
+-
+-	struct drm_hash_item *hash_item;
+-	struct drm_owner_item *owner_item;
+-
+-	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
+-
+-		return;
+-	}
+-
+-	owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
+-	drm_sman_do_owner_cleanup(sman, owner_item);
+-}
+-
+-EXPORT_SYMBOL(drm_sman_owner_cleanup);
+-
+-void drm_sman_cleanup(struct drm_sman *sman)
+-{
+-	struct drm_owner_item *entry, *next;
+-	unsigned int i;
+-	struct drm_sman_mm *sman_mm;
+-
+-	list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
+-		drm_sman_do_owner_cleanup(sman, entry);
+-	}
+-	if (sman->mm) {
+-		for (i = 0; i < sman->num_managers; ++i) {
+-			sman_mm = &sman->mm[i];
+-			if (sman_mm->private) {
+-				sman_mm->destroy(sman_mm->private);
+-				sman_mm->private = NULL;
+-			}
+-		}
+-	}
+-}
+-
+-EXPORT_SYMBOL(drm_sman_cleanup);
+diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
+index 6d7b083..aa454f8 100644
+--- a/drivers/gpu/drm/drm_stub.c
++++ b/drivers/gpu/drm/drm_stub.c
+@@ -319,6 +319,7 @@ int drm_fill_in_dev(struct drm_device *dev,
+ 	drm_lastclose(dev);
+ 	return retcode;
+ }
++EXPORT_SYMBOL(drm_fill_in_dev);
+ 
+ 
+ /**
+@@ -397,6 +398,7 @@ err_idr:
+ 	*minor = NULL;
+ 	return ret;
+ }
++EXPORT_SYMBOL(drm_get_minor);
+ 
+ /**
+  * Put a secondary minor number.
+@@ -428,6 +430,12 @@ int drm_put_minor(struct drm_minor **minor_p)
+ 	*minor_p = NULL;
+ 	return 0;
+ }
++EXPORT_SYMBOL(drm_put_minor);
++
++static void drm_unplug_minor(struct drm_minor *minor)
++{
++	drm_sysfs_device_remove(minor);
++}
+ 
+ /**
+  * Called via drm_exit() at module unload time or when pci device is
+@@ -492,3 +500,21 @@ void drm_put_dev(struct drm_device *dev)
+ 	kfree(dev);
+ }
+ EXPORT_SYMBOL(drm_put_dev);
++
++void drm_unplug_dev(struct drm_device *dev)
++{
++	/* for a USB device */
++	if (drm_core_check_feature(dev, DRIVER_MODESET))
++		drm_unplug_minor(dev->control);
++	drm_unplug_minor(dev->primary);
++
++	mutex_lock(&drm_global_mutex);
++
++	drm_device_set_unplugged(dev);
++
++	if (dev->open_count == 0) {
++		drm_put_dev(dev);
++	}
++	mutex_unlock(&drm_global_mutex);
++}
++EXPORT_SYMBOL(drm_unplug_dev);
+diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
+index 0f9ef9b..5a7bd51 100644
+--- a/drivers/gpu/drm/drm_sysfs.c
++++ b/drivers/gpu/drm/drm_sysfs.c
+@@ -72,7 +72,7 @@ static int drm_class_resume(struct device *dev)
+ 	return 0;
+ }
+ 
+-static char *drm_devnode(struct device *dev, mode_t *mode)
++static char *drm_devnode(struct device *dev, umode_t *mode)
+ {
+ 	return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
+ }
+@@ -454,6 +454,8 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
+ {
+ 	int i;
+ 
++	if (!connector->kdev.parent)
++		return;
+ 	DRM_DEBUG("removing \"%s\" from sysfs\n",
+ 		  drm_get_connector_name(connector));
+ 
+@@ -461,6 +463,7 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
+ 		device_remove_file(&connector->kdev, &connector_attrs[i]);
+ 	sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
+ 	device_unregister(&connector->kdev);
++	connector->kdev.parent = NULL;
+ }
+ EXPORT_SYMBOL(drm_sysfs_connector_remove);
+ 
+@@ -533,7 +536,9 @@ err_out:
+  */
+ void drm_sysfs_device_remove(struct drm_minor *minor)
+ {
+-	device_unregister(&minor->kdev);
++	if (minor->kdev.parent)
++		device_unregister(&minor->kdev);
++	minor->kdev.parent = NULL;
+ }
+ 
+ 
+diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
+index 445003f..37c9a52 100644
+--- a/drivers/gpu/drm/drm_usb.c
++++ b/drivers/gpu/drm/drm_usb.c
+@@ -1,8 +1,7 @@
+ #include "drmP.h"
+ #include <linux/usb.h>
+-#include <linux/export.h>
++#include <linux/module.h>
+ 
+-#ifdef CONFIG_USB
+ int drm_get_usb_dev(struct usb_interface *interface,
+ 		    const struct usb_device_id *id,
+ 		    struct drm_driver *driver)
+@@ -115,4 +114,7 @@ void drm_usb_exit(struct drm_driver *driver,
+ 	usb_deregister(udriver);
+ }
+ EXPORT_SYMBOL(drm_usb_exit);
+-#endif
++
++MODULE_AUTHOR("David Airlie");
++MODULE_DESCRIPTION("USB DRM support");
++MODULE_LICENSE("GPL and additional rights");
+diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
+index 8c03eaf..1495618 100644
+--- a/drivers/gpu/drm/drm_vm.c
++++ b/drivers/gpu/drm/drm_vm.c
+@@ -519,7 +519,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
+ 	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
+ 	vma->vm_flags |= VM_DONTEXPAND;
+ 
+-	vma->vm_file = filp;	/* Needed for drm_vm_open() */
+ 	drm_vm_open_locked(vma);
+ 	return 0;
+ }
+@@ -671,7 +670,6 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+ 	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
+ 	vma->vm_flags |= VM_DONTEXPAND;
+ 
+-	vma->vm_file = filp;	/* Needed for drm_vm_open() */
+ 	drm_vm_open_locked(vma);
+ 	return 0;
+ }
+@@ -682,6 +680,9 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	struct drm_device *dev = priv->minor->dev;
+ 	int ret;
+ 
++	if (drm_device_is_unplugged(dev))
++		return -ENODEV;
++
+ 	mutex_lock(&dev->struct_mutex);
+ 	ret = drm_mmap_locked(filp, vma);
+ 	mutex_unlock(&dev->struct_mutex);
+diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
+index 847466a..3343ac4 100644
+--- a/drivers/gpu/drm/exynos/Kconfig
++++ b/drivers/gpu/drm/exynos/Kconfig
+@@ -1,7 +1,6 @@
+ config DRM_EXYNOS
+ 	tristate "DRM Support for Samsung SoC EXYNOS Series"
+ 	depends on DRM && PLAT_SAMSUNG
+-	default	n
+ 	select DRM_KMS_HELPER
+ 	select FB_CFB_FILLRECT
+ 	select FB_CFB_COPYAREA
+@@ -12,9 +11,19 @@ config DRM_EXYNOS
+ 	  If M is selected the module will be called exynosdrm.
+ 
+ config DRM_EXYNOS_FIMD
+-	tristate "Exynos DRM FIMD"
+-	depends on DRM_EXYNOS
+-	default n
++	bool "Exynos DRM FIMD"
++	depends on DRM_EXYNOS && !FB_S3C
+ 	help
+ 	  Choose this option if you want to use Exynos FIMD for DRM.
+-	  If M is selected, the module will be called exynos_drm_fimd
++
++config DRM_EXYNOS_HDMI
++	bool "Exynos DRM HDMI"
++	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_TV
++	help
++	  Choose this option if you want to use Exynos HDMI for DRM.
++
++config DRM_EXYNOS_VIDI
++	bool "Exynos DRM Virtual Display"
++	depends on DRM_EXYNOS
++	help
++	  Choose this option if you want to use Exynos VIDI for DRM.
+diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
+index 0496d3f..9e0bff8 100644
+--- a/drivers/gpu/drm/exynos/Makefile
++++ b/drivers/gpu/drm/exynos/Makefile
+@@ -5,7 +5,13 @@
+ ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
+ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
+ 		exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
+-		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o
++		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
++		exynos_drm_plane.o
+ 
+-obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
+-obj-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
++exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD)	+= exynos_drm_fimd.o
++exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI)	+= exynos_hdmi.o exynos_mixer.o \
++					   exynos_ddc.o exynos_hdmiphy.o \
++					   exynos_drm_hdmi.o
++exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI)	+= exynos_drm_vidi.o
++
++obj-$(CONFIG_DRM_EXYNOS)		+= exynosdrm.o
+diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
+new file mode 100644
+index 0000000..7e1051d
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/exynos_ddc.c
+@@ -0,0 +1,57 @@
++/*
++ * Copyright (C) 2011 Samsung Electronics Co.Ltd
++ * Authors:
++ *	Seung-Woo Kim <sw0312.kim at samsung.com>
++ *	Inki Dae <inki.dae at samsung.com>
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ *
++ */
++
++#include "drmP.h"
++
++#include <linux/kernel.h>
++#include <linux/i2c.h>
++#include <linux/module.h>
++
++
++#include "exynos_drm_drv.h"
++#include "exynos_hdmi.h"
++
++static int s5p_ddc_probe(struct i2c_client *client,
++			const struct i2c_device_id *dev_id)
++{
++	hdmi_attach_ddc_client(client);
++
++	dev_info(&client->adapter->dev, "attached s5p_ddc "
++		"into i2c adapter successfully\n");
++
++	return 0;
++}
++
++static int s5p_ddc_remove(struct i2c_client *client)
++{
++	dev_info(&client->adapter->dev, "detached s5p_ddc "
++		"from i2c adapter successfully\n");
++
++	return 0;
++}
++
++static struct i2c_device_id ddc_idtable[] = {
++	{"s5p_ddc", 0},
++	{ },
++};
++
++struct i2c_driver ddc_driver = {
++	.driver = {
++		.name = "s5p_ddc",
++		.owner = THIS_MODULE,
++	},
++	.id_table	= ddc_idtable,
++	.probe		= s5p_ddc_probe,
++	.remove		= __devexit_p(s5p_ddc_remove),
++	.command		= NULL,
++};
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
+index 2bb07bc..de8d209 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
+@@ -25,45 +25,142 @@
+ 
+ #include "drmP.h"
+ #include "drm.h"
++#include "exynos_drm.h"
+ 
+ #include "exynos_drm_drv.h"
+ #include "exynos_drm_gem.h"
+ #include "exynos_drm_buf.h"
+ 
+ static int lowlevel_buffer_allocate(struct drm_device *dev,
+-		struct exynos_drm_gem_buf *buffer)
++		unsigned int flags, struct exynos_drm_gem_buf *buf)
+ {
++	dma_addr_t start_addr;
++	unsigned int npages, page_size, i = 0;
++	struct scatterlist *sgl;
++	int ret = 0;
++
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+-	buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
+-			&buffer->dma_addr, GFP_KERNEL);
+-	if (!buffer->kvaddr) {
+-		DRM_ERROR("failed to allocate buffer.\n");
++	if (IS_NONCONTIG_BUFFER(flags)) {
++		DRM_DEBUG_KMS("not support allocation type.\n");
++		return -EINVAL;
++	}
++
++	if (buf->dma_addr) {
++		DRM_DEBUG_KMS("already allocated.\n");
++		return 0;
++	}
++
++	if (buf->size >= SZ_1M) {
++		npages = buf->size >> SECTION_SHIFT;
++		page_size = SECTION_SIZE;
++	} else if (buf->size >= SZ_64K) {
++		npages = buf->size >> 16;
++		page_size = SZ_64K;
++	} else {
++		npages = buf->size >> PAGE_SHIFT;
++		page_size = PAGE_SIZE;
++	}
++
++	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
++	if (!buf->sgt) {
++		DRM_ERROR("failed to allocate sg table.\n");
+ 		return -ENOMEM;
+ 	}
+ 
+-	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+-			(unsigned long)buffer->kvaddr,
+-			(unsigned long)buffer->dma_addr,
+-			buffer->size);
++	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
++	if (ret < 0) {
++		DRM_ERROR("failed to initialize sg table.\n");
++		kfree(buf->sgt);
++		buf->sgt = NULL;
++		return -ENOMEM;
++	}
+ 
+-	return 0;
++	buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
++			&buf->dma_addr, GFP_KERNEL);
++	if (!buf->kvaddr) {
++		DRM_ERROR("failed to allocate buffer.\n");
++		ret = -ENOMEM;
++		goto err1;
++	}
++
++	buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
++	if (!buf->pages) {
++		DRM_ERROR("failed to allocate pages.\n");
++		ret = -ENOMEM;
++		goto err2;
++	}
++
++	sgl = buf->sgt->sgl;
++	start_addr = buf->dma_addr;
++
++	while (i < npages) {
++		buf->pages[i] = phys_to_page(start_addr);
++		sg_set_page(sgl, buf->pages[i], page_size, 0);
++		sg_dma_address(sgl) = start_addr;
++		start_addr += page_size;
++		sgl = sg_next(sgl);
++		i++;
++	}
++
++	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
++			(unsigned long)buf->kvaddr,
++			(unsigned long)buf->dma_addr,
++			buf->size);
++
++	return ret;
++err2:
++	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
++			(dma_addr_t)buf->dma_addr);
++	buf->dma_addr = (dma_addr_t)NULL;
++err1:
++	sg_free_table(buf->sgt);
++	kfree(buf->sgt);
++	buf->sgt = NULL;
++
++	return ret;
+ }
+ 
+ static void lowlevel_buffer_deallocate(struct drm_device *dev,
+-		struct exynos_drm_gem_buf *buffer)
++		unsigned int flags, struct exynos_drm_gem_buf *buf)
+ {
+ 	DRM_DEBUG_KMS("%s.\n", __FILE__);
+ 
+-	if (buffer->dma_addr && buffer->size)
+-		dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
+-				(dma_addr_t)buffer->dma_addr);
+-	else
+-		DRM_DEBUG_KMS("buffer data are invalid.\n");
++	/*
++	 * release only physically continuous memory and
++	 * non-continuous memory would be released by exynos
++	 * gem framework.
++	 */
++	if (IS_NONCONTIG_BUFFER(flags)) {
++		DRM_DEBUG_KMS("not support allocation type.\n");
++		return;
++	}
++
++	if (!buf->dma_addr) {
++		DRM_DEBUG_KMS("dma_addr is invalid.\n");
++		return;
++	}
++
++	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
++			(unsigned long)buf->kvaddr,
++			(unsigned long)buf->dma_addr,
++			buf->size);
++
++	sg_free_table(buf->sgt);
++
++	kfree(buf->sgt);
++	buf->sgt = NULL;
++
++	kfree(buf->pages);
++	buf->pages = NULL;
++
++	dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
++				(dma_addr_t)buf->dma_addr);
++	buf->dma_addr = (dma_addr_t)NULL;
+ }
+ 
+-struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
+-		unsigned int size)
++struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
++						unsigned int size)
+ {
+ 	struct exynos_drm_gem_buf *buffer;
+ 
+@@ -73,26 +170,15 @@ struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
+ 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ 	if (!buffer) {
+ 		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
+-		return ERR_PTR(-ENOMEM);
++		return NULL;
+ 	}
+ 
+ 	buffer->size = size;
+-
+-	/*
+-	 * allocate memory region with size and set the memory information
+-	 * to vaddr and dma_addr of a buffer object.
+-	 */
+-	if (lowlevel_buffer_allocate(dev, buffer) < 0) {
+-		kfree(buffer);
+-		buffer = NULL;
+-		return ERR_PTR(-ENOMEM);
+-	}
+-
+ 	return buffer;
+ }
+ 
+-void exynos_drm_buf_destroy(struct drm_device *dev,
+-		struct exynos_drm_gem_buf *buffer)
++void exynos_drm_fini_buf(struct drm_device *dev,
++				struct exynos_drm_gem_buf *buffer)
+ {
+ 	DRM_DEBUG_KMS("%s.\n", __FILE__);
+ 
+@@ -101,12 +187,27 @@ void exynos_drm_buf_destroy(struct drm_device *dev,
+ 		return;
+ 	}
+ 
+-	lowlevel_buffer_deallocate(dev, buffer);
+-
+ 	kfree(buffer);
+ 	buffer = NULL;
+ }
+ 
+-MODULE_AUTHOR("Inki Dae <inki.dae at samsung.com>");
+-MODULE_DESCRIPTION("Samsung SoC DRM Buffer Management Module");
+-MODULE_LICENSE("GPL");
++int exynos_drm_alloc_buf(struct drm_device *dev,
++		struct exynos_drm_gem_buf *buf, unsigned int flags)
++{
++
++	/*
++	 * allocate memory region and set the memory information
++	 * to vaddr and dma_addr of a buffer object.
++	 */
++	if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
++		return -ENOMEM;
++
++	return 0;
++}
++
++void exynos_drm_free_buf(struct drm_device *dev,
++		unsigned int flags, struct exynos_drm_gem_buf *buffer)
++{
++
++	lowlevel_buffer_deallocate(dev, flags, buffer);
++}
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
+index 6e91f9c..3388e4e 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
++++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
+@@ -26,15 +26,22 @@
+ #ifndef _EXYNOS_DRM_BUF_H_
+ #define _EXYNOS_DRM_BUF_H_
+ 
+-/* allocate physical memory. */
+-struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
+-		unsigned int size);
++/* create and initialize buffer object. */
++struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
++						unsigned int size);
+ 
+-/* get memory information of a drm framebuffer. */
+-struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
++/* destroy buffer object. */
++void exynos_drm_fini_buf(struct drm_device *dev,
++				struct exynos_drm_gem_buf *buffer);
+ 
+-/* remove allocated physical memory. */
+-void exynos_drm_buf_destroy(struct drm_device *dev,
+-		struct exynos_drm_gem_buf *buffer);
++/* allocate physical memory region and setup sgt and pages. */
++int exynos_drm_alloc_buf(struct drm_device *dev,
++				struct exynos_drm_gem_buf *buf,
++				unsigned int flags);
++
++/* release physical memory region, sgt and pages. */
++void exynos_drm_free_buf(struct drm_device *dev,
++				unsigned int flags,
++				struct exynos_drm_gem_buf *buffer);
+ 
+ #endif
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
+index d620b07..bf791fa 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
+@@ -28,6 +28,7 @@
+ #include "drmP.h"
+ #include "drm_crtc_helper.h"
+ 
++#include <drm/exynos_drm.h>
+ #include "exynos_drm_drv.h"
+ #include "exynos_drm_encoder.h"
+ 
+@@ -44,22 +45,25 @@ struct exynos_drm_connector {
+ /* convert exynos_video_timings to drm_display_mode */
+ static inline void
+ convert_to_display_mode(struct drm_display_mode *mode,
+-			struct fb_videomode *timing)
++			struct exynos_drm_panel_info *panel)
+ {
++	struct fb_videomode *timing = &panel->timing;
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+ 	mode->clock = timing->pixclock / 1000;
+ 	mode->vrefresh = timing->refresh;
+ 
+ 	mode->hdisplay = timing->xres;
+-	mode->hsync_start = mode->hdisplay + timing->left_margin;
++	mode->hsync_start = mode->hdisplay + timing->right_margin;
+ 	mode->hsync_end = mode->hsync_start + timing->hsync_len;
+-	mode->htotal = mode->hsync_end + timing->right_margin;
++	mode->htotal = mode->hsync_end + timing->left_margin;
+ 
+ 	mode->vdisplay = timing->yres;
+-	mode->vsync_start = mode->vdisplay + timing->upper_margin;
++	mode->vsync_start = mode->vdisplay + timing->lower_margin;
+ 	mode->vsync_end = mode->vsync_start + timing->vsync_len;
+-	mode->vtotal = mode->vsync_end + timing->lower_margin;
++	mode->vtotal = mode->vsync_end + timing->upper_margin;
++	mode->width_mm = panel->width_mm;
++	mode->height_mm = panel->height_mm;
+ 
+ 	if (timing->vmode & FB_VMODE_INTERLACED)
+ 		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+@@ -81,14 +85,14 @@ convert_to_video_timing(struct fb_videomode *timing,
+ 	timing->refresh = drm_mode_vrefresh(mode);
+ 
+ 	timing->xres = mode->hdisplay;
+-	timing->left_margin = mode->hsync_start - mode->hdisplay;
++	timing->right_margin = mode->hsync_start - mode->hdisplay;
+ 	timing->hsync_len = mode->hsync_end - mode->hsync_start;
+-	timing->right_margin = mode->htotal - mode->hsync_end;
++	timing->left_margin = mode->htotal - mode->hsync_end;
+ 
+ 	timing->yres = mode->vdisplay;
+-	timing->upper_margin = mode->vsync_start - mode->vdisplay;
++	timing->lower_margin = mode->vsync_start - mode->vdisplay;
+ 	timing->vsync_len = mode->vsync_end - mode->vsync_start;
+-	timing->lower_margin = mode->vtotal - mode->vsync_end;
++	timing->upper_margin = mode->vtotal - mode->vsync_end;
+ 
+ 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ 		timing->vmode = FB_VMODE_INTERLACED;
+@@ -148,16 +152,18 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
+ 		connector->display_info.raw_edid = edid;
+ 	} else {
+ 		struct drm_display_mode *mode = drm_mode_create(connector->dev);
+-		struct fb_videomode *timing;
++		struct exynos_drm_panel_info *panel;
+ 
+-		if (display_ops->get_timing)
+-			timing = display_ops->get_timing(manager->dev);
++		if (display_ops->get_panel)
++			panel = display_ops->get_panel(manager->dev);
+ 		else {
+ 			drm_mode_destroy(connector->dev, mode);
+ 			return 0;
+ 		}
+ 
+-		convert_to_display_mode(mode, timing);
++		convert_to_display_mode(mode, panel);
++		connector->display_info.width_mm = mode->width_mm;
++		connector->display_info.height_mm = mode->height_mm;
+ 
+ 		mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ 		drm_mode_set_name(mode);
+@@ -219,6 +225,29 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
+ 	.best_encoder	= exynos_drm_best_encoder,
+ };
+ 
++static int exynos_drm_connector_fill_modes(struct drm_connector *connector,
++				unsigned int max_width, unsigned int max_height)
++{
++	struct exynos_drm_connector *exynos_connector =
++					to_exynos_connector(connector);
++	struct exynos_drm_manager *manager = exynos_connector->manager;
++	struct exynos_drm_manager_ops *ops = manager->ops;
++	unsigned int width, height;
++
++	width = max_width;
++	height = max_height;
++
++	/*
++	 * if specific driver want to find desired_mode using maxmum
++	 * resolution then get max width and height from that driver.
++	 */
++	if (ops && ops->get_max_resol)
++		ops->get_max_resol(manager->dev, &width, &height);
++
++	return drm_helper_probe_single_connector_modes(connector, width,
++							height);
++}
++
+ /* get detection status of display device. */
+ static enum drm_connector_status
+ exynos_drm_connector_detect(struct drm_connector *connector, bool force)
+@@ -256,7 +285,7 @@ static void exynos_drm_connector_destroy(struct drm_connector *connector)
+ 
+ static struct drm_connector_funcs exynos_connector_funcs = {
+ 	.dpms		= drm_helper_connector_dpms,
+-	.fill_modes	= drm_helper_probe_single_connector_modes,
++	.fill_modes	= exynos_drm_connector_fill_modes,
+ 	.detect		= exynos_drm_connector_detect,
+ 	.destroy	= exynos_drm_connector_destroy,
+ };
+@@ -286,6 +315,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
+ 		connector->interlace_allowed = true;
+ 		connector->polled = DRM_CONNECTOR_POLL_HPD;
+ 		break;
++	case EXYNOS_DISPLAY_TYPE_VIDI:
++		type = DRM_MODE_CONNECTOR_VIRTUAL;
++		connector->polled = DRM_CONNECTOR_POLL_HPD;
++		break;
+ 	default:
+ 		type = DRM_MODE_CONNECTOR_Unknown;
+ 		break;
+@@ -319,9 +352,3 @@ err_connector:
+ 	kfree(exynos_connector);
+ 	return NULL;
+ }
+-
+-MODULE_AUTHOR("Inki Dae <inki.dae at samsung.com>");
+-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim at samsung.com>");
+-MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim at samsung.com>");
+-MODULE_DESCRIPTION("Samsung SoC DRM Connector Driver");
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
+index 661a035..eaf630d 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
+@@ -32,7 +32,6 @@
+ #include "exynos_drm_connector.h"
+ #include "exynos_drm_fbdev.h"
+ 
+-static DEFINE_MUTEX(exynos_drm_mutex);
+ static LIST_HEAD(exynos_drm_subdrv_list);
+ static struct drm_device *drm_dev;
+ 
+@@ -55,13 +54,18 @@ static int exynos_drm_subdrv_probe(struct drm_device *dev,
+ 		 *
+ 		 * P.S. note that this driver is considered for modularization.
+ 		 */
+-		ret = subdrv->probe(dev, subdrv->manager.dev);
++		ret = subdrv->probe(dev, subdrv->dev);
+ 		if (ret)
+ 			return ret;
+ 	}
+ 
++	if (!subdrv->manager)
++		return 0;
++
++	subdrv->manager->dev = subdrv->dev;
++
+ 	/* create and initialize a encoder for this sub driver. */
+-	encoder = exynos_drm_encoder_create(dev, &subdrv->manager,
++	encoder = exynos_drm_encoder_create(dev, subdrv->manager,
+ 			(1 << MAX_CRTC) - 1);
+ 	if (!encoder) {
+ 		DRM_ERROR("failed to create encoder\n");
+@@ -116,13 +120,10 @@ int exynos_drm_device_register(struct drm_device *dev)
+ 	if (!dev)
+ 		return -EINVAL;
+ 
+-	if (drm_dev) {
+-		DRM_ERROR("Already drm device were registered\n");
+-		return -EBUSY;
+-	}
++	drm_dev = dev;
+ 
+-	mutex_lock(&exynos_drm_mutex);
+ 	list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
++		subdrv->drm_dev = dev;
+ 		err = exynos_drm_subdrv_probe(dev, subdrv);
+ 		if (err) {
+ 			DRM_DEBUG("exynos drm subdrv probe failed.\n");
+@@ -130,9 +131,6 @@ int exynos_drm_device_register(struct drm_device *dev)
+ 		}
+ 	}
+ 
+-	drm_dev = dev;
+-	mutex_unlock(&exynos_drm_mutex);
+-
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(exynos_drm_device_register);
+@@ -143,83 +141,28 @@ int exynos_drm_device_unregister(struct drm_device *dev)
+ 
+ 	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ 
+-	if (!dev || dev != drm_dev) {
++	if (!dev) {
+ 		WARN(1, "Unexpected drm device unregister!\n");
+ 		return -EINVAL;
+ 	}
+ 
+-	mutex_lock(&exynos_drm_mutex);
+ 	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list)
+ 		exynos_drm_subdrv_remove(dev, subdrv);
+ 
+ 	drm_dev = NULL;
+-	mutex_unlock(&exynos_drm_mutex);
+ 
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
+ 
+-static int exynos_drm_mode_group_reinit(struct drm_device *dev)
+-{
+-	struct drm_mode_group *group = &dev->primary->mode_group;
+-	uint32_t *id_list = group->id_list;
+-	int ret;
+-
+-	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+-
+-	ret = drm_mode_group_init_legacy_group(dev, group);
+-	if (ret < 0)
+-		return ret;
+-
+-	kfree(id_list);
+-	return 0;
+-}
+-
+ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
+ {
+-	int err;
+-
+ 	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ 
+ 	if (!subdrv)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&exynos_drm_mutex);
+-	if (drm_dev) {
+-		err = exynos_drm_subdrv_probe(drm_dev, subdrv);
+-		if (err) {
+-			DRM_ERROR("failed to probe exynos drm subdrv\n");
+-			mutex_unlock(&exynos_drm_mutex);
+-			return err;
+-		}
+-
+-		/*
+-		 * if any specific driver such as fimd or hdmi driver called
+-		 * exynos_drm_subdrv_register() later than drm_load(),
+-		 * the fb helper should be re-initialized and re-configured.
+-		 */
+-		err = exynos_drm_fbdev_reinit(drm_dev);
+-		if (err) {
+-			DRM_ERROR("failed to reinitialize exynos drm fbdev\n");
+-			exynos_drm_subdrv_remove(drm_dev, subdrv);
+-			mutex_unlock(&exynos_drm_mutex);
+-			return err;
+-		}
+-
+-		err = exynos_drm_mode_group_reinit(drm_dev);
+-		if (err) {
+-			DRM_ERROR("failed to reinitialize mode group\n");
+-			exynos_drm_fbdev_fini(drm_dev);
+-			exynos_drm_subdrv_remove(drm_dev, subdrv);
+-			mutex_unlock(&exynos_drm_mutex);
+-			return err;
+-		}
+-	}
+-
+-	subdrv->drm_dev = drm_dev;
+-
+ 	list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
+-	mutex_unlock(&exynos_drm_mutex);
+ 
+ 	return 0;
+ }
+@@ -227,46 +170,48 @@ EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
+ 
+ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
+ {
+-	int ret = -EFAULT;
+-
+ 	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ 
+-	if (!subdrv) {
+-		DRM_DEBUG("Unexpected exynos drm subdrv unregister!\n");
+-		return ret;
+-	}
++	if (!subdrv)
++		return -EINVAL;
+ 
+-	mutex_lock(&exynos_drm_mutex);
+-	if (drm_dev) {
+-		exynos_drm_subdrv_remove(drm_dev, subdrv);
+-		list_del(&subdrv->list);
++	list_del(&subdrv->list);
+ 
+-		/*
+-		 * fb helper should be updated once a sub driver is released
+-		 * to re-configure crtc and connector and also to re-setup
+-		 * drm framebuffer.
+-		 */
+-		ret = exynos_drm_fbdev_reinit(drm_dev);
+-		if (ret < 0) {
+-			DRM_ERROR("failed fb helper reinit.\n");
+-			goto fail;
+-		}
++	return 0;
++}
++EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
+ 
+-		ret = exynos_drm_mode_group_reinit(drm_dev);
+-		if (ret < 0) {
+-			DRM_ERROR("failed drm mode group reinit.\n");
+-			goto fail;
++int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
++{
++	struct exynos_drm_subdrv *subdrv;
++	int ret;
++
++	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
++		if (subdrv->open) {
++			ret = subdrv->open(dev, subdrv->dev, file);
++			if (ret)
++				goto err;
+ 		}
+ 	}
+ 
+-fail:
+-	mutex_unlock(&exynos_drm_mutex);
++	return 0;
++
++err:
++	list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
++		if (subdrv->close)
++			subdrv->close(dev, subdrv->dev, file);
++	}
+ 	return ret;
+ }
+-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
++EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
++
++void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
++{
++	struct exynos_drm_subdrv *subdrv;
+ 
+-MODULE_AUTHOR("Inki Dae <inki.dae at samsung.com>");
+-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim at samsung.com>");
+-MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim at samsung.com>");
+-MODULE_DESCRIPTION("Samsung SoC DRM Core Driver");
+-MODULE_LICENSE("GPL");
++	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
++		if (subdrv->close)
++			subdrv->close(dev, subdrv->dev, file);
++	}
++}
++EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+index ee43cc2..3486ffe 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+@@ -34,7 +34,6 @@
+ #include "exynos_drm_fb.h"
+ #include "exynos_drm_encoder.h"
+ #include "exynos_drm_gem.h"
+-#include "exynos_drm_buf.h"
+ 
+ #define to_exynos_crtc(x)	container_of(x, struct exynos_drm_crtc,\
+ 				drm_crtc)
+@@ -52,11 +51,13 @@
+  *	drm framework doesn't support multiple irq yet.
+  *	we can refer to the crtc to current hardware interrupt occured through
+  *	this pipe value.
++ * @dpms: store the crtc dpms value
+  */
+ struct exynos_drm_crtc {
+ 	struct drm_crtc			drm_crtc;
+ 	struct exynos_drm_overlay	overlay;
+ 	unsigned int			pipe;
++	unsigned int			dpms;
+ };
+ 
+ static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
+@@ -78,19 +79,23 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ 	struct exynos_drm_gem_buf *buffer;
+ 	unsigned int actual_w;
+ 	unsigned int actual_h;
++	int nr = exynos_drm_format_num_buffers(fb->pixel_format);
++	int i;
++
++	for (i = 0; i < nr; i++) {
++		buffer = exynos_drm_fb_buffer(fb, i);
++		if (!buffer) {
++			DRM_LOG_KMS("buffer is null\n");
++			return -EFAULT;
++		}
+ 
+-	buffer = exynos_drm_fb_get_buf(fb);
+-	if (!buffer) {
+-		DRM_LOG_KMS("buffer is null.\n");
+-		return -EFAULT;
+-	}
+-
+-	overlay->dma_addr = buffer->dma_addr;
+-	overlay->vaddr = buffer->kvaddr;
++		overlay->dma_addr[i] = buffer->dma_addr;
++		overlay->vaddr[i] = buffer->kvaddr;
+ 
+-	DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
+-			(unsigned long)overlay->vaddr,
+-			(unsigned long)overlay->dma_addr);
++		DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
++				i, (unsigned long)overlay->vaddr[i],
++				(unsigned long)overlay->dma_addr[i]);
++	}
+ 
+ 	actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
+ 	actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
+@@ -101,7 +106,8 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ 	overlay->fb_width = fb->width;
+ 	overlay->fb_height = fb->height;
+ 	overlay->bpp = fb->bits_per_pixel;
+-	overlay->pitch = fb->pitch;
++	overlay->pitch = fb->pitches[0];
++	overlay->pixel_format = fb->pixel_format;
+ 
+ 	/* set overlay range to be displayed. */
+ 	overlay->crtc_x = pos->crtc_x;
+@@ -153,26 +159,37 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
+ 
+ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
+ {
++	struct drm_device *dev = crtc->dev;
+ 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ 
+ 	DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+ 
++	if (exynos_crtc->dpms == mode) {
++		DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
++		return;
++	}
++
++	mutex_lock(&dev->struct_mutex);
++
+ 	switch (mode) {
+ 	case DRM_MODE_DPMS_ON:
+-		exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+-				exynos_drm_encoder_crtc_commit);
++		exynos_drm_fn_encoder(crtc, &mode,
++				exynos_drm_encoder_crtc_dpms);
++		exynos_crtc->dpms = mode;
+ 		break;
+ 	case DRM_MODE_DPMS_STANDBY:
+ 	case DRM_MODE_DPMS_SUSPEND:
+ 	case DRM_MODE_DPMS_OFF:
+-		/* TODO */
+-		exynos_drm_fn_encoder(crtc, NULL,
+-				exynos_drm_encoder_crtc_disable);
++		exynos_drm_fn_encoder(crtc, &mode,
++				exynos_drm_encoder_crtc_dpms);
++		exynos_crtc->dpms = mode;
+ 		break;
+ 	default:
+-		DRM_DEBUG_KMS("unspecified mode %d\n", mode);
++		DRM_ERROR("unspecified mode %d\n", mode);
+ 		break;
+ 	}
++
++	mutex_unlock(&dev->struct_mutex);
+ }
+ 
+ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
+@@ -188,6 +205,28 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
++	/*
++	 * when set_crtc is requested from user or at booting time,
++	 * crtc->commit would be called without dpms call so if dpms is
++	 * no power on then crtc->dpms should be called
++	 * with DRM_MODE_DPMS_ON for the hardware power to be on.
++	 */
++	if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) {
++		int mode = DRM_MODE_DPMS_ON;
++
++		/*
++		 * enable hardware(power on) to all encoders hdmi connected
++		 * to current crtc.
++		 */
++		exynos_drm_crtc_dpms(crtc, mode);
++		/*
++		 * enable dma to all encoders connected to current crtc and
++		 * lcd panel.
++		 */
++		exynos_drm_fn_encoder(crtc, &mode,
++					exynos_drm_encoder_dpms_from_crtc);
++	}
++
+ 	exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ 			exynos_drm_encoder_crtc_commit);
+ }
+@@ -210,7 +249,11 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ {
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+-	mode = adjusted_mode;
++	/*
++	 * copy the mode data adjusted by mode_fixup() into crtc->mode
++	 * so that hardware can be seet to proper mode.
++	 */
++	memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
+ 
+ 	return exynos_drm_crtc_update(crtc);
+ }
+@@ -268,9 +311,6 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
+ 		 */
+ 		event->pipe = exynos_crtc->pipe;
+ 
+-		list_add_tail(&event->base.link,
+-				&dev_priv->pageflip_event_list);
+-
+ 		ret = drm_vblank_get(dev, exynos_crtc->pipe);
+ 		if (ret) {
+ 			DRM_DEBUG("failed to acquire vblank counter\n");
+@@ -279,6 +319,9 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
+ 			goto out;
+ 		}
+ 
++		list_add_tail(&event->base.link,
++				&dev_priv->pageflip_event_list);
++
+ 		crtc->fb = fb;
+ 		ret = exynos_drm_crtc_update(crtc);
+ 		if (ret) {
+@@ -344,6 +387,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
+ 	}
+ 
+ 	exynos_crtc->pipe = nr;
++	exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
++	exynos_crtc->overlay.zpos = DEFAULT_ZPOS;
+ 	crtc = &exynos_crtc->drm_crtc;
+ 
+ 	private->crtc[nr] = crtc;
+@@ -357,9 +402,14 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
+ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
+ {
+ 	struct exynos_drm_private *private = dev->dev_private;
++	struct exynos_drm_crtc *exynos_crtc =
++		to_exynos_crtc(private->crtc[crtc]);
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
++	if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
++		return -EPERM;
++
+ 	exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
+ 			exynos_drm_enable_vblank);
+ 
+@@ -369,15 +419,14 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
+ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
+ {
+ 	struct exynos_drm_private *private = dev->dev_private;
++	struct exynos_drm_crtc *exynos_crtc =
++		to_exynos_crtc(private->crtc[crtc]);
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
++	if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
++		return;
++
+ 	exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
+ 			exynos_drm_disable_vblank);
+ }
+-
+-MODULE_AUTHOR("Inki Dae <inki.dae at samsung.com>");
+-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim at samsung.com>");
+-MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim at samsung.com>");
+-MODULE_DESCRIPTION("Samsung SoC DRM CRTC Driver");
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
+index 53e2216..a6819b5 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
+@@ -33,16 +33,21 @@
+ 
+ #include "exynos_drm_drv.h"
+ #include "exynos_drm_crtc.h"
++#include "exynos_drm_encoder.h"
+ #include "exynos_drm_fbdev.h"
+ #include "exynos_drm_fb.h"
+ #include "exynos_drm_gem.h"
++#include "exynos_drm_plane.h"
++#include "exynos_drm_vidi.h"
+ 
+-#define DRIVER_NAME	"exynos-drm"
++#define DRIVER_NAME	"exynos"
+ #define DRIVER_DESC	"Samsung SoC DRM"
+ #define DRIVER_DATE	"20110530"
+ #define DRIVER_MAJOR	1
+ #define DRIVER_MINOR	0
+ 
++#define VBLANK_OFF_DELAY	50000
++
+ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
+ {
+ 	struct exynos_drm_private *private;
+@@ -77,6 +82,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
+ 			goto err_crtc;
+ 	}
+ 
++	for (nr = 0; nr < MAX_PLANE; nr++) {
++		ret = exynos_plane_init(dev, nr);
++		if (ret)
++			goto err_crtc;
++	}
++
+ 	ret = drm_vblank_init(dev, MAX_CRTC);
+ 	if (ret)
+ 		goto err_crtc;
+@@ -90,6 +101,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
+ 	if (ret)
+ 		goto err_vblank;
+ 
++	/* setup possible_clones. */
++	exynos_drm_encoder_setup(dev);
++
+ 	/*
+ 	 * create and configure fb helper and also exynos specific
+ 	 * fbdev object.
+@@ -100,6 +114,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
+ 		goto err_drm_device;
+ 	}
+ 
++	drm_vblank_offdelay = VBLANK_OFF_DELAY;
++
+ 	return 0;
+ 
+ err_drm_device:
+@@ -129,17 +145,45 @@ static int exynos_drm_unload(struct drm_device *dev)
+ 	return 0;
+ }
+ 
++static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
++{
++	DRM_DEBUG_DRIVER("%s\n", __FILE__);
++
++	return exynos_drm_subdrv_open(dev, file);
++}
++
+ static void exynos_drm_preclose(struct drm_device *dev,
+-					struct drm_file *file_priv)
++					struct drm_file *file)
+ {
+-	struct exynos_drm_private *dev_priv = dev->dev_private;
++	struct exynos_drm_private *private = dev->dev_private;
++	struct drm_pending_vblank_event *e, *t;
++	unsigned long flags;
+ 
+-	/*
+-	 * drm framework frees all events at release time,
+-	 * so private event list should be cleared.
+-	 */
+-	if (!list_empty(&dev_priv->pageflip_event_list))
+-		INIT_LIST_HEAD(&dev_priv->pageflip_event_list);
++	DRM_DEBUG_DRIVER("%s\n", __FILE__);
++
++	/* release events of current file */
++	spin_lock_irqsave(&dev->event_lock, flags);
++	list_for_each_entry_safe(e, t, &private->pageflip_event_list,
++			base.link) {
++		if (e->base.file_priv == file) {
++			list_del(&e->base.link);
++			e->base.destroy(&e->base);
++		}
++	}
++	spin_unlock_irqrestore(&dev->event_lock, flags);
++
++	exynos_drm_subdrv_close(dev, file);
++}
++
++static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
++{
++	DRM_DEBUG_DRIVER("%s\n", __FILE__);
++
++	if (!file->driver_priv)
++		return;
++
++	kfree(file->driver_priv);
++	file->driver_priv = NULL;
+ }
+ 
+ static void exynos_drm_lastclose(struct drm_device *dev)
+@@ -163,6 +207,20 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
+ 			DRM_AUTH),
+ 	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
+ 			exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
++	DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
++			DRM_UNLOCKED | DRM_AUTH),
++	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
++			vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
++};
++
++static const struct file_operations exynos_drm_driver_fops = {
++	.owner		= THIS_MODULE,
++	.open		= drm_open,
++	.mmap		= exynos_drm_gem_mmap,
++	.poll		= drm_poll,
++	.read		= drm_read,
++	.unlocked_ioctl	= drm_ioctl,
++	.release	= drm_release,
+ };
+ 
+ static struct drm_driver exynos_drm_driver = {
+@@ -170,8 +228,10 @@ static struct drm_driver exynos_drm_driver = {
+ 				  DRIVER_MODESET | DRIVER_GEM,
+ 	.load			= exynos_drm_load,
+ 	.unload			= exynos_drm_unload,
++	.open			= exynos_drm_open,
+ 	.preclose		= exynos_drm_preclose,
+ 	.lastclose		= exynos_drm_lastclose,
++	.postclose		= exynos_drm_postclose,
+ 	.get_vblank_counter	= drm_vblank_count,
+ 	.enable_vblank		= exynos_drm_crtc_enable_vblank,
+ 	.disable_vblank		= exynos_drm_crtc_disable_vblank,
+@@ -182,15 +242,7 @@ static struct drm_driver exynos_drm_driver = {
+ 	.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
+ 	.dumb_destroy		= exynos_drm_gem_dumb_destroy,
+ 	.ioctls			= exynos_ioctls,
+-	.fops = {
+-		.owner		= THIS_MODULE,
+-		.open		= drm_open,
+-		.mmap		= exynos_drm_gem_mmap,
+-		.poll		= drm_poll,
+-		.read		= drm_read,
+-		.unlocked_ioctl	= drm_ioctl,
+-		.release	= drm_release,
+-	},
++	.fops			= &exynos_drm_driver_fops,
+ 	.name	= DRIVER_NAME,
+ 	.desc	= DRIVER_DESC,
+ 	.date	= DRIVER_DATE,
+@@ -221,15 +273,66 @@ static struct platform_driver exynos_drm_platform_driver = {
+ 	.remove		= __devexit_p(exynos_drm_platform_remove),
+ 	.driver		= {
+ 		.owner	= THIS_MODULE,
+-		.name	= DRIVER_NAME,
++		.name	= "exynos-drm",
+ 	},
+ };
+ 
+ static int __init exynos_drm_init(void)
+ {
++	int ret;
++
+ 	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ 
+-	return platform_driver_register(&exynos_drm_platform_driver);
++#ifdef CONFIG_DRM_EXYNOS_FIMD
++	ret = platform_driver_register(&fimd_driver);
++	if (ret < 0)
++		goto out_fimd;
++#endif
++
++#ifdef CONFIG_DRM_EXYNOS_HDMI
++	ret = platform_driver_register(&hdmi_driver);
++	if (ret < 0)
++		goto out_hdmi;
++	ret = platform_driver_register(&mixer_driver);
++	if (ret < 0)
++		goto out_mixer;
++	ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
++	if (ret < 0)
++		goto out_common_hdmi;
++#endif
++
++#ifdef CONFIG_DRM_EXYNOS_VIDI
++	ret = platform_driver_register(&vidi_driver);
++	if (ret < 0)
++		goto out_vidi;
++#endif
++
++	ret = platform_driver_register(&exynos_drm_platform_driver);
++	if (ret < 0)
++		goto out;
++
++	return 0;
++
++out:
++#ifdef CONFIG_DRM_EXYNOS_VIDI
++out_vidi:
++	platform_driver_unregister(&vidi_driver);
++#endif
++
++#ifdef CONFIG_DRM_EXYNOS_HDMI
++	platform_driver_unregister(&exynos_drm_common_hdmi_driver);
++out_common_hdmi:
++	platform_driver_unregister(&mixer_driver);
++out_mixer:
++	platform_driver_unregister(&hdmi_driver);
++out_hdmi:
++#endif
++
++#ifdef CONFIG_DRM_EXYNOS_FIMD
++	platform_driver_unregister(&fimd_driver);
++out_fimd:
++#endif
++	return ret;
+ }
+ 
+ static void __exit exynos_drm_exit(void)
+@@ -237,6 +340,20 @@ static void __exit exynos_drm_exit(void)
+ 	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ 
+ 	platform_driver_unregister(&exynos_drm_platform_driver);
++
++#ifdef CONFIG_DRM_EXYNOS_HDMI
++	platform_driver_unregister(&exynos_drm_common_hdmi_driver);
++	platform_driver_unregister(&mixer_driver);
++	platform_driver_unregister(&hdmi_driver);
++#endif
++
++#ifdef CONFIG_DRM_EXYNOS_VIDI
++	platform_driver_unregister(&vidi_driver);
++#endif
++
++#ifdef CONFIG_DRM_EXYNOS_FIMD
++	platform_driver_unregister(&fimd_driver);
++#endif
+ }
+ 
+ module_init(exynos_drm_init);
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
+index 5e02e6e..1d81417 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
++++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
+@@ -32,12 +32,17 @@
+ #include <linux/module.h>
+ #include "drm.h"
+ 
+-#define MAX_CRTC	2
++#define MAX_CRTC	3
++#define MAX_PLANE	5
++#define MAX_FB_BUFFER	4
++#define DEFAULT_ZPOS	-1
+ 
+ struct drm_device;
+ struct exynos_drm_overlay;
+ struct drm_connector;
+ 
++extern unsigned int drm_vblank_offdelay;
++
+ /* this enumerates display type. */
+ enum exynos_drm_output_type {
+ 	EXYNOS_DISPLAY_TYPE_NONE,
+@@ -45,6 +50,8 @@ enum exynos_drm_output_type {
+ 	EXYNOS_DISPLAY_TYPE_LCD,
+ 	/* HDMI Interface. */
+ 	EXYNOS_DISPLAY_TYPE_HDMI,
++	/* Virtual Display Interface. */
++	EXYNOS_DISPLAY_TYPE_VIDI,
+ };
+ 
+ /*
+@@ -57,8 +64,8 @@ enum exynos_drm_output_type {
+ struct exynos_drm_overlay_ops {
+ 	void (*mode_set)(struct device *subdrv_dev,
+ 			 struct exynos_drm_overlay *overlay);
+-	void (*commit)(struct device *subdrv_dev);
+-	void (*disable)(struct device *subdrv_dev);
++	void (*commit)(struct device *subdrv_dev, int zpos);
++	void (*disable)(struct device *subdrv_dev, int zpos);
+ };
+ 
+ /*
+@@ -80,9 +87,11 @@ struct exynos_drm_overlay_ops {
+  * @scan_flag: interlace or progressive way.
+  *	(it could be DRM_MODE_FLAG_*)
+  * @bpp: pixel size.(in bit)
+- * @dma_addr: bus(accessed by dma) address to the memory region allocated
+- *	for a overlay.
+- * @vaddr: virtual memory addresss to this overlay.
++ * @pixel_format: fourcc pixel format of this overlay
++ * @dma_addr: array of bus(accessed by dma) address to the memory region
++ *	      allocated for a overlay.
++ * @vaddr: array of virtual memory addresss to this overlay.
++ * @zpos: order of overlay layer(z position).
+  * @default_win: a window to be enabled.
+  * @color_key: color key on or off.
+  * @index_color: if using color key feature then this value would be used
+@@ -109,8 +118,10 @@ struct exynos_drm_overlay {
+ 	unsigned int scan_flag;
+ 	unsigned int bpp;
+ 	unsigned int pitch;
+-	dma_addr_t dma_addr;
+-	void __iomem *vaddr;
++	uint32_t pixel_format;
++	dma_addr_t dma_addr[MAX_FB_BUFFER];
++	void __iomem *vaddr[MAX_FB_BUFFER];
++	int zpos;
+ 
+ 	bool default_win;
+ 	bool color_key;
+@@ -127,7 +138,7 @@ struct exynos_drm_overlay {
+  * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
+  * @is_connected: check for that display is connected or not.
+  * @get_edid: get edid modes from display driver.
+- * @get_timing: get timing object from display driver.
++ * @get_panel: get panel object from display driver.
+  * @check_timing: check if timing is valid or not.
+  * @power_on: display device on or off.
+  */
+@@ -136,7 +147,7 @@ struct exynos_drm_display_ops {
+ 	bool (*is_connected)(struct device *dev);
+ 	int (*get_edid)(struct device *dev, struct drm_connector *connector,
+ 				u8 *edid, int len);
+-	void *(*get_timing)(struct device *dev);
++	void *(*get_panel)(struct device *dev);
+ 	int (*check_timing)(struct device *dev, void *timing);
+ 	int (*power_on)(struct device *dev, int mode);
+ };
+@@ -144,17 +155,27 @@ struct exynos_drm_display_ops {
+ /*
+  * Exynos drm manager ops
+  *
++ * @dpms: control device power.
++ * @apply: set timing, vblank and overlay data to registers.
++ * @mode_fixup: fix mode data comparing to hw specific display mode.
+  * @mode_set: convert drm_display_mode to hw specific display mode and
+  *	      would be called by encoder->mode_set().
++ * @get_max_resol: get maximum resolution to specific hardware.
+  * @commit: set current hw specific display mode to hw.
+- * @disable: disable hardware specific display mode.
+  * @enable_vblank: specific driver callback for enabling vblank interrupt.
+  * @disable_vblank: specific driver callback for disabling vblank interrupt.
+  */
+ struct exynos_drm_manager_ops {
++	void (*dpms)(struct device *subdrv_dev, int mode);
++	void (*apply)(struct device *subdrv_dev);
++	void (*mode_fixup)(struct device *subdrv_dev,
++				struct drm_connector *connector,
++				struct drm_display_mode *mode,
++				struct drm_display_mode *adjusted_mode);
+ 	void (*mode_set)(struct device *subdrv_dev, void *mode);
++	void (*get_max_resol)(struct device *subdrv_dev, unsigned int *width,
++				unsigned int *height);
+ 	void (*commit)(struct device *subdrv_dev);
+-	void (*disable)(struct device *subdrv_dev);
+ 	int (*enable_vblank)(struct device *subdrv_dev);
+ 	void (*disable_vblank)(struct device *subdrv_dev);
+ };
+@@ -204,25 +225,33 @@ struct exynos_drm_private {
+  * Exynos drm sub driver structure.
+  *
+  * @list: sub driver has its own list object to register to exynos drm driver.
++ * @dev: pointer to device object for subdrv device driver.
+  * @drm_dev: pointer to drm_device and this pointer would be set
+  *	when sub driver calls exynos_drm_subdrv_register().
++ * @manager: subdrv has its own manager to control a hardware appropriately
++ *	and we can access a hardware drawing on this manager.
+  * @probe: this callback would be called by exynos drm driver after
+  *	subdrv is registered to it.
+  * @remove: this callback is used to release resources created
+  *	by probe callback.
+- * @manager: subdrv has its own manager to control a hardware appropriately
+- *	and we can access a hardware drawing on this manager.
++ * @open: this would be called with drm device file open.
++ * @close: this would be called with drm device file close.
+  * @encoder: encoder object owned by this sub driver.
+  * @connector: connector object owned by this sub driver.
+  */
+ struct exynos_drm_subdrv {
+ 	struct list_head list;
++	struct device *dev;
+ 	struct drm_device *drm_dev;
++	struct exynos_drm_manager *manager;
+ 
+ 	int (*probe)(struct drm_device *drm_dev, struct device *dev);
+ 	void (*remove)(struct drm_device *dev);
++	int (*open)(struct drm_device *drm_dev, struct device *dev,
++			struct drm_file *file);
++	void (*close)(struct drm_device *drm_dev, struct device *dev,
++			struct drm_file *file);
+ 
+-	struct exynos_drm_manager manager;
+ 	struct drm_encoder *encoder;
+ 	struct drm_connector *connector;
+ };
+@@ -243,15 +272,19 @@ int exynos_drm_device_unregister(struct drm_device *dev);
+  * this function would be called by sub drivers such as display controller
+  * or hdmi driver to register this sub driver object to exynos drm driver
+  * and when a sub driver is registered to exynos drm driver a probe callback
+- * of the sub driver is called and creates its own encoder and connector
+- * and then fb helper and drm mode group would be re-initialized.
++ * of the sub driver is called and creates its own encoder and connector.
+  */
+ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv);
+ 
+-/*
+- * this function removes subdrv list from exynos drm driver and fb helper
+- * and drm mode group would be re-initialized.
+- */
++/* this function removes subdrv list from exynos drm driver */
+ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
+ 
++int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
++void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
++
++extern struct platform_driver fimd_driver;
++extern struct platform_driver hdmi_driver;
++extern struct platform_driver mixer_driver;
++extern struct platform_driver exynos_drm_common_hdmi_driver;
++extern struct platform_driver vidi_driver;
+ #endif
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+index 1530614..6e9ac7b 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+@@ -42,49 +42,68 @@
+  * @drm_encoder: encoder object.
+  * @manager: specific encoder has its own manager to control a hardware
+  *	appropriately and we can access a hardware drawing on this manager.
++ * @dpms: store the encoder dpms value.
+  */
+ struct exynos_drm_encoder {
+ 	struct drm_encoder		drm_encoder;
+ 	struct exynos_drm_manager	*manager;
++	int dpms;
+ };
+ 
+-static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
++static void exynos_drm_display_power(struct drm_encoder *encoder, int mode)
+ {
+ 	struct drm_device *dev = encoder->dev;
+ 	struct drm_connector *connector;
+ 	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		if (connector->encoder == encoder) {
++			struct exynos_drm_display_ops *display_ops =
++							manager->display_ops;
++
++			DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
++					connector->base.id, mode);
++			if (display_ops && display_ops->power_on)
++				display_ops->power_on(manager->dev, mode);
++		}
++	}
++}
++
++static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+ 	struct exynos_drm_manager_ops *manager_ops = manager->ops;
++	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ 
+ 	DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
+ 
++	if (exynos_encoder->dpms == mode) {
++		DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
++		return;
++	}
++
++	mutex_lock(&dev->struct_mutex);
++
+ 	switch (mode) {
+ 	case DRM_MODE_DPMS_ON:
+-		if (manager_ops && manager_ops->commit)
+-			manager_ops->commit(manager->dev);
++		if (manager_ops && manager_ops->apply)
++			manager_ops->apply(manager->dev);
++		exynos_drm_display_power(encoder, mode);
++		exynos_encoder->dpms = mode;
+ 		break;
+ 	case DRM_MODE_DPMS_STANDBY:
+ 	case DRM_MODE_DPMS_SUSPEND:
+ 	case DRM_MODE_DPMS_OFF:
+-		/* TODO */
+-		if (manager_ops && manager_ops->disable)
+-			manager_ops->disable(manager->dev);
++		exynos_drm_display_power(encoder, mode);
++		exynos_encoder->dpms = mode;
+ 		break;
+ 	default:
+ 		DRM_ERROR("unspecified mode %d\n", mode);
+ 		break;
+ 	}
+ 
+-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+-		if (connector->encoder == encoder) {
+-			struct exynos_drm_display_ops *display_ops =
+-							manager->display_ops;
+-
+-			DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+-					connector->base.id, mode);
+-			if (display_ops && display_ops->power_on)
+-				display_ops->power_on(manager->dev, mode);
+-		}
+-	}
++	mutex_unlock(&dev->struct_mutex);
+ }
+ 
+ static bool
+@@ -92,9 +111,19 @@ exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+ 			       struct drm_display_mode *mode,
+ 			       struct drm_display_mode *adjusted_mode)
+ {
++	struct drm_device *dev = encoder->dev;
++	struct drm_connector *connector;
++	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
++	struct exynos_drm_manager_ops *manager_ops = manager->ops;
++
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+-	/* drm framework doesn't check NULL. */
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		if (connector->encoder == encoder)
++			if (manager_ops && manager_ops->mode_fixup)
++				manager_ops->mode_fixup(manager->dev, connector,
++							mode, adjusted_mode);
++	}
+ 
+ 	return true;
+ }
+@@ -113,12 +142,11 @@ static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+-	mode = adjusted_mode;
+-
+ 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ 		if (connector->encoder == encoder) {
+ 			if (manager_ops && manager_ops->mode_set)
+-				manager_ops->mode_set(manager->dev, mode);
++				manager_ops->mode_set(manager->dev,
++							adjusted_mode);
+ 
+ 			if (overlay_ops && overlay_ops->mode_set)
+ 				overlay_ops->mode_set(manager->dev, overlay);
+@@ -169,7 +197,6 @@ static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
+ 	exynos_encoder->manager->pipe = -1;
+ 
+ 	drm_encoder_cleanup(encoder);
+-	encoder->dev->mode_config.num_encoder--;
+ 	kfree(exynos_encoder);
+ }
+ 
+@@ -177,6 +204,41 @@ static struct drm_encoder_funcs exynos_encoder_funcs = {
+ 	.destroy = exynos_drm_encoder_destroy,
+ };
+ 
++static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder)
++{
++	struct drm_encoder *clone;
++	struct drm_device *dev = encoder->dev;
++	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
++	struct exynos_drm_display_ops *display_ops =
++				exynos_encoder->manager->display_ops;
++	unsigned int clone_mask = 0;
++	int cnt = 0;
++
++	list_for_each_entry(clone, &dev->mode_config.encoder_list, head) {
++		switch (display_ops->type) {
++		case EXYNOS_DISPLAY_TYPE_LCD:
++		case EXYNOS_DISPLAY_TYPE_HDMI:
++		case EXYNOS_DISPLAY_TYPE_VIDI:
++			clone_mask |= (1 << (cnt++));
++			break;
++		default:
++			continue;
++		}
++	}
++
++	return clone_mask;
++}
++
++void exynos_drm_encoder_setup(struct drm_device *dev)
++{
++	struct drm_encoder *encoder;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
++		encoder->possible_clones = exynos_drm_encoder_clones(encoder);
++}
++
+ struct drm_encoder *
+ exynos_drm_encoder_create(struct drm_device *dev,
+ 			   struct exynos_drm_manager *manager,
+@@ -199,6 +261,7 @@ exynos_drm_encoder_create(struct drm_device *dev,
+ 		return NULL;
+ 	}
+ 
++	exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
+ 	exynos_encoder->manager = manager;
+ 	encoder = &exynos_encoder->drm_encoder;
+ 	encoder->possible_crtcs = possible_crtcs;
+@@ -275,12 +338,27 @@ void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
+ 		manager_ops->disable_vblank(manager->dev);
+ }
+ 
+-void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
++void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
++					  void *data)
+ {
+ 	struct exynos_drm_manager *manager =
+ 		to_exynos_encoder(encoder)->manager;
+ 	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
++	int zpos = DEFAULT_ZPOS;
++
++	if (data)
++		zpos = *(int *)data;
++
++	if (overlay_ops && overlay_ops->commit)
++		overlay_ops->commit(manager->dev, zpos);
++}
++
++void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
++{
++	struct exynos_drm_manager *manager =
++		to_exynos_encoder(encoder)->manager;
+ 	int crtc = *(int *)data;
++	int zpos = DEFAULT_ZPOS;
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+@@ -290,8 +368,53 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+ 	 */
+ 	manager->pipe = crtc;
+ 
+-	if (overlay_ops && overlay_ops->commit)
+-		overlay_ops->commit(manager->dev);
++	exynos_drm_encoder_crtc_plane_commit(encoder, &zpos);
++}
++
++void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder, void *data)
++{
++	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
++	int mode = *(int *)data;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	exynos_drm_encoder_dpms(encoder, mode);
++
++	exynos_encoder->dpms = mode;
++}
++
++void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
++{
++	struct drm_device *dev = encoder->dev;
++	struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
++	struct exynos_drm_manager *manager = exynos_encoder->manager;
++	struct exynos_drm_manager_ops *manager_ops = manager->ops;
++	struct drm_connector *connector;
++	int mode = *(int *)data;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (manager_ops && manager_ops->dpms)
++		manager_ops->dpms(manager->dev, mode);
++
++	/*
++	 * set current dpms mode to the connector connected to
++	 * current encoder. connector->dpms would be checked
++	 * at drm_helper_connector_dpms()
++	 */
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
++		if (connector->encoder == encoder)
++			connector->dpms = mode;
++
++	/*
++	 * if this condition is ok then it means that the crtc is already
++	 * detached from encoder and last function for detaching is properly
++	 * done, so clear pipe from manager to prevent repeated call.
++	 */
++	if (mode > DRM_MODE_DPMS_ON) {
++		if (!encoder->crtc)
++			manager->pipe = -1;
++	}
+ }
+ 
+ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
+@@ -310,23 +433,13 @@ void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
+ 	struct exynos_drm_manager *manager =
+ 		to_exynos_encoder(encoder)->manager;
+ 	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
++	int zpos = DEFAULT_ZPOS;
+ 
+ 	DRM_DEBUG_KMS("\n");
+ 
+-	if (overlay_ops && overlay_ops->disable)
+-		overlay_ops->disable(manager->dev);
++	if (data)
++		zpos = *(int *)data;
+ 
+-	/*
+-	 * crtc is already detached from encoder and last
+-	 * function for detaching is properly done, so
+-	 * clear pipe from manager to prevent repeated call
+-	 */
+-	if (!encoder->crtc)
+-		manager->pipe = -1;
++	if (overlay_ops && overlay_ops->disable)
++		overlay_ops->disable(manager->dev, zpos);
+ }
+-
+-MODULE_AUTHOR("Inki Dae <inki.dae at samsung.com>");
+-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim at samsung.com>");
+-MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim at samsung.com>");
+-MODULE_DESCRIPTION("Samsung SoC DRM Encoder Driver");
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+index a22acfb..eb7d231 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
++++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+@@ -30,6 +30,7 @@
+ 
+ struct exynos_drm_manager;
+ 
++void exynos_drm_encoder_setup(struct drm_device *dev);
+ struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
+ 					       struct exynos_drm_manager *mgr,
+ 					       unsigned int possible_crtcs);
+@@ -39,7 +40,12 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
+ 			    void (*fn)(struct drm_encoder *, void *));
+ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
+ void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
++void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
++					  void *data);
+ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
++void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder,
++					void *data);
++void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data);
+ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
+ void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
+ 
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
+index 5bf4a1a..c38c8f4 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
+@@ -33,7 +33,6 @@
+ 
+ #include "exynos_drm_drv.h"
+ #include "exynos_drm_fb.h"
+-#include "exynos_drm_buf.h"
+ #include "exynos_drm_gem.h"
+ 
+ #define to_exynos_fb(x)	container_of(x, struct exynos_drm_fb, fb)
+@@ -42,15 +41,11 @@
+  * exynos specific framebuffer structure.
+  *
+  * @fb: drm framebuffer obejct.
+- * @exynos_gem_obj: exynos specific gem object containing a gem object.
+- * @buffer: pointer to exynos_drm_gem_buffer object.
+- *	- contain the memory information to memory region allocated
+- *	at default framebuffer creation.
++ * @exynos_gem_obj: array of exynos specific gem object containing a gem object.
+  */
+ struct exynos_drm_fb {
+ 	struct drm_framebuffer		fb;
+-	struct exynos_drm_gem_obj	*exynos_gem_obj;
+-	struct exynos_drm_gem_buf	*buffer;
++	struct exynos_drm_gem_obj	*exynos_gem_obj[MAX_FB_BUFFER];
+ };
+ 
+ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
+@@ -61,13 +56,6 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
+ 
+ 	drm_framebuffer_cleanup(fb);
+ 
+-	/*
+-	 * default framebuffer has no gem object so
+-	 * a buffer of the default framebuffer should be released at here.
+-	 */
+-	if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
+-		exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);
+-
+ 	kfree(exynos_fb);
+ 	exynos_fb = NULL;
+ }
+@@ -81,7 +69,7 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+ 	return drm_gem_handle_create(file_priv,
+-			&exynos_fb->exynos_gem_obj->base, handle);
++			&exynos_fb->exynos_gem_obj[0]->base, handle);
+ }
+ 
+ static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
+@@ -102,134 +90,88 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
+ 	.dirty		= exynos_drm_fb_dirty,
+ };
+ 
+-static struct drm_framebuffer *
+-exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
+-		    struct drm_mode_fb_cmd *mode_cmd)
++struct drm_framebuffer *
++exynos_drm_framebuffer_init(struct drm_device *dev,
++			    struct drm_mode_fb_cmd2 *mode_cmd,
++			    struct drm_gem_object *obj)
+ {
+ 	struct exynos_drm_fb *exynos_fb;
+-	struct drm_framebuffer *fb;
+-	struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
+-	struct drm_gem_object *obj;
+-	unsigned int size;
+ 	int ret;
+ 
+-	DRM_DEBUG_KMS("%s\n", __FILE__);
+-
+-	mode_cmd->pitch = max(mode_cmd->pitch,
+-			mode_cmd->width * (mode_cmd->bpp >> 3));
+-
+-	DRM_LOG_KMS("drm fb create(%dx%d)\n",
+-			mode_cmd->width, mode_cmd->height);
+-
+ 	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+ 	if (!exynos_fb) {
+-		DRM_ERROR("failed to allocate exynos drm framebuffer.\n");
++		DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+-	fb = &exynos_fb->fb;
+-	ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
++	ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
+ 	if (ret) {
+-		DRM_ERROR("failed to initialize framebuffer.\n");
+-		goto err_init;
++		DRM_ERROR("failed to initialize framebuffer\n");
++		return ERR_PTR(ret);
+ 	}
+ 
+-	DRM_LOG_KMS("create: fb id: %d\n", fb->base.id);
++	drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
++	exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
+ 
+-	size = mode_cmd->pitch * mode_cmd->height;
++	return &exynos_fb->fb;
++}
+ 
+-	/*
+-	 * mode_cmd->handle could be NULL at booting time or
+-	 * with user request. if NULL, a new buffer or a gem object
+-	 * would be allocated.
+-	 */
+-	if (!mode_cmd->handle) {
+-		if (!file_priv) {
+-			struct exynos_drm_gem_buf *buffer;
+-
+-			/*
+-			 * in case that file_priv is NULL, it allocates
+-			 * only buffer and this buffer would be used
+-			 * for default framebuffer.
+-			 */
+-			buffer = exynos_drm_buf_create(dev, size);
+-			if (IS_ERR(buffer)) {
+-				ret = PTR_ERR(buffer);
+-				goto err_buffer;
+-			}
+-
+-			exynos_fb->buffer = buffer;
+-
+-			DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
+-					(unsigned long)buffer->dma_addr, size);
+-
+-			goto out;
+-		} else {
+-			exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+-							&mode_cmd->handle,
+-							size);
+-			if (IS_ERR(exynos_gem_obj)) {
+-				ret = PTR_ERR(exynos_gem_obj);
+-				goto err_buffer;
+-			}
+-		}
+-	} else {
+-		obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+-		if (!obj) {
+-			DRM_ERROR("failed to lookup gem object.\n");
+-			goto err_buffer;
+-		}
++static struct drm_framebuffer *
++exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
++		      struct drm_mode_fb_cmd2 *mode_cmd)
++{
++	struct drm_gem_object *obj;
++	struct drm_framebuffer *fb;
++	struct exynos_drm_fb *exynos_fb;
++	int nr;
++	int i;
+ 
+-		exynos_gem_obj = to_exynos_gem_obj(obj);
++	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+-		drm_gem_object_unreference_unlocked(obj);
++	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
++	if (!obj) {
++		DRM_ERROR("failed to lookup gem object\n");
++		return ERR_PTR(-ENOENT);
+ 	}
+ 
+-	/*
+-	 * if got a exynos_gem_obj from either a handle or
+-	 * a new creation then exynos_fb->exynos_gem_obj is NULL
+-	 * so that default framebuffer has no its own gem object,
+-	 * only its own buffer object.
+-	 */
+-	exynos_fb->buffer = exynos_gem_obj->buffer;
+-
+-	DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
+-			(unsigned long)exynos_fb->buffer->dma_addr, size,
+-			(unsigned int)&exynos_gem_obj->base);
++	drm_gem_object_unreference_unlocked(obj);
+ 
+-out:
+-	exynos_fb->exynos_gem_obj = exynos_gem_obj;
++	fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
++	if (IS_ERR(fb))
++		return fb;
+ 
+-	drm_helper_mode_fill_fb_struct(fb, mode_cmd);
++	exynos_fb = to_exynos_fb(fb);
++	nr = exynos_drm_format_num_buffers(fb->pixel_format);
+ 
+-	return fb;
+-
+-err_buffer:
+-	drm_framebuffer_cleanup(fb);
+-
+-err_init:
+-	kfree(exynos_fb);
++	for (i = 1; i < nr; i++) {
++		obj = drm_gem_object_lookup(dev, file_priv,
++				mode_cmd->handles[i]);
++		if (!obj) {
++			DRM_ERROR("failed to lookup gem object\n");
++			exynos_drm_fb_destroy(fb);
++			return ERR_PTR(-ENOENT);
++		}
+ 
+-	return ERR_PTR(ret);
+-}
++		drm_gem_object_unreference_unlocked(obj);
+ 
+-struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
+-					      struct drm_file *file_priv,
+-					      struct drm_mode_fb_cmd *mode_cmd)
+-{
+-	DRM_DEBUG_KMS("%s\n", __FILE__);
++		exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
++	}
+ 
+-	return exynos_drm_fb_init(file_priv, dev, mode_cmd);
++	return fb;
+ }
+ 
+-struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
++struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
++						int index)
+ {
+ 	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+ 	struct exynos_drm_gem_buf *buffer;
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+-	buffer = exynos_fb->buffer;
++	if (index >= MAX_FB_BUFFER)
++		return NULL;
++
++	buffer = exynos_fb->exynos_gem_obj[index]->buffer;
+ 	if (!buffer)
+ 		return NULL;
+ 
+@@ -250,7 +192,7 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
+ }
+ 
+ static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
+-	.fb_create = exynos_drm_fb_create,
++	.fb_create = exynos_user_fb_create,
+ 	.output_poll_changed = exynos_drm_output_poll_changed,
+ };
+ 
+@@ -269,9 +211,3 @@ void exynos_drm_mode_config_init(struct drm_device *dev)
+ 
+ 	dev->mode_config.funcs = &exynos_drm_mode_config_funcs;
+ }
+-
+-MODULE_AUTHOR("Inki Dae <inki.dae at samsung.com>");
+-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim at samsung.com>");
+-MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim at samsung.com>");
+-MODULE_DESCRIPTION("Samsung SoC DRM FB Driver");
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
+index eb35931..3ecb30d 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
++++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
+@@ -28,9 +28,27 @@
+ #ifndef _EXYNOS_DRM_FB_H_
+ #define _EXYNOS_DRM_FB_H
+ 
+-struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
+-					      struct drm_file *filp,
+-					      struct drm_mode_fb_cmd *mode_cmd);
++static inline int exynos_drm_format_num_buffers(uint32_t format)
++{
++	switch (format) {
++	case DRM_FORMAT_NV12M:
++	case DRM_FORMAT_NV12MT:
++		return 2;
++	case DRM_FORMAT_YUV420M:
++		return 3;
++	default:
++		return 1;
++	}
++}
++
++struct drm_framebuffer *
++exynos_drm_framebuffer_init(struct drm_device *dev,
++			    struct drm_mode_fb_cmd2 *mode_cmd,
++			    struct drm_gem_object *obj);
++
++/* get memory information of a drm framebuffer */
++struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
++						 int index);
+ 
+ void exynos_drm_mode_config_init(struct drm_device *dev);
+ 
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+index 836f410..d5586cc 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+@@ -34,7 +34,6 @@
+ #include "exynos_drm_drv.h"
+ #include "exynos_drm_fb.h"
+ #include "exynos_drm_gem.h"
+-#include "exynos_drm_buf.h"
+ 
+ #define MAX_CONNECTOR		4
+ #define PREFERRED_BPP		32
+@@ -43,43 +42,17 @@
+ 				drm_fb_helper)
+ 
+ struct exynos_drm_fbdev {
+-	struct drm_fb_helper	drm_fb_helper;
+-	struct drm_framebuffer	*fb;
++	struct drm_fb_helper		drm_fb_helper;
++	struct exynos_drm_gem_obj	*exynos_gem_obj;
+ };
+ 
+-static int exynos_drm_fbdev_set_par(struct fb_info *info)
+-{
+-	struct fb_var_screeninfo *var = &info->var;
+-
+-	switch (var->bits_per_pixel) {
+-	case 32:
+-	case 24:
+-	case 18:
+-	case 16:
+-	case 12:
+-		info->fix.visual = FB_VISUAL_TRUECOLOR;
+-		break;
+-	case 1:
+-		info->fix.visual = FB_VISUAL_MONO01;
+-		break;
+-	default:
+-		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+-		break;
+-	}
+-
+-	info->fix.line_length = (var->xres_virtual * var->bits_per_pixel) / 8;
+-
+-	return drm_fb_helper_set_par(info);
+-}
+-
+-
+ static struct fb_ops exynos_drm_fb_ops = {
+ 	.owner		= THIS_MODULE,
+ 	.fb_fillrect	= cfb_fillrect,
+ 	.fb_copyarea	= cfb_copyarea,
+ 	.fb_imageblit	= cfb_imageblit,
+ 	.fb_check_var	= drm_fb_helper_check_var,
+-	.fb_set_par	= exynos_drm_fbdev_set_par,
++	.fb_set_par	= drm_fb_helper_set_par,
+ 	.fb_blank	= drm_fb_helper_blank,
+ 	.fb_pan_display	= drm_fb_helper_pan_display,
+ 	.fb_setcmap	= drm_fb_helper_setcmap,
+@@ -90,26 +63,24 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
+ {
+ 	struct fb_info *fbi = helper->fbdev;
+ 	struct drm_device *dev = helper->dev;
+-	struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
+ 	struct exynos_drm_gem_buf *buffer;
+ 	unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
+ 	unsigned long offset;
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+-	exynos_fb->fb = fb;
+-
+-	drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
++	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ 	drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+ 
+-	buffer = exynos_drm_fb_get_buf(fb);
++	/* RGB formats use only one buffer */
++	buffer = exynos_drm_fb_buffer(fb, 0);
+ 	if (!buffer) {
+ 		DRM_LOG_KMS("buffer is null.\n");
+ 		return -EFAULT;
+ 	}
+ 
+ 	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
+-	offset += fbi->var.yoffset * fb->pitch;
++	offset += fbi->var.yoffset * fb->pitches[0];
+ 
+ 	dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
+ 	fbi->screen_base = buffer->kvaddr + offset;
+@@ -124,10 +95,12 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
+ 				    struct drm_fb_helper_surface_size *sizes)
+ {
+ 	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
++	struct exynos_drm_gem_obj *exynos_gem_obj;
+ 	struct drm_device *dev = helper->dev;
+ 	struct fb_info *fbi;
+-	struct drm_mode_fb_cmd mode_cmd = { 0 };
++	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ 	struct platform_device *pdev = dev->platformdev;
++	unsigned long size;
+ 	int ret;
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+@@ -138,8 +111,9 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
+ 
+ 	mode_cmd.width = sizes->surface_width;
+ 	mode_cmd.height = sizes->surface_height;
+-	mode_cmd.bpp = sizes->surface_bpp;
+-	mode_cmd.depth = sizes->surface_depth;
++	mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
++	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
++							  sizes->surface_depth);
+ 
+ 	mutex_lock(&dev->struct_mutex);
+ 
+@@ -150,14 +124,25 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
+ 		goto out;
+ 	}
+ 
+-	exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
+-	if (IS_ERR_OR_NULL(exynos_fbdev->fb)) {
++	size = mode_cmd.pitches[0] * mode_cmd.height;
++
++	/* 0 means to allocate physically continuous memory */
++	exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
++	if (IS_ERR(exynos_gem_obj)) {
++		ret = PTR_ERR(exynos_gem_obj);
++		goto out;
++	}
++
++	exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
++
++	helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
++			&exynos_gem_obj->base);
++	if (IS_ERR_OR_NULL(helper->fb)) {
+ 		DRM_ERROR("failed to create drm framebuffer.\n");
+-		ret = PTR_ERR(exynos_fbdev->fb);
++		ret = PTR_ERR(helper->fb);
+ 		goto out;
+ 	}
+ 
+-	helper->fb = exynos_fbdev->fb;
+ 	helper->fbdev = fbi;
+ 
+ 	fbi->par = helper;
+@@ -171,8 +156,10 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
+ 	}
+ 
+ 	ret = exynos_drm_fbdev_update(helper, helper->fb);
+-	if (ret < 0)
++	if (ret < 0) {
+ 		fb_dealloc_cmap(&fbi->cmap);
++		goto out;
++	}
+ 
+ /*
+  * if failed, all resources allocated above would be released by
+@@ -184,58 +171,6 @@ out:
+ 	return ret;
+ }
+ 
+-static bool
+-exynos_drm_fbdev_is_samefb(struct drm_framebuffer *fb,
+-			    struct drm_fb_helper_surface_size *sizes)
+-{
+-	if (fb->width != sizes->surface_width)
+-		return false;
+-	if (fb->height != sizes->surface_height)
+-		return false;
+-	if (fb->bits_per_pixel != sizes->surface_bpp)
+-		return false;
+-	if (fb->depth != sizes->surface_depth)
+-		return false;
+-
+-	return true;
+-}
+-
+-static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
+-				      struct drm_fb_helper_surface_size *sizes)
+-{
+-	struct drm_device *dev = helper->dev;
+-	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
+-	struct drm_framebuffer *fb = exynos_fbdev->fb;
+-	struct drm_mode_fb_cmd mode_cmd = { 0 };
+-
+-	DRM_DEBUG_KMS("%s\n", __FILE__);
+-
+-	if (helper->fb != fb) {
+-		DRM_ERROR("drm framebuffer is different\n");
+-		return -EINVAL;
+-	}
+-
+-	if (exynos_drm_fbdev_is_samefb(fb, sizes))
+-		return 0;
+-
+-	mode_cmd.width = sizes->surface_width;
+-	mode_cmd.height = sizes->surface_height;
+-	mode_cmd.bpp = sizes->surface_bpp;
+-	mode_cmd.depth = sizes->surface_depth;
+-
+-	if (fb->funcs->destroy)
+-		fb->funcs->destroy(fb);
+-
+-	exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
+-	if (IS_ERR(exynos_fbdev->fb)) {
+-		DRM_ERROR("failed to allocate fb.\n");
+-		return PTR_ERR(exynos_fbdev->fb);
+-	}
+-
+-	helper->fb = exynos_fbdev->fb;
+-	return exynos_drm_fbdev_update(helper, helper->fb);
+-}
+-
+ static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
+ 				   struct drm_fb_helper_surface_size *sizes)
+ {
+@@ -243,6 +178,10 @@ static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
++	/*
++	 * with !helper->fb, it means that this funcion is called first time
++	 * and after that, the helper->fb would be used as clone mode.
++	 */
+ 	if (!helper->fb) {
+ 		ret = exynos_drm_fbdev_create(helper, sizes);
+ 		if (ret < 0) {
+@@ -255,12 +194,6 @@ static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
+ 		 * because register_framebuffer() should be called.
+ 		 */
+ 		ret = 1;
+-	} else {
+-		ret = exynos_drm_fbdev_recreate(helper, sizes);
+-		if (ret < 0) {
+-			DRM_ERROR("failed to reconfigure fbdev\n");
+-			return ret;
+-		}
+ 	}
+ 
+ 	return ret;
+@@ -366,6 +299,9 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
+ 
+ 	fbdev = to_exynos_fbdev(private->fb_helper);
+ 
++	if (fbdev->exynos_gem_obj)
++		exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
++
+ 	exynos_drm_fbdev_destroy(dev, private->fb_helper);
+ 	kfree(fbdev);
+ 	private->fb_helper = NULL;
+@@ -380,89 +316,3 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+ 
+ 	drm_fb_helper_restore_fbdev_mode(private->fb_helper);
+ }
+-
+-int exynos_drm_fbdev_reinit(struct drm_device *dev)
+-{
+-	struct exynos_drm_private *private = dev->dev_private;
+-	struct drm_fb_helper *fb_helper;
+-	int ret;
+-
+-	if (!private)
+-		return -EINVAL;
+-
+-	/*
+-	 * if all sub drivers were unloaded then num_connector is 0
+-	 * so at this time, the framebuffers also should be destroyed.
+-	 */
+-	if (!dev->mode_config.num_connector) {
+-		exynos_drm_fbdev_fini(dev);
+-		return 0;
+-	}
+-
+-	fb_helper = private->fb_helper;
+-
+-	if (fb_helper) {
+-		struct list_head temp_list;
+-
+-		INIT_LIST_HEAD(&temp_list);
+-
+-		/*
+-		 * fb_helper is reintialized but kernel fb is reused
+-		 * so kernel_fb_list need to be backuped and restored
+-		 */
+-		if (!list_empty(&fb_helper->kernel_fb_list))
+-			list_replace_init(&fb_helper->kernel_fb_list,
+-					&temp_list);
+-
+-		drm_fb_helper_fini(fb_helper);
+-
+-		ret = drm_fb_helper_init(dev, fb_helper,
+-				dev->mode_config.num_crtc, MAX_CONNECTOR);
+-		if (ret < 0) {
+-			DRM_ERROR("failed to initialize drm fb helper\n");
+-			return ret;
+-		}
+-
+-		if (!list_empty(&temp_list))
+-			list_replace(&temp_list, &fb_helper->kernel_fb_list);
+-
+-		ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+-		if (ret < 0) {
+-			DRM_ERROR("failed to add fb helper to connectors\n");
+-			goto err;
+-		}
+-
+-		ret = drm_fb_helper_initial_config(fb_helper, PREFERRED_BPP);
+-		if (ret < 0) {
+-			DRM_ERROR("failed to set up hw configuration.\n");
+-			goto err;
+-		}
+-	} else {
+-		/*
+-		 * if drm_load() failed whem drm load() was called prior
+-		 * to specific drivers, fb_helper must be NULL and so
+-		 * this fuction should be called again to re-initialize and
+-		 * re-configure the fb helper. it means that this function
+-		 * has been called by the specific drivers.
+-		 */
+-		ret = exynos_drm_fbdev_init(dev);
+-	}
+-
+-	return ret;
+-
+-err:
+-	/*
+-	 * if drm_load() failed when drm load() was called prior
+-	 * to specific drivers, the fb_helper must be NULL and so check it.
+-	 */
+-	if (fb_helper)
+-		drm_fb_helper_fini(fb_helper);
+-
+-	return ret;
+-}
+-
+-MODULE_AUTHOR("Inki Dae <inki.dae at samsung.com>");
+-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim at samsung.com>");
+-MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim at samsung.com>");
+-MODULE_DESCRIPTION("Samsung SoC DRM FBDEV Driver");
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+index db3b3d9..29fdbfe 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+@@ -17,6 +17,7 @@
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
+ #include <linux/clk.h>
++#include <linux/pm_runtime.h>
+ 
+ #include <drm/exynos_drm.h>
+ #include <plat/regs-fb-v4.h>
+@@ -68,6 +69,7 @@ struct fimd_win_data {
+ 	void __iomem		*vaddr;
+ 	unsigned int		buf_offsize;
+ 	unsigned int		line_size;	/* bytes */
++	bool			enabled;
+ };
+ 
+ struct fimd_context {
+@@ -84,8 +86,10 @@ struct fimd_context {
+ 	unsigned long			irq_flags;
+ 	u32				vidcon0;
+ 	u32				vidcon1;
++	bool				suspended;
++	struct mutex			lock;
+ 
+-	struct fb_videomode		*timing;
++	struct exynos_drm_panel_info *panel;
+ };
+ 
+ static bool fimd_display_is_connected(struct device *dev)
+@@ -97,13 +101,13 @@ static bool fimd_display_is_connected(struct device *dev)
+ 	return true;
+ }
+ 
+-static void *fimd_get_timing(struct device *dev)
++static void *fimd_get_panel(struct device *dev)
+ {
+ 	struct fimd_context *ctx = get_fimd_context(dev);
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+-	return ctx->timing;
++	return ctx->panel;
+ }
+ 
+ static int fimd_check_timing(struct device *dev, void *timing)
+@@ -119,7 +123,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
+ {
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+-	/* TODO. */
++	/* TODO */
+ 
+ 	return 0;
+ }
+@@ -127,17 +131,75 @@ static int fimd_display_power_on(struct device *dev, int mode)
+ static struct exynos_drm_display_ops fimd_display_ops = {
+ 	.type = EXYNOS_DISPLAY_TYPE_LCD,
+ 	.is_connected = fimd_display_is_connected,
+-	.get_timing = fimd_get_timing,
++	.get_panel = fimd_get_panel,
+ 	.check_timing = fimd_check_timing,
+ 	.power_on = fimd_display_power_on,
+ };
+ 
++static void fimd_dpms(struct device *subdrv_dev, int mode)
++{
++	struct fimd_context *ctx = get_fimd_context(subdrv_dev);
++
++	DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
++
++	mutex_lock(&ctx->lock);
++
++	switch (mode) {
++	case DRM_MODE_DPMS_ON:
++		/*
++		 * enable fimd hardware only if suspended status.
++		 *
++		 * P.S. fimd_dpms function would be called at booting time so
++		 * clk_enable could be called double time.
++		 */
++		if (ctx->suspended)
++			pm_runtime_get_sync(subdrv_dev);
++		break;
++	case DRM_MODE_DPMS_STANDBY:
++	case DRM_MODE_DPMS_SUSPEND:
++	case DRM_MODE_DPMS_OFF:
++		if (!ctx->suspended)
++			pm_runtime_put_sync(subdrv_dev);
++		break;
++	default:
++		DRM_DEBUG_KMS("unspecified mode %d\n", mode);
++		break;
++	}
++
++	mutex_unlock(&ctx->lock);
++}
++
++static void fimd_apply(struct device *subdrv_dev)
++{
++	struct fimd_context *ctx = get_fimd_context(subdrv_dev);
++	struct exynos_drm_manager *mgr = ctx->subdrv.manager;
++	struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
++	struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
++	struct fimd_win_data *win_data;
++	int i;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	for (i = 0; i < WINDOWS_NR; i++) {
++		win_data = &ctx->win_data[i];
++		if (win_data->enabled && (ovl_ops && ovl_ops->commit))
++			ovl_ops->commit(subdrv_dev, i);
++	}
++
++	if (mgr_ops && mgr_ops->commit)
++		mgr_ops->commit(subdrv_dev);
++}
++
+ static void fimd_commit(struct device *dev)
+ {
+ 	struct fimd_context *ctx = get_fimd_context(dev);
+-	struct fb_videomode *timing = ctx->timing;
++	struct exynos_drm_panel_info *panel = ctx->panel;
++	struct fb_videomode *timing = &panel->timing;
+ 	u32 val;
+ 
++	if (ctx->suspended)
++		return;
++
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+ 	/* setup polarity values from machine code. */
+@@ -177,40 +239,6 @@ static void fimd_commit(struct device *dev)
+ 	writel(val, ctx->regs + VIDCON0);
+ }
+ 
+-static void fimd_disable(struct device *dev)
+-{
+-	struct fimd_context *ctx = get_fimd_context(dev);
+-	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+-	struct drm_device *drm_dev = subdrv->drm_dev;
+-	struct exynos_drm_manager *manager = &subdrv->manager;
+-	u32 val;
+-
+-	DRM_DEBUG_KMS("%s\n", __FILE__);
+-
+-	/* fimd dma off */
+-	val = readl(ctx->regs + VIDCON0);
+-	val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
+-	writel(val, ctx->regs + VIDCON0);
+-
+-	/*
+-	 * if vblank is enabled status with dma off then
+-	 * it disables vsync interrupt.
+-	 */
+-	if (drm_dev->vblank_enabled[manager->pipe] &&
+-		atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
+-		drm_vblank_put(drm_dev, manager->pipe);
+-
+-		/*
+-		 * if vblank_disable_allowed is 0 then disable
+-		 * vsync interrupt right now else the vsync interrupt
+-		 * would be disabled by drm timer once a current process
+-		 * gives up ownershop of vblank event.
+-		 */
+-		if (!drm_dev->vblank_disable_allowed)
+-			drm_vblank_off(drm_dev, manager->pipe);
+-	}
+-}
+-
+ static int fimd_enable_vblank(struct device *dev)
+ {
+ 	struct fimd_context *ctx = get_fimd_context(dev);
+@@ -218,6 +246,9 @@ static int fimd_enable_vblank(struct device *dev)
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
++	if (ctx->suspended)
++		return -EPERM;
++
+ 	if (!test_and_set_bit(0, &ctx->irq_flags)) {
+ 		val = readl(ctx->regs + VIDINTCON0);
+ 
+@@ -242,6 +273,9 @@ static void fimd_disable_vblank(struct device *dev)
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
++	if (ctx->suspended)
++		return;
++
+ 	if (test_and_clear_bit(0, &ctx->irq_flags)) {
+ 		val = readl(ctx->regs + VIDINTCON0);
+ 
+@@ -253,8 +287,9 @@ static void fimd_disable_vblank(struct device *dev)
+ }
+ 
+ static struct exynos_drm_manager_ops fimd_manager_ops = {
++	.dpms = fimd_dpms,
++	.apply = fimd_apply,
+ 	.commit = fimd_commit,
+-	.disable = fimd_disable,
+ 	.enable_vblank = fimd_enable_vblank,
+ 	.disable_vblank = fimd_disable_vblank,
+ };
+@@ -264,6 +299,7 @@ static void fimd_win_mode_set(struct device *dev,
+ {
+ 	struct fimd_context *ctx = get_fimd_context(dev);
+ 	struct fimd_win_data *win_data;
++	int win;
+ 	unsigned long offset;
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+@@ -273,12 +309,19 @@ static void fimd_win_mode_set(struct device *dev,
+ 		return;
+ 	}
+ 
++	win = overlay->zpos;
++	if (win == DEFAULT_ZPOS)
++		win = ctx->default_win;
++
++	if (win < 0 || win > WINDOWS_NR)
++		return;
++
+ 	offset = overlay->fb_x * (overlay->bpp >> 3);
+ 	offset += overlay->fb_y * overlay->pitch;
+ 
+ 	DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
+ 
+-	win_data = &ctx->win_data[ctx->default_win];
++	win_data = &ctx->win_data[win];
+ 
+ 	win_data->offset_x = overlay->crtc_x;
+ 	win_data->offset_y = overlay->crtc_y;
+@@ -286,8 +329,8 @@ static void fimd_win_mode_set(struct device *dev,
+ 	win_data->ovl_height = overlay->crtc_height;
+ 	win_data->fb_width = overlay->fb_width;
+ 	win_data->fb_height = overlay->fb_height;
+-	win_data->dma_addr = overlay->dma_addr + offset;
+-	win_data->vaddr = overlay->vaddr + offset;
++	win_data->dma_addr = overlay->dma_addr[0] + offset;
++	win_data->vaddr = overlay->vaddr[0] + offset;
+ 	win_data->bpp = overlay->bpp;
+ 	win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
+ 				(overlay->bpp >> 3);
+@@ -381,15 +424,21 @@ static void fimd_win_set_colkey(struct device *dev, unsigned int win)
+ 	writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
+ }
+ 
+-static void fimd_win_commit(struct device *dev)
++static void fimd_win_commit(struct device *dev, int zpos)
+ {
+ 	struct fimd_context *ctx = get_fimd_context(dev);
+ 	struct fimd_win_data *win_data;
+-	int win = ctx->default_win;
++	int win = zpos;
+ 	unsigned long val, alpha, size;
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
++	if (ctx->suspended)
++		return;
++
++	if (win == DEFAULT_ZPOS)
++		win = ctx->default_win;
++
+ 	if (win < 0 || win > WINDOWS_NR)
+ 		return;
+ 
+@@ -472,24 +521,37 @@ static void fimd_win_commit(struct device *dev)
+ 	if (win != 0)
+ 		fimd_win_set_colkey(dev, win);
+ 
++	/* wincon */
++	val = readl(ctx->regs + WINCON(win));
++	val |= WINCONx_ENWIN;
++	writel(val, ctx->regs + WINCON(win));
++
+ 	/* Enable DMA channel and unprotect windows */
+ 	val = readl(ctx->regs + SHADOWCON);
+ 	val |= SHADOWCON_CHx_ENABLE(win);
+ 	val &= ~SHADOWCON_WINx_PROTECT(win);
+ 	writel(val, ctx->regs + SHADOWCON);
++
++	win_data->enabled = true;
+ }
+ 
+-static void fimd_win_disable(struct device *dev)
++static void fimd_win_disable(struct device *dev, int zpos)
+ {
+ 	struct fimd_context *ctx = get_fimd_context(dev);
+-	int win = ctx->default_win;
++	struct fimd_win_data *win_data;
++	int win = zpos;
+ 	u32 val;
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
++	if (win == DEFAULT_ZPOS)
++		win = ctx->default_win;
++
+ 	if (win < 0 || win > WINDOWS_NR)
+ 		return;
+ 
++	win_data = &ctx->win_data[win];
++
+ 	/* protect windows */
+ 	val = readl(ctx->regs + SHADOWCON);
+ 	val |= SHADOWCON_WINx_PROTECT(win);
+@@ -505,6 +567,8 @@ static void fimd_win_disable(struct device *dev)
+ 	val &= ~SHADOWCON_CHx_ENABLE(win);
+ 	val &= ~SHADOWCON_WINx_PROTECT(win);
+ 	writel(val, ctx->regs + SHADOWCON);
++
++	win_data->enabled = false;
+ }
+ 
+ static struct exynos_drm_overlay_ops fimd_overlay_ops = {
+@@ -513,6 +577,13 @@ static struct exynos_drm_overlay_ops fimd_overlay_ops = {
+ 	.disable = fimd_win_disable,
+ };
+ 
++static struct exynos_drm_manager fimd_manager = {
++	.pipe		= -1,
++	.ops		= &fimd_manager_ops,
++	.overlay_ops	= &fimd_overlay_ops,
++	.display_ops	= &fimd_display_ops,
++};
++
+ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
+ {
+ 	struct exynos_drm_private *dev_priv = drm_dev->dev_private;
+@@ -540,8 +611,21 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
+ 		wake_up_interruptible(&e->base.file_priv->event_wait);
+ 	}
+ 
+-	if (is_checked)
+-		drm_vblank_put(drm_dev, crtc);
++	if (is_checked) {
++		/*
++		 * call drm_vblank_put only in case that drm_vblank_get was
++		 * called.
++		 */
++		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
++			drm_vblank_put(drm_dev, crtc);
++
++		/*
++		 * don't off vblank if vblank_disable_allowed is 1,
++		 * because vblank would be off by timer handler.
++		 */
++		if (!drm_dev->vblank_disable_allowed)
++			drm_vblank_off(drm_dev, crtc);
++	}
+ 
+ 	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ }
+@@ -551,7 +635,7 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
+ 	struct fimd_context *ctx = (struct fimd_context *)dev_id;
+ 	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+ 	struct drm_device *drm_dev = subdrv->drm_dev;
+-	struct exynos_drm_manager *manager = &subdrv->manager;
++	struct exynos_drm_manager *manager = subdrv->manager;
+ 	u32 val;
+ 
+ 	val = readl(ctx->regs + VIDINTCON1);
+@@ -560,19 +644,14 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
+ 		/* VSYNC interrupt */
+ 		writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+ 
+-	/*
+-	 * in case that vblank_disable_allowed is 1, it could induce
+-	 * the problem that manager->pipe could be -1 because with
+-	 * disable callback, vsync interrupt isn't disabled and at this moment,
+-	 * vsync interrupt could occur. the vsync interrupt would be disabled
+-	 * by timer handler later.
+-	 */
+-	if (manager->pipe == -1)
+-		return IRQ_HANDLED;
++	/* check the crtc is detached already from encoder */
++	if (manager->pipe < 0)
++		goto out;
+ 
+ 	drm_handle_vblank(drm_dev, manager->pipe);
+ 	fimd_finish_pageflip(drm_dev, manager->pipe);
+ 
++out:
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -590,6 +669,13 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+ 	 */
+ 	drm_dev->irq_enabled = 1;
+ 
++	/*
++	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
++	 * by drm timer once a current process gives up ownership of
++	 * vblank event.(after drm_vblank_put function is called)
++	 */
++	drm_dev->vblank_disable_allowed = 1;
++
+ 	return 0;
+ }
+ 
+@@ -662,13 +748,53 @@ static void fimd_clear_win(struct fimd_context *ctx, int win)
+ 	writel(val, ctx->regs + SHADOWCON);
+ }
+ 
++static int fimd_power_on(struct fimd_context *ctx, bool enable)
++{
++	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
++	struct device *dev = subdrv->dev;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (enable != false && enable != true)
++		return -EINVAL;
++
++	if (enable) {
++		int ret;
++
++		ret = clk_enable(ctx->bus_clk);
++		if (ret < 0)
++			return ret;
++
++		ret = clk_enable(ctx->lcd_clk);
++		if  (ret < 0) {
++			clk_disable(ctx->bus_clk);
++			return ret;
++		}
++
++		ctx->suspended = false;
++
++		/* if vblank was enabled status, enable it again. */
++		if (test_and_clear_bit(0, &ctx->irq_flags))
++			fimd_enable_vblank(dev);
++
++		fimd_apply(dev);
++	} else {
++		clk_disable(ctx->lcd_clk);
++		clk_disable(ctx->bus_clk);
++
++		ctx->suspended = true;
++	}
++
++	return 0;
++}
++
+ static int __devinit fimd_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	struct fimd_context *ctx;
+ 	struct exynos_drm_subdrv *subdrv;
+ 	struct exynos_drm_fimd_pdata *pdata;
+-	struct fb_videomode *timing;
++	struct exynos_drm_panel_info *panel;
+ 	struct resource *res;
+ 	int win;
+ 	int ret = -EINVAL;
+@@ -681,9 +807,9 @@ static int __devinit fimd_probe(struct platform_device *pdev)
+ 		return -EINVAL;
+ 	}
+ 
+-	timing = &pdata->timing;
+-	if (!timing) {
+-		dev_err(dev, "timing is null.\n");
++	panel = &pdata->panel;
++	if (!panel) {
++		dev_err(dev, "panel is null.\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -698,8 +824,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
+ 		goto err_clk_get;
+ 	}
+ 
+-	clk_enable(ctx->bus_clk);
+-
+ 	ctx->lcd_clk = clk_get(dev, "sclk_fimd");
+ 	if (IS_ERR(ctx->lcd_clk)) {
+ 		dev_err(dev, "failed to get lcd clock\n");
+@@ -707,8 +831,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
+ 		goto err_bus_clk;
+ 	}
+ 
+-	clk_enable(ctx->lcd_clk);
+-
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	if (!res) {
+ 		dev_err(dev, "failed to find registers\n");
+@@ -739,37 +861,40 @@ static int __devinit fimd_probe(struct platform_device *pdev)
+ 
+ 	ctx->irq = res->start;
+ 
+-	for (win = 0; win < WINDOWS_NR; win++)
+-		fimd_clear_win(ctx, win);
+-
+ 	ret = request_irq(ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx);
+ 	if (ret < 0) {
+ 		dev_err(dev, "irq request failed.\n");
+ 		goto err_req_irq;
+ 	}
+ 
+-	ctx->clkdiv = fimd_calc_clkdiv(ctx, timing);
+ 	ctx->vidcon0 = pdata->vidcon0;
+ 	ctx->vidcon1 = pdata->vidcon1;
+ 	ctx->default_win = pdata->default_win;
+-	ctx->timing = timing;
+-
+-	timing->pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv;
+-
+-	DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n",
+-			timing->pixclock, ctx->clkdiv);
++	ctx->panel = panel;
+ 
+ 	subdrv = &ctx->subdrv;
+ 
++	subdrv->dev = dev;
++	subdrv->manager = &fimd_manager;
+ 	subdrv->probe = fimd_subdrv_probe;
+ 	subdrv->remove = fimd_subdrv_remove;
+-	subdrv->manager.pipe = -1;
+-	subdrv->manager.ops = &fimd_manager_ops;
+-	subdrv->manager.overlay_ops = &fimd_overlay_ops;
+-	subdrv->manager.display_ops = &fimd_display_ops;
+-	subdrv->manager.dev = dev;
++
++	mutex_init(&ctx->lock);
+ 
+ 	platform_set_drvdata(pdev, ctx);
++
++	pm_runtime_enable(dev);
++	pm_runtime_get_sync(dev);
++
++	ctx->clkdiv = fimd_calc_clkdiv(ctx, &panel->timing);
++	panel->timing.pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv;
++
++	DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n",
++			panel->timing.pixclock, ctx->clkdiv);
++
++	for (win = 0; win < WINDOWS_NR; win++)
++		fimd_clear_win(ctx, win);
++
+ 	exynos_drm_subdrv_register(subdrv);
+ 
+ 	return 0;
+@@ -797,14 +922,25 @@ err_clk_get:
+ 
+ static int __devexit fimd_remove(struct platform_device *pdev)
+ {
++	struct device *dev = &pdev->dev;
+ 	struct fimd_context *ctx = platform_get_drvdata(pdev);
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+ 	exynos_drm_subdrv_unregister(&ctx->subdrv);
+ 
++	if (ctx->suspended)
++		goto out;
++
+ 	clk_disable(ctx->lcd_clk);
+ 	clk_disable(ctx->bus_clk);
++
++	pm_runtime_set_suspended(dev);
++	pm_runtime_put_sync(dev);
++
++out:
++	pm_runtime_disable(dev);
++
+ 	clk_put(ctx->lcd_clk);
+ 	clk_put(ctx->bus_clk);
+ 
+@@ -818,29 +954,69 @@ static int __devexit fimd_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-static struct platform_driver fimd_driver = {
+-	.probe		= fimd_probe,
+-	.remove		= __devexit_p(fimd_remove),
+-	.driver		= {
+-		.name	= "exynos4-fb",
+-		.owner	= THIS_MODULE,
+-	},
+-};
++#ifdef CONFIG_PM_SLEEP
++static int fimd_suspend(struct device *dev)
++{
++	struct fimd_context *ctx = get_fimd_context(dev);
++
++	if (pm_runtime_suspended(dev))
++		return 0;
++
++	/*
++	 * do not use pm_runtime_suspend(). if pm_runtime_suspend() is
++	 * called here, an error would be returned by that interface
++	 * because the usage_count of pm runtime is more than 1.
++	 */
++	return fimd_power_on(ctx, false);
++}
++
++static int fimd_resume(struct device *dev)
++{
++	struct fimd_context *ctx = get_fimd_context(dev);
++
++	/*
++	 * if entered to sleep when lcd panel was on, the usage_count
++	 * of pm runtime would still be 1 so in this case, fimd driver
++	 * should be on directly not drawing on pm runtime interface.
++	 */
++	if (!pm_runtime_suspended(dev))
++		return fimd_power_on(ctx, true);
+ 
+-static int __init fimd_init(void)
++	return 0;
++}
++#endif
++
++#ifdef CONFIG_PM_RUNTIME
++static int fimd_runtime_suspend(struct device *dev)
+ {
+-	return platform_driver_register(&fimd_driver);
++	struct fimd_context *ctx = get_fimd_context(dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	return fimd_power_on(ctx, false);
+ }
+ 
+-static void __exit fimd_exit(void)
++static int fimd_runtime_resume(struct device *dev)
+ {
+-	platform_driver_unregister(&fimd_driver);
++	struct fimd_context *ctx = get_fimd_context(dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	return fimd_power_on(ctx, true);
+ }
++#endif
+ 
+-module_init(fimd_init);
+-module_exit(fimd_exit);
++static const struct dev_pm_ops fimd_pm_ops = {
++	SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
++	SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
++};
+ 
+-MODULE_AUTHOR("Joonyoung Shim <jy0922.shim at samsung.com>");
+-MODULE_AUTHOR("Inki Dae <inki.dae at samsung.com>");
+-MODULE_DESCRIPTION("Samsung DRM FIMD Driver");
+-MODULE_LICENSE("GPL");
++struct platform_driver fimd_driver = {
++	.probe		= fimd_probe,
++	.remove		= __devexit_p(fimd_remove),
++	.driver		= {
++		.name	= "exynos4-fb",
++		.owner	= THIS_MODULE,
++		.pm	= &fimd_pm_ops,
++	},
++};
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
+index aba0fe4..1dffa83 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
+@@ -26,6 +26,7 @@
+ #include "drmP.h"
+ #include "drm.h"
+ 
++#include <linux/shmem_fs.h>
+ #include <drm/exynos_drm.h>
+ 
+ #include "exynos_drm_drv.h"
+@@ -55,118 +56,413 @@ static unsigned int convert_to_vm_err_msg(int msg)
+ 	return out_msg;
+ }
+ 
+-static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
++static int check_gem_flags(unsigned int flags)
+ {
+-	DRM_DEBUG_KMS("%s\n", __FILE__);
++	if (flags & ~(EXYNOS_BO_MASK)) {
++		DRM_ERROR("invalid flags.\n");
++		return -EINVAL;
++	}
+ 
+-	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
++	return 0;
+ }
+ 
+-static struct exynos_drm_gem_obj
+-		*exynos_drm_gem_init(struct drm_device *drm_dev,
+-			struct drm_file *file_priv, unsigned int *handle,
+-			unsigned int size)
++static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
+ {
+-	struct exynos_drm_gem_obj *exynos_gem_obj;
+-	struct drm_gem_object *obj;
+-	int ret;
++	if (!IS_NONCONTIG_BUFFER(flags)) {
++		if (size >= SZ_1M)
++			return roundup(size, SECTION_SIZE);
++		else if (size >= SZ_64K)
++			return roundup(size, SZ_64K);
++		else
++			goto out;
++	}
++out:
++	return roundup(size, PAGE_SIZE);
++}
+ 
+-	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
+-	if (!exynos_gem_obj) {
+-		DRM_ERROR("failed to allocate exynos gem object.\n");
++static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
++						gfp_t gfpmask)
++{
++	struct inode *inode;
++	struct address_space *mapping;
++	struct page *p, **pages;
++	int i, npages;
++
++	/* This is the shared memory object that backs the GEM resource */
++	inode = obj->filp->f_path.dentry->d_inode;
++	mapping = inode->i_mapping;
++
++	npages = obj->size >> PAGE_SHIFT;
++
++	pages = drm_malloc_ab(npages, sizeof(struct page *));
++	if (pages == NULL)
+ 		return ERR_PTR(-ENOMEM);
++
++	gfpmask |= mapping_gfp_mask(mapping);
++
++	for (i = 0; i < npages; i++) {
++		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
++		if (IS_ERR(p))
++			goto fail;
++		pages[i] = p;
+ 	}
+ 
+-	obj = &exynos_gem_obj->base;
++	return pages;
+ 
+-	ret = drm_gem_object_init(drm_dev, obj, size);
+-	if (ret < 0) {
+-		DRM_ERROR("failed to initialize gem object.\n");
+-		ret = -EINVAL;
+-		goto err_object_init;
++fail:
++	while (i--)
++		page_cache_release(pages[i]);
++
++	drm_free_large(pages);
++	return ERR_PTR(PTR_ERR(p));
++}
++
++static void exynos_gem_put_pages(struct drm_gem_object *obj,
++					struct page **pages,
++					bool dirty, bool accessed)
++{
++	int i, npages;
++
++	npages = obj->size >> PAGE_SHIFT;
++
++	for (i = 0; i < npages; i++) {
++		if (dirty)
++			set_page_dirty(pages[i]);
++
++		if (accessed)
++			mark_page_accessed(pages[i]);
++
++		/* Undo the reference we took when populating the table */
++		page_cache_release(pages[i]);
+ 	}
+ 
+-	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
++	drm_free_large(pages);
++}
+ 
+-	ret = drm_gem_create_mmap_offset(obj);
++static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
++					struct vm_area_struct *vma,
++					unsigned long f_vaddr,
++					pgoff_t page_offset)
++{
++	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
++	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
++	unsigned long pfn;
++
++	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
++		if (!buf->pages)
++			return -EINTR;
++
++		pfn = page_to_pfn(buf->pages[page_offset++]);
++	} else
++		pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
++
++	return vm_insert_mixed(vma, f_vaddr, pfn);
++}
++
++static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
++{
++	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
++	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
++	struct scatterlist *sgl;
++	struct page **pages;
++	unsigned int npages, i = 0;
++	int ret;
++
++	if (buf->pages) {
++		DRM_DEBUG_KMS("already allocated.\n");
++		return -EINVAL;
++	}
++
++	pages = exynos_gem_get_pages(obj, GFP_KERNEL);
++	if (IS_ERR(pages)) {
++		DRM_ERROR("failed to get pages.\n");
++		return PTR_ERR(pages);
++	}
++
++	npages = obj->size >> PAGE_SHIFT;
++
++	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
++	if (!buf->sgt) {
++		DRM_ERROR("failed to allocate sg table.\n");
++		ret = -ENOMEM;
++		goto err;
++	}
++
++	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
+ 	if (ret < 0) {
+-		DRM_ERROR("failed to allocate mmap offset.\n");
+-		goto err_create_mmap_offset;
++		DRM_ERROR("failed to initialize sg table.\n");
++		ret = -EFAULT;
++		goto err1;
++	}
++
++	sgl = buf->sgt->sgl;
++
++	/* set all pages to sg list. */
++	while (i < npages) {
++		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
++		sg_dma_address(sgl) = page_to_phys(pages[i]);
++		i++;
++		sgl = sg_next(sgl);
+ 	}
+ 
++	/* add some codes for UNCACHED type here. TODO */
++
++	buf->pages = pages;
++	return ret;
++err1:
++	kfree(buf->sgt);
++	buf->sgt = NULL;
++err:
++	exynos_gem_put_pages(obj, pages, true, false);
++	return ret;
++
++}
++
++static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
++{
++	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
++	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
++
++	/*
++	 * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
++	 * allocated at gem fault handler.
++	 */
++	sg_free_table(buf->sgt);
++	kfree(buf->sgt);
++	buf->sgt = NULL;
++
++	exynos_gem_put_pages(obj, buf->pages, true, false);
++	buf->pages = NULL;
++
++	/* add some codes for UNCACHED type here. TODO */
++}
++
++static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
++					struct drm_file *file_priv,
++					unsigned int *handle)
++{
++	int ret;
++
+ 	/*
+ 	 * allocate a id of idr table where the obj is registered
+ 	 * and handle has the id what user can see.
+ 	 */
+ 	ret = drm_gem_handle_create(file_priv, obj, handle);
+ 	if (ret)
+-		goto err_handle_create;
++		return ret;
+ 
+ 	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+ 
+ 	/* drop reference from allocate - handle holds it now. */
+ 	drm_gem_object_unreference_unlocked(obj);
+ 
+-	return exynos_gem_obj;
++	return 0;
++}
++
++void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
++{
++	struct drm_gem_object *obj;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (!exynos_gem_obj)
++		return;
+ 
+-err_handle_create:
+-	drm_gem_free_mmap_offset(obj);
++	obj = &exynos_gem_obj->base;
++
++	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
++
++	if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
++			exynos_gem_obj->buffer->pages)
++		exynos_drm_gem_put_pages(obj);
++	else
++		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags,
++					exynos_gem_obj->buffer);
++
++	exynos_drm_fini_buf(obj->dev, exynos_gem_obj->buffer);
++	exynos_gem_obj->buffer = NULL;
+ 
+-err_create_mmap_offset:
++	if (obj->map_list.map)
++		drm_gem_free_mmap_offset(obj);
++
++	/* release file pointer to gem object. */
+ 	drm_gem_object_release(obj);
+ 
+-err_object_init:
+ 	kfree(exynos_gem_obj);
++	exynos_gem_obj = NULL;
++}
+ 
+-	return ERR_PTR(ret);
++static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
++						      unsigned long size)
++{
++	struct exynos_drm_gem_obj *exynos_gem_obj;
++	struct drm_gem_object *obj;
++	int ret;
++
++	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
++	if (!exynos_gem_obj) {
++		DRM_ERROR("failed to allocate exynos gem object\n");
++		return NULL;
++	}
++
++	exynos_gem_obj->size = size;
++	obj = &exynos_gem_obj->base;
++
++	ret = drm_gem_object_init(dev, obj, size);
++	if (ret < 0) {
++		DRM_ERROR("failed to initialize gem object\n");
++		kfree(exynos_gem_obj);
++		return NULL;
++	}
++
++	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
++
++	return exynos_gem_obj;
+ }
+ 
+ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+-				struct drm_file *file_priv,
+-				unsigned int *handle, unsigned long size)
++						unsigned int flags,
++						unsigned long size)
+ {
++	struct exynos_drm_gem_obj *exynos_gem_obj;
++	struct exynos_drm_gem_buf *buf;
++	int ret;
+ 
+-	struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
+-	struct exynos_drm_gem_buf *buffer;
++	if (!size) {
++		DRM_ERROR("invalid size.\n");
++		return ERR_PTR(-EINVAL);
++	}
+ 
+-	size = roundup(size, PAGE_SIZE);
++	size = roundup_gem_size(size, flags);
++	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+-	DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
++	ret = check_gem_flags(flags);
++	if (ret)
++		return ERR_PTR(ret);
+ 
+-	buffer = exynos_drm_buf_create(dev, size);
+-	if (IS_ERR(buffer)) {
+-		return ERR_CAST(buffer);
+-	}
++	buf = exynos_drm_init_buf(dev, size);
++	if (!buf)
++		return ERR_PTR(-ENOMEM);
+ 
+-	exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
+-	if (IS_ERR(exynos_gem_obj)) {
+-		exynos_drm_buf_destroy(dev, buffer);
+-		return exynos_gem_obj;
++	exynos_gem_obj = exynos_drm_gem_init(dev, size);
++	if (!exynos_gem_obj) {
++		ret = -ENOMEM;
++		goto err_fini_buf;
+ 	}
+ 
+-	exynos_gem_obj->buffer = buffer;
++	exynos_gem_obj->buffer = buf;
++
++	/* set memory type and cache attribute from user side. */
++	exynos_gem_obj->flags = flags;
++
++	/*
++	 * allocate all pages as desired size if user wants to allocate
++	 * physically non-continuous memory.
++	 */
++	if (flags & EXYNOS_BO_NONCONTIG) {
++		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
++		if (ret < 0) {
++			drm_gem_object_release(&exynos_gem_obj->base);
++			goto err_fini_buf;
++		}
++	} else {
++		ret = exynos_drm_alloc_buf(dev, buf, flags);
++		if (ret < 0) {
++			drm_gem_object_release(&exynos_gem_obj->base);
++			goto err_fini_buf;
++		}
++	}
+ 
+ 	return exynos_gem_obj;
++
++err_fini_buf:
++	exynos_drm_fini_buf(dev, buf);
++	return ERR_PTR(ret);
+ }
+ 
+ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
+-					struct drm_file *file_priv)
++				struct drm_file *file_priv)
+ {
+ 	struct drm_exynos_gem_create *args = data;
+-	struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
++	struct exynos_drm_gem_obj *exynos_gem_obj;
++	int ret;
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+-	exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+-						&args->handle, args->size);
++	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
+ 	if (IS_ERR(exynos_gem_obj))
+ 		return PTR_ERR(exynos_gem_obj);
+ 
++	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
++			&args->handle);
++	if (ret) {
++		exynos_drm_gem_destroy(exynos_gem_obj);
++		return ret;
++	}
++
+ 	return 0;
+ }
+ 
++void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
++					unsigned int gem_handle,
++					struct drm_file *file_priv)
++{
++	struct exynos_drm_gem_obj *exynos_gem_obj;
++	struct drm_gem_object *obj;
++
++	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
++	if (!obj) {
++		DRM_ERROR("failed to lookup gem object.\n");
++		return ERR_PTR(-EINVAL);
++	}
++
++	exynos_gem_obj = to_exynos_gem_obj(obj);
++
++	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
++		DRM_DEBUG_KMS("not support NONCONTIG type.\n");
++		drm_gem_object_unreference_unlocked(obj);
++
++		/* TODO */
++		return ERR_PTR(-EINVAL);
++	}
++
++	return &exynos_gem_obj->buffer->dma_addr;
++}
++
++void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
++					unsigned int gem_handle,
++					struct drm_file *file_priv)
++{
++	struct exynos_drm_gem_obj *exynos_gem_obj;
++	struct drm_gem_object *obj;
++
++	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
++	if (!obj) {
++		DRM_ERROR("failed to lookup gem object.\n");
++		return;
++	}
++
++	exynos_gem_obj = to_exynos_gem_obj(obj);
++
++	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
++		DRM_DEBUG_KMS("not support NONCONTIG type.\n");
++		drm_gem_object_unreference_unlocked(obj);
++
++		/* TODO */
++		return;
++	}
++
++	drm_gem_object_unreference_unlocked(obj);
++
++	/*
++	 * decrease obj->refcount one more time because we has already
++	 * increased it at exynos_drm_gem_get_dma_addr().
++	 */
++	drm_gem_object_unreference_unlocked(obj);
++}
++
+ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
+-		struct drm_file *file_priv)
++				    struct drm_file *file_priv)
+ {
+ 	struct drm_exynos_gem_map_off *args = data;
+ 
+@@ -185,21 +481,23 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
+ }
+ 
+ static int exynos_drm_gem_mmap_buffer(struct file *filp,
+-		struct vm_area_struct *vma)
++				      struct vm_area_struct *vma)
+ {
+ 	struct drm_gem_object *obj = filp->private_data;
+ 	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ 	struct exynos_drm_gem_buf *buffer;
+-	unsigned long pfn, vm_size;
++	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
++	int ret;
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+ 	vma->vm_flags |= (VM_IO | VM_RESERVED);
+ 
++	/* in case of direct mapping, always having non-cachable attribute */
+ 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-	vma->vm_file = filp;
+ 
+-	vm_size = vma->vm_end - vma->vm_start;
++	vm_size = usize = vma->vm_end - vma->vm_start;
++
+ 	/*
+ 	 * a buffer contains information to physically continuous memory
+ 	 * allocated by user request or at framebuffer creation.
+@@ -210,18 +508,39 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
+ 	if (vm_size > buffer->size)
+ 		return -EINVAL;
+ 
+-	/*
+-	 * get page frame number to physical memory to be mapped
+-	 * to user space.
+-	 */
+-	pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
+-
+-	DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
+-
+-	if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
+-				vma->vm_page_prot)) {
+-		DRM_ERROR("failed to remap pfn range.\n");
+-		return -EAGAIN;
++	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
++		int i = 0;
++
++		if (!buffer->pages)
++			return -EINVAL;
++
++		vma->vm_flags |= VM_MIXEDMAP;
++
++		do {
++			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
++			if (ret) {
++				DRM_ERROR("failed to remap user space.\n");
++				return ret;
++			}
++
++			uaddr += PAGE_SIZE;
++			usize -= PAGE_SIZE;
++		} while (usize > 0);
++	} else {
++		/*
++		 * get page frame number to physical memory to be mapped
++		 * to user space.
++		 */
++		pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
++								PAGE_SHIFT;
++
++		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
++
++		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
++					vma->vm_page_prot)) {
++			DRM_ERROR("failed to remap pfn range.\n");
++			return -EAGAIN;
++		}
+ 	}
+ 
+ 	return 0;
+@@ -232,7 +551,7 @@ static const struct file_operations exynos_drm_gem_fops = {
+ };
+ 
+ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+-		struct drm_file *file_priv)
++			      struct drm_file *file_priv)
+ {
+ 	struct drm_exynos_gem_mmap *args = data;
+ 	struct drm_gem_object *obj;
+@@ -254,10 +573,8 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ 	obj->filp->f_op = &exynos_drm_gem_fops;
+ 	obj->filp->private_data = obj;
+ 
+-	down_write(&current->mm->mmap_sem);
+-	addr = do_mmap(obj->filp, 0, args->size,
++	addr = vm_mmap(obj->filp, 0, args->size,
+ 			PROT_READ | PROT_WRITE, MAP_SHARED, 0);
+-	up_write(&current->mm->mmap_sem);
+ 
+ 	drm_gem_object_unreference_unlocked(obj);
+ 
+@@ -278,32 +595,19 @@ int exynos_drm_gem_init_object(struct drm_gem_object *obj)
+ 	return 0;
+ }
+ 
+-void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
++void exynos_drm_gem_free_object(struct drm_gem_object *obj)
+ {
+-	struct exynos_drm_gem_obj *exynos_gem_obj;
+-
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+-	DRM_DEBUG_KMS("handle count = %d\n",
+-			atomic_read(&gem_obj->handle_count));
+-
+-	if (gem_obj->map_list.map)
+-		drm_gem_free_mmap_offset(gem_obj);
+-
+-	/* release file pointer to gem object. */
+-	drm_gem_object_release(gem_obj);
+-
+-	exynos_gem_obj = to_exynos_gem_obj(gem_obj);
+-
+-	exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
+-
+-	kfree(exynos_gem_obj);
++	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
+ }
+ 
+ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
+-		struct drm_device *dev, struct drm_mode_create_dumb *args)
++			       struct drm_device *dev,
++			       struct drm_mode_create_dumb *args)
+ {
+ 	struct exynos_drm_gem_obj *exynos_gem_obj;
++	int ret;
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+@@ -314,21 +618,29 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
+ 	 */
+ 
+ 	args->pitch = args->width * args->bpp >> 3;
+-	args->size = args->pitch * args->height;
++	args->size = PAGE_ALIGN(args->pitch * args->height);
+ 
+-	exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
+-							args->size);
++	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
+ 	if (IS_ERR(exynos_gem_obj))
+ 		return PTR_ERR(exynos_gem_obj);
+ 
++	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
++			&args->handle);
++	if (ret) {
++		exynos_drm_gem_destroy(exynos_gem_obj);
++		return ret;
++	}
++
+ 	return 0;
+ }
+ 
+ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+-		struct drm_device *dev, uint32_t handle, uint64_t *offset)
++				   struct drm_device *dev, uint32_t handle,
++				   uint64_t *offset)
+ {
+ 	struct exynos_drm_gem_obj *exynos_gem_obj;
+ 	struct drm_gem_object *obj;
++	int ret = 0;
+ 
+ 	DRM_DEBUG_KMS("%s\n", __FILE__);
+ 
+@@ -343,19 +655,46 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+ 	obj = drm_gem_object_lookup(dev, file_priv, handle);
+ 	if (!obj) {
+ 		DRM_ERROR("failed to lookup gem object.\n");
+-		mutex_unlock(&dev->struct_mutex);
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto unlock;
+ 	}
+ 
+ 	exynos_gem_obj = to_exynos_gem_obj(obj);
+ 
+-	*offset = get_gem_mmap_offset(&exynos_gem_obj->base);
+-
+-	drm_gem_object_unreference(obj);
++	if (!exynos_gem_obj->base.map_list.map) {
++		ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
++		if (ret)
++			goto out;
++	}
+ 
++	*offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
+ 	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
+ 
++out:
++	drm_gem_object_unreference(obj);
++unlock:
+ 	mutex_unlock(&dev->struct_mutex);
++	return ret;
++}
++
++int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
++				struct drm_device *dev,
++				unsigned int handle)
++{
++	int ret;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	/*
++	 * obj->refcount and obj->handle_count are decreased and
++	 * if both them are 0 then exynos_drm_gem_free_object()
++	 * would be called by callback to release resources.
++	 */
++	ret = drm_gem_handle_delete(file_priv, handle);
++	if (ret < 0) {
++		DRM_ERROR("failed to delete drm_gem_handle.\n");
++		return ret;
++	}
+ 
+ 	return 0;
+ }
+@@ -363,21 +702,20 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
+ 	struct drm_gem_object *obj = vma->vm_private_data;
+-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ 	struct drm_device *dev = obj->dev;
+-	unsigned long pfn;
++	unsigned long f_vaddr;
+ 	pgoff_t page_offset;
+ 	int ret;
+ 
+ 	page_offset = ((unsigned long)vmf->virtual_address -
+ 			vma->vm_start) >> PAGE_SHIFT;
++	f_vaddr = (unsigned long)vmf->virtual_address;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+ 
+-	pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
+-			PAGE_SHIFT) + page_offset;
+-
+-	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
++	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
++	if (ret < 0)
++		DRM_ERROR("failed to map pages.\n");
+ 
+ 	mutex_unlock(&dev->struct_mutex);
+ 
+@@ -402,29 +740,3 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+ 
+ 	return ret;
+ }
+-
+-
+-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
+-		struct drm_device *dev, unsigned int handle)
+-{
+-	int ret;
+-
+-	DRM_DEBUG_KMS("%s\n", __FILE__);
+-
+-	/*
+-	 * obj->refcount and obj->handle_count are decreased and
+-	 * if both them are 0 then exynos_drm_gem_free_object()
+-	 * would be called by callback to release resources.
+-	 */
+-	ret = drm_gem_handle_delete(file_priv, handle);
+-	if (ret < 0) {
+-		DRM_ERROR("failed to delete drm_gem_handle.\n");
+-		return ret;
+-	}
+-
+-	return 0;
+-}
+-
+-MODULE_AUTHOR("Inki Dae <inki.dae at samsung.com>");
+-MODULE_DESCRIPTION("Samsung SoC DRM GEM Module");
+-MODULE_LICENSE("GPL");
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
+index ef87973..4ed8420 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
++++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
+@@ -29,6 +29,8 @@
+ #define to_exynos_gem_obj(x)	container_of(x,\
+ 			struct exynos_drm_gem_obj, base)
+ 
++#define IS_NONCONTIG_BUFFER(f)		(f & EXYNOS_BO_NONCONTIG)
++
+ /*
+  * exynos drm gem buffer structure.
+  *
+@@ -36,11 +38,15 @@
+  * @dma_addr: bus address(accessed by dma) to allocated memory region.
+  *	- this address could be physical address without IOMMU and
+  *	device address with IOMMU.
++ * @sgt: sg table to transfer page data.
++ * @pages: contain all pages to allocated memory region.
+  * @size: size of allocated memory region.
+  */
+ struct exynos_drm_gem_buf {
+ 	void __iomem		*kvaddr;
+ 	dma_addr_t		dma_addr;
++	struct sg_table		*sgt;
++	struct page		**pages;
+ 	unsigned long		size;
+ };
+ 
+@@ -55,19 +61,26 @@ struct exynos_drm_gem_buf {
+  *	by user request or at framebuffer creation.
+  *	continuous memory region allocated by user request
+  *	or at framebuffer creation.
++ * @size: total memory size to physically non-continuous memory region.
++ * @flags: indicate memory type to allocated buffer and cache attruibute.
+  *
+  * P.S. this object would be transfered to user as kms_bo.handle so
+  *	user can access the buffer through kms_bo.handle.
+  */
+ struct exynos_drm_gem_obj {
+-	struct drm_gem_object base;
+-	struct exynos_drm_gem_buf *buffer;
++	struct drm_gem_object		base;
++	struct exynos_drm_gem_buf	*buffer;
++	unsigned long			size;
++	unsigned int			flags;
+ };
+ 
+-/* create a new buffer and get a new gem handle. */
++/* destroy a buffer with gem object */
++void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
++
++/* create a new buffer with gem object */
+ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+-		struct drm_file *file_priv,
+-		unsigned int *handle, unsigned long size);
++						unsigned int flags,
++						unsigned long size);
+ 
+ /*
+  * request gem object creation and buffer allocation as the size
+@@ -75,15 +88,36 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+  * height and bpp.
+  */
+ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
+-		struct drm_file *file_priv);
++				struct drm_file *file_priv);
++
++/*
++ * get dma address from gem handle and this function could be used for
++ * other drivers such as 2d/3d acceleration drivers.
++ * with this function call, gem object reference count would be increased.
++ */
++void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
++					unsigned int gem_handle,
++					struct drm_file *file_priv);
++
++/*
++ * put dma address from gem handle and this function could be used for
++ * other drivers such as 2d/3d acceleration drivers.
++ * with this function call, gem object reference count would be decreased.
++ */
++void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
++					unsigned int gem_handle,
++					struct drm_file *file_priv);
+ 
+ /* get buffer offset to map to user space. */
+ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
+-		struct drm_file *file_priv);
++				    struct drm_file *file_priv);
+ 
+-/* unmap a buffer from user space. */
+-int exynos_drm_gem_munmap_ioctl(struct drm_device *dev, void *data,
+-		struct drm_file *file_priv);
++/*
++ * mmap the physically continuous memory that a gem object contains
++ * to user space.
++ */
++int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
++			      struct drm_file *file_priv);
+ 
+ /* initialize gem object. */
+ int exynos_drm_gem_init_object(struct drm_gem_object *obj);
+@@ -93,24 +127,13 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
+ 
+ /* create memory region for drm framebuffer. */
+ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
+-		struct drm_device *dev, struct drm_mode_create_dumb *args);
++			       struct drm_device *dev,
++			       struct drm_mode_create_dumb *args);
+ 
+ /* map memory region for drm framebuffer to user space. */
+ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+-		struct drm_device *dev, uint32_t handle, uint64_t *offset);
+-
+-/* page fault handler and mmap fault address(virtual) to physical memory. */
+-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+-
+-/*
+- * mmap the physically continuous memory that a gem object contains
+- * to user space.
+- */
+-int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+-		struct drm_file *file_priv);
+-
+-/* set vm_flags and we can change the vm attribute to other one at here. */
+-int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
++				   struct drm_device *dev, uint32_t handle,
++				   uint64_t *offset);
+ 
+ /*
+  * destroy memory region allocated.
+@@ -118,6 +141,13 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+  *	would be released by drm_gem_handle_delete().
+  */
+ int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
+-		struct drm_device *dev, unsigned int handle);
++				struct drm_device *dev,
++				unsigned int handle);
++
++/* page fault handler and mmap fault address(virtual) to physical memory. */
++int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
++
++/* set vm_flags and we can change the vm attribute to other one at here. */
++int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+ 
+ #endif
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+new file mode 100644
+index 0000000..3424463
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+@@ -0,0 +1,377 @@
++/*
++ * Copyright (C) 2011 Samsung Electronics Co.Ltd
++ * Authors:
++ *	Inki Dae <inki.dae at samsung.com>
++ *	Seung-Woo Kim <sw0312.kim at samsung.com>
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ *
++ */
++
++#include "drmP.h"
++
++#include <linux/kernel.h>
++#include <linux/wait.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/pm_runtime.h>
++
++#include <drm/exynos_drm.h>
++
++#include "exynos_drm_drv.h"
++#include "exynos_drm_hdmi.h"
++
++#define to_context(dev)		platform_get_drvdata(to_platform_device(dev))
++#define to_subdrv(dev)		to_context(dev)
++#define get_ctx_from_subdrv(subdrv)	container_of(subdrv,\
++					struct drm_hdmi_context, subdrv);
++
++/* these callback points shoud be set by specific drivers. */
++static struct exynos_hdmi_ops *hdmi_ops;
++static struct exynos_mixer_ops *mixer_ops;
++
++struct drm_hdmi_context {
++	struct exynos_drm_subdrv	subdrv;
++	struct exynos_drm_hdmi_context	*hdmi_ctx;
++	struct exynos_drm_hdmi_context	*mixer_ctx;
++};
++
++void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
++{
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (ops)
++		hdmi_ops = ops;
++}
++
++void exynos_mixer_ops_register(struct exynos_mixer_ops *ops)
++{
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (ops)
++		mixer_ops = ops;
++}
++
++static bool drm_hdmi_is_connected(struct device *dev)
++{
++	struct drm_hdmi_context *ctx = to_context(dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (hdmi_ops && hdmi_ops->is_connected)
++		return hdmi_ops->is_connected(ctx->hdmi_ctx->ctx);
++
++	return false;
++}
++
++static int drm_hdmi_get_edid(struct device *dev,
++		struct drm_connector *connector, u8 *edid, int len)
++{
++	struct drm_hdmi_context *ctx = to_context(dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (hdmi_ops && hdmi_ops->get_edid)
++		return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid,
++					  len);
++
++	return 0;
++}
++
++static int drm_hdmi_check_timing(struct device *dev, void *timing)
++{
++	struct drm_hdmi_context *ctx = to_context(dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (hdmi_ops && hdmi_ops->check_timing)
++		return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing);
++
++	return 0;
++}
++
++static int drm_hdmi_power_on(struct device *dev, int mode)
++{
++	struct drm_hdmi_context *ctx = to_context(dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (hdmi_ops && hdmi_ops->power_on)
++		return hdmi_ops->power_on(ctx->hdmi_ctx->ctx, mode);
++
++	return 0;
++}
++
++static struct exynos_drm_display_ops drm_hdmi_display_ops = {
++	.type = EXYNOS_DISPLAY_TYPE_HDMI,
++	.is_connected = drm_hdmi_is_connected,
++	.get_edid = drm_hdmi_get_edid,
++	.check_timing = drm_hdmi_check_timing,
++	.power_on = drm_hdmi_power_on,
++};
++
++static int drm_hdmi_enable_vblank(struct device *subdrv_dev)
++{
++	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
++	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
++	struct exynos_drm_manager *manager = subdrv->manager;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (mixer_ops && mixer_ops->enable_vblank)
++		return mixer_ops->enable_vblank(ctx->mixer_ctx->ctx,
++						manager->pipe);
++
++	return 0;
++}
++
++static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
++{
++	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (mixer_ops && mixer_ops->disable_vblank)
++		return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
++}
++
++static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
++				struct drm_connector *connector,
++				struct drm_display_mode *mode,
++				struct drm_display_mode *adjusted_mode)
++{
++	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (hdmi_ops && hdmi_ops->mode_fixup)
++		hdmi_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector, mode,
++				     adjusted_mode);
++}
++
++static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
++{
++	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (hdmi_ops && hdmi_ops->mode_set)
++		hdmi_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
++}
++
++static void drm_hdmi_get_max_resol(struct device *subdrv_dev,
++				unsigned int *width, unsigned int *height)
++{
++	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (hdmi_ops && hdmi_ops->get_max_resol)
++		hdmi_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, height);
++}
++
++static void drm_hdmi_commit(struct device *subdrv_dev)
++{
++	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (hdmi_ops && hdmi_ops->commit)
++		hdmi_ops->commit(ctx->hdmi_ctx->ctx);
++}
++
++static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
++{
++	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	switch (mode) {
++	case DRM_MODE_DPMS_ON:
++		break;
++	case DRM_MODE_DPMS_STANDBY:
++	case DRM_MODE_DPMS_SUSPEND:
++	case DRM_MODE_DPMS_OFF:
++		if (hdmi_ops && hdmi_ops->disable)
++			hdmi_ops->disable(ctx->hdmi_ctx->ctx);
++		break;
++	default:
++		DRM_DEBUG_KMS("unkown dps mode: %d\n", mode);
++		break;
++	}
++}
++
++static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
++	.dpms = drm_hdmi_dpms,
++	.enable_vblank = drm_hdmi_enable_vblank,
++	.disable_vblank = drm_hdmi_disable_vblank,
++	.mode_fixup = drm_hdmi_mode_fixup,
++	.mode_set = drm_hdmi_mode_set,
++	.get_max_resol = drm_hdmi_get_max_resol,
++	.commit = drm_hdmi_commit,
++};
++
++static void drm_mixer_mode_set(struct device *subdrv_dev,
++		struct exynos_drm_overlay *overlay)
++{
++	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (mixer_ops && mixer_ops->win_mode_set)
++		mixer_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
++}
++
++static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
++{
++	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (mixer_ops && mixer_ops->win_commit)
++		mixer_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
++}
++
++static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
++{
++	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (mixer_ops && mixer_ops->win_disable)
++		mixer_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
++}
++
++static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
++	.mode_set = drm_mixer_mode_set,
++	.commit = drm_mixer_commit,
++	.disable = drm_mixer_disable,
++};
++
++static struct exynos_drm_manager hdmi_manager = {
++	.pipe		= -1,
++	.ops		= &drm_hdmi_manager_ops,
++	.overlay_ops	= &drm_hdmi_overlay_ops,
++	.display_ops	= &drm_hdmi_display_ops,
++};
++
++static int hdmi_subdrv_probe(struct drm_device *drm_dev,
++		struct device *dev)
++{
++	struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
++	struct drm_hdmi_context *ctx;
++	struct platform_device *pdev = to_platform_device(dev);
++	struct exynos_drm_common_hdmi_pd *pd;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	pd = pdev->dev.platform_data;
++
++	if (!pd) {
++		DRM_DEBUG_KMS("platform data is null.\n");
++		return -EFAULT;
++	}
++
++	if (!pd->hdmi_dev) {
++		DRM_DEBUG_KMS("hdmi device is null.\n");
++		return -EFAULT;
++	}
++
++	if (!pd->mixer_dev) {
++		DRM_DEBUG_KMS("mixer device is null.\n");
++		return -EFAULT;
++	}
++
++	ctx = get_ctx_from_subdrv(subdrv);
++
++	ctx->hdmi_ctx = (struct exynos_drm_hdmi_context *)
++				to_context(pd->hdmi_dev);
++	if (!ctx->hdmi_ctx) {
++		DRM_DEBUG_KMS("hdmi context is null.\n");
++		return -EFAULT;
++	}
++
++	ctx->hdmi_ctx->drm_dev = drm_dev;
++
++	ctx->mixer_ctx = (struct exynos_drm_hdmi_context *)
++				to_context(pd->mixer_dev);
++	if (!ctx->mixer_ctx) {
++		DRM_DEBUG_KMS("mixer context is null.\n");
++		return -EFAULT;
++	}
++
++	ctx->mixer_ctx->drm_dev = drm_dev;
++
++	return 0;
++}
++
++static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
++{
++	struct device *dev = &pdev->dev;
++	struct exynos_drm_subdrv *subdrv;
++	struct drm_hdmi_context *ctx;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
++	if (!ctx) {
++		DRM_LOG_KMS("failed to alloc common hdmi context.\n");
++		return -ENOMEM;
++	}
++
++	subdrv = &ctx->subdrv;
++
++	subdrv->dev = dev;
++	subdrv->manager = &hdmi_manager;
++	subdrv->probe = hdmi_subdrv_probe;
++
++	platform_set_drvdata(pdev, subdrv);
++
++	exynos_drm_subdrv_register(subdrv);
++
++	return 0;
++}
++
++static int hdmi_runtime_suspend(struct device *dev)
++{
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	return 0;
++}
++
++static int hdmi_runtime_resume(struct device *dev)
++{
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	return 0;
++}
++
++static const struct dev_pm_ops hdmi_pm_ops = {
++	.runtime_suspend = hdmi_runtime_suspend,
++	.runtime_resume	 = hdmi_runtime_resume,
++};
++
++static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
++{
++	struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	exynos_drm_subdrv_unregister(&ctx->subdrv);
++	kfree(ctx);
++
++	return 0;
++}
++
++struct platform_driver exynos_drm_common_hdmi_driver = {
++	.probe		= exynos_drm_hdmi_probe,
++	.remove		= __devexit_p(exynos_drm_hdmi_remove),
++	.driver		= {
++		.name	= "exynos-drm-hdmi",
++		.owner	= THIS_MODULE,
++		.pm = &hdmi_pm_ops,
++	},
++};
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+new file mode 100644
+index 0000000..f3ae192
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+@@ -0,0 +1,73 @@
++/* exynos_drm_hdmi.h
++ *
++ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
++ * Authoer: Inki Dae <inki.dae at samsung.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _EXYNOS_DRM_HDMI_H_
++#define _EXYNOS_DRM_HDMI_H_
++
++/*
++ * exynos hdmi common context structure.
++ *
++ * @drm_dev: pointer to drm_device.
++ * @ctx: pointer to the context of specific device driver.
++ *	this context should be hdmi_context or mixer_context.
++ */
++struct exynos_drm_hdmi_context {
++	struct drm_device	*drm_dev;
++	void			*ctx;
++};
++
++struct exynos_hdmi_ops {
++	/* display */
++	bool (*is_connected)(void *ctx);
++	int (*get_edid)(void *ctx, struct drm_connector *connector,
++			u8 *edid, int len);
++	int (*check_timing)(void *ctx, void *timing);
++	int (*power_on)(void *ctx, int mode);
++
++	/* manager */
++	void (*mode_fixup)(void *ctx, struct drm_connector *connector,
++				struct drm_display_mode *mode,
++				struct drm_display_mode *adjusted_mode);
++	void (*mode_set)(void *ctx, void *mode);
++	void (*get_max_resol)(void *ctx, unsigned int *width,
++				unsigned int *height);
++	void (*commit)(void *ctx);
++	void (*disable)(void *ctx);
++};
++
++struct exynos_mixer_ops {
++	/* manager */
++	int (*enable_vblank)(void *ctx, int pipe);
++	void (*disable_vblank)(void *ctx);
++
++	/* overlay */
++	void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
++	void (*win_commit)(void *ctx, int zpos);
++	void (*win_disable)(void *ctx, int zpos);
++};
++
++void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops);
++void exynos_mixer_ops_register(struct exynos_mixer_ops *ops);
++#endif
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
+new file mode 100644
+index 0000000..f92fe4c
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
+@@ -0,0 +1,171 @@
++/*
++ * Copyright (C) 2011 Samsung Electronics Co.Ltd
++ * Authors: Joonyoung Shim <jy0922.shim at samsung.com>
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ *
++ */
++
++#include "drmP.h"
++
++#include "exynos_drm.h"
++#include "exynos_drm_crtc.h"
++#include "exynos_drm_drv.h"
++#include "exynos_drm_encoder.h"
++
++struct exynos_plane {
++	struct drm_plane		base;
++	struct exynos_drm_overlay	overlay;
++	bool				enabled;
++};
++
++static const uint32_t formats[] = {
++	DRM_FORMAT_XRGB8888,
++	DRM_FORMAT_ARGB8888,
++	DRM_FORMAT_NV12,
++	DRM_FORMAT_NV12M,
++	DRM_FORMAT_NV12MT,
++};
++
++static int
++exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
++		     struct drm_framebuffer *fb, int crtc_x, int crtc_y,
++		     unsigned int crtc_w, unsigned int crtc_h,
++		     uint32_t src_x, uint32_t src_y,
++		     uint32_t src_w, uint32_t src_h)
++{
++	struct exynos_plane *exynos_plane =
++		container_of(plane, struct exynos_plane, base);
++	struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
++	struct exynos_drm_crtc_pos pos;
++	unsigned int x = src_x >> 16;
++	unsigned int y = src_y >> 16;
++	int ret;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
++	pos.crtc_x = crtc_x;
++	pos.crtc_y = crtc_y;
++	pos.crtc_w = crtc_w;
++	pos.crtc_h = crtc_h;
++
++	pos.fb_x = x;
++	pos.fb_y = y;
++
++	/* TODO: scale feature */
++	ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
++	if (ret < 0)
++		return ret;
++
++	exynos_drm_fn_encoder(crtc, overlay,
++			exynos_drm_encoder_crtc_mode_set);
++	exynos_drm_fn_encoder(crtc, &overlay->zpos,
++			exynos_drm_encoder_crtc_plane_commit);
++
++	exynos_plane->enabled = true;
++
++	return 0;
++}
++
++static int exynos_disable_plane(struct drm_plane *plane)
++{
++	struct exynos_plane *exynos_plane =
++		container_of(plane, struct exynos_plane, base);
++	struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	if (!exynos_plane->enabled)
++		return 0;
++
++	exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
++			exynos_drm_encoder_crtc_disable);
++
++	exynos_plane->enabled = false;
++	exynos_plane->overlay.zpos = DEFAULT_ZPOS;
++
++	return 0;
++}
++
++static void exynos_plane_destroy(struct drm_plane *plane)
++{
++	struct exynos_plane *exynos_plane =
++		container_of(plane, struct exynos_plane, base);
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	exynos_disable_plane(plane);
++	drm_plane_cleanup(plane);
++	kfree(exynos_plane);
++}
++
++static struct drm_plane_funcs exynos_plane_funcs = {
++	.update_plane	= exynos_update_plane,
++	.disable_plane	= exynos_disable_plane,
++	.destroy	= exynos_plane_destroy,
++};
++
++int exynos_plane_init(struct drm_device *dev, unsigned int nr)
++{
++	struct exynos_plane *exynos_plane;
++	uint32_t possible_crtcs;
++
++	exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
++	if (!exynos_plane)
++		return -ENOMEM;
++
++	/* all CRTCs are available */
++	possible_crtcs = (1 << MAX_CRTC) - 1;
++
++	exynos_plane->overlay.zpos = DEFAULT_ZPOS;
++
++	return drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
++			      &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
++			      false);
++}
++
++int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
++				struct drm_file *file_priv)
++{
++	struct drm_exynos_plane_set_zpos *zpos_req = data;
++	struct drm_mode_object *obj;
++	struct drm_plane *plane;
++	struct exynos_plane *exynos_plane;
++	int ret = 0;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
++	if (zpos_req->zpos < 0 || zpos_req->zpos >= MAX_PLANE) {
++		if (zpos_req->zpos != DEFAULT_ZPOS) {
++			DRM_ERROR("zpos not within limits\n");
++			return -EINVAL;
++		}
++	}
++
++	mutex_lock(&dev->mode_config.mutex);
++
++	obj = drm_mode_object_find(dev, zpos_req->plane_id,
++			DRM_MODE_OBJECT_PLANE);
++	if (!obj) {
++		DRM_DEBUG_KMS("Unknown plane ID %d\n",
++			      zpos_req->plane_id);
++		ret = -EINVAL;
++		goto out;
++	}
++
++	plane = obj_to_plane(obj);
++	exynos_plane = container_of(plane, struct exynos_plane, base);
++
++	exynos_plane->overlay.zpos = zpos_req->zpos;
++
++out:
++	mutex_unlock(&dev->mode_config.mutex);
++	return ret;
++}
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
+new file mode 100644
+index 0000000..16b71f8
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
+@@ -0,0 +1,14 @@
++/*
++ * Copyright (C) 2011 Samsung Electronics Co.Ltd
++ * Authors: Joonyoung Shim <jy0922.shim at samsung.com>
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ *
++ */
++
++int exynos_plane_init(struct drm_device *dev, unsigned int nr);
++int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
++				struct drm_file *file_priv);
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+new file mode 100644
+index 0000000..7b9c153
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+@@ -0,0 +1,680 @@
++/* exynos_drm_vidi.c
++ *
++ * Copyright (C) 2012 Samsung Electronics Co.Ltd
++ * Authors:
++ *	Inki Dae <inki.dae at samsung.com>
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ *
++ */
++#include "drmP.h"
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++
++#include <drm/exynos_drm.h>
++
++#include "drm_edid.h"
++#include "drm_crtc_helper.h"
++
++#include "exynos_drm_drv.h"
++#include "exynos_drm_crtc.h"
++#include "exynos_drm_encoder.h"
++
++/* vidi has totally three virtual windows. */
++#define WINDOWS_NR		3
++
++#define get_vidi_context(dev)	platform_get_drvdata(to_platform_device(dev))
++
++struct vidi_win_data {
++	unsigned int		offset_x;
++	unsigned int		offset_y;
++	unsigned int		ovl_width;
++	unsigned int		ovl_height;
++	unsigned int		fb_width;
++	unsigned int		fb_height;
++	unsigned int		bpp;
++	dma_addr_t		dma_addr;
++	void __iomem		*vaddr;
++	unsigned int		buf_offsize;
++	unsigned int		line_size;	/* bytes */
++	bool			enabled;
++};
++
++struct vidi_context {
++	struct exynos_drm_subdrv	subdrv;
++	struct drm_crtc			*crtc;
++	struct vidi_win_data		win_data[WINDOWS_NR];
++	struct edid			*raw_edid;
++	unsigned int			clkdiv;
++	unsigned int			default_win;
++	unsigned long			irq_flags;
++	unsigned int			connected;
++	bool				vblank_on;
++	bool				suspended;
++	struct work_struct		work;
++	struct mutex			lock;
++};
++
++static const char fake_edid_info[] = {
++	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05,
++	0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78,
++	0x0a, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0xbd,
++	0xee, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
++	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x66, 0x21, 0x50, 0xb0, 0x51, 0x00,
++	0x1b, 0x30, 0x40, 0x70, 0x36, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e,
++	0x01, 0x1d, 0x00, 0x72, 0x51, 0xd0, 0x1e, 0x20, 0x6e, 0x28, 0x55, 0x00,
++	0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18,
++	0x4b, 0x1a, 0x44, 0x17, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
++	0x00, 0x00, 0x00, 0xfc, 0x00, 0x53, 0x41, 0x4d, 0x53, 0x55, 0x4e, 0x47,
++	0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0xbc, 0x02, 0x03, 0x1e, 0xf1,
++	0x46, 0x84, 0x05, 0x03, 0x10, 0x20, 0x22, 0x23, 0x09, 0x07, 0x07, 0x83,
++	0x01, 0x00, 0x00, 0xe2, 0x00, 0x0f, 0x67, 0x03, 0x0c, 0x00, 0x10, 0x00,
++	0xb8, 0x2d, 0x01, 0x1d, 0x80, 0x18, 0x71, 0x1c, 0x16, 0x20, 0x58, 0x2c,
++	0x25, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x9e, 0x8c, 0x0a, 0xd0, 0x8a,
++	0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00, 0xa0, 0x5a, 0x00, 0x00,
++	0x00, 0x18, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
++	0x45, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00,
++	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++	0x00, 0x00, 0x00, 0x06
++};
++
++static void vidi_fake_vblank_handler(struct work_struct *work);
++
++static bool vidi_display_is_connected(struct device *dev)
++{
++	struct vidi_context *ctx = get_vidi_context(dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	/*
++	 * connection request would come from user side
++	 * to do hotplug through specific ioctl.
++	 */
++	return ctx->connected ? true : false;
++}
++
++static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
++				u8 *edid, int len)
++{
++	struct vidi_context *ctx = get_vidi_context(dev);
++	struct edid *raw_edid;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	/*
++	 * the edid data comes from user side and it would be set
++	 * to ctx->raw_edid through specific ioctl.
++	 */
++	if (!ctx->raw_edid) {
++		DRM_DEBUG_KMS("raw_edid is null.\n");
++		return -EFAULT;
++	}
++
++	raw_edid = kzalloc(len, GFP_KERNEL);
++	if (!raw_edid) {
++		DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
++		return -ENOMEM;
++	}
++
++	memcpy(raw_edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
++						* EDID_LENGTH, len));
++
++	/* attach the edid data to connector. */
++	connector->display_info.raw_edid = (char *)raw_edid;
++
++	memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
++					* EDID_LENGTH, len));
++
++	return 0;
++}
++
++static void *vidi_get_panel(struct device *dev)
++{
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	/* TODO. */
++
++	return NULL;
++}
++
++static int vidi_check_timing(struct device *dev, void *timing)
++{
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	/* TODO. */
++
++	return 0;
++}
++
++static int vidi_display_power_on(struct device *dev, int mode)
++{
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	/* TODO */
++
++	return 0;
++}
++
++static struct exynos_drm_display_ops vidi_display_ops = {
++	.type = EXYNOS_DISPLAY_TYPE_VIDI,
++	.is_connected = vidi_display_is_connected,
++	.get_edid = vidi_get_edid,
++	.get_panel = vidi_get_panel,
++	.check_timing = vidi_check_timing,
++	.power_on = vidi_display_power_on,
++};
++
++static void vidi_dpms(struct device *subdrv_dev, int mode)
++{
++	struct vidi_context *ctx = get_vidi_context(subdrv_dev);
++
++	DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
++
++	mutex_lock(&ctx->lock);
++
++	switch (mode) {
++	case DRM_MODE_DPMS_ON:
++		/* TODO. */
++		break;
++	case DRM_MODE_DPMS_STANDBY:
++	case DRM_MODE_DPMS_SUSPEND:
++	case DRM_MODE_DPMS_OFF:
++		/* TODO. */
++		break;
++	default:
++		DRM_DEBUG_KMS("unspecified mode %d\n", mode);
++		break;
++	}
++
++	mutex_unlock(&ctx->lock);
++}
++
++static void vidi_apply(struct device *subdrv_dev)
++{
++	struct vidi_context *ctx = get_vidi_context(subdrv_dev);
++	struct exynos_drm_manager *mgr = ctx->subdrv.manager;
++	struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
++	struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
++	struct vidi_win_data *win_data;
++	int i;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	for (i = 0; i < WINDOWS_NR; i++) {
++		win_data = &ctx->win_data[i];
++		if (win_data->enabled && (ovl_ops && ovl_ops->commit))
++			ovl_ops->commit(subdrv_dev, i);
++	}
++
++	if (mgr_ops && mgr_ops->commit)
++		mgr_ops->commit(subdrv_dev);
++}
++
++static void vidi_commit(struct device *dev)
++{
++	struct vidi_context *ctx = get_vidi_context(dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (ctx->suspended)
++		return;
++}
++
++static int vidi_enable_vblank(struct device *dev)
++{
++	struct vidi_context *ctx = get_vidi_context(dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (ctx->suspended)
++		return -EPERM;
++
++	if (!test_and_set_bit(0, &ctx->irq_flags))
++		ctx->vblank_on = true;
++
++	return 0;
++}
++
++static void vidi_disable_vblank(struct device *dev)
++{
++	struct vidi_context *ctx = get_vidi_context(dev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (ctx->suspended)
++		return;
++
++	if (test_and_clear_bit(0, &ctx->irq_flags))
++		ctx->vblank_on = false;
++}
++
++static struct exynos_drm_manager_ops vidi_manager_ops = {
++	.dpms = vidi_dpms,
++	.apply = vidi_apply,
++	.commit = vidi_commit,
++	.enable_vblank = vidi_enable_vblank,
++	.disable_vblank = vidi_disable_vblank,
++};
++
++static void vidi_win_mode_set(struct device *dev,
++			      struct exynos_drm_overlay *overlay)
++{
++	struct vidi_context *ctx = get_vidi_context(dev);
++	struct vidi_win_data *win_data;
++	int win;
++	unsigned long offset;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (!overlay) {
++		dev_err(dev, "overlay is NULL\n");
++		return;
++	}
++
++	win = overlay->zpos;
++	if (win == DEFAULT_ZPOS)
++		win = ctx->default_win;
++
++	if (win < 0 || win > WINDOWS_NR)
++		return;
++
++	offset = overlay->fb_x * (overlay->bpp >> 3);
++	offset += overlay->fb_y * overlay->pitch;
++
++	DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
++
++	win_data = &ctx->win_data[win];
++
++	win_data->offset_x = overlay->crtc_x;
++	win_data->offset_y = overlay->crtc_y;
++	win_data->ovl_width = overlay->crtc_width;
++	win_data->ovl_height = overlay->crtc_height;
++	win_data->fb_width = overlay->fb_width;
++	win_data->fb_height = overlay->fb_height;
++	win_data->dma_addr = overlay->dma_addr[0] + offset;
++	win_data->vaddr = overlay->vaddr[0] + offset;
++	win_data->bpp = overlay->bpp;
++	win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
++				(overlay->bpp >> 3);
++	win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
++
++	/*
++	 * some parts of win_data should be transferred to user side
++	 * through specific ioctl.
++	 */
++
++	DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
++			win_data->offset_x, win_data->offset_y);
++	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
++			win_data->ovl_width, win_data->ovl_height);
++	DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
++			(unsigned long)win_data->dma_addr,
++			(unsigned long)win_data->vaddr);
++	DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
++			overlay->fb_width, overlay->crtc_width);
++}
++
++static void vidi_win_commit(struct device *dev, int zpos)
++{
++	struct vidi_context *ctx = get_vidi_context(dev);
++	struct vidi_win_data *win_data;
++	int win = zpos;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (ctx->suspended)
++		return;
++
++	if (win == DEFAULT_ZPOS)
++		win = ctx->default_win;
++
++	if (win < 0 || win > WINDOWS_NR)
++		return;
++
++	win_data = &ctx->win_data[win];
++
++	win_data->enabled = true;
++
++	DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr);
++
++	if (ctx->vblank_on)
++		schedule_work(&ctx->work);
++}
++
++static void vidi_win_disable(struct device *dev, int zpos)
++{
++	struct vidi_context *ctx = get_vidi_context(dev);
++	struct vidi_win_data *win_data;
++	int win = zpos;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (win == DEFAULT_ZPOS)
++		win = ctx->default_win;
++
++	if (win < 0 || win > WINDOWS_NR)
++		return;
++
++	win_data = &ctx->win_data[win];
++	win_data->enabled = false;
++
++	/* TODO. */
++}
++
++static struct exynos_drm_overlay_ops vidi_overlay_ops = {
++	.mode_set = vidi_win_mode_set,
++	.commit = vidi_win_commit,
++	.disable = vidi_win_disable,
++};
++
++static struct exynos_drm_manager vidi_manager = {
++	.pipe		= -1,
++	.ops		= &vidi_manager_ops,
++	.overlay_ops	= &vidi_overlay_ops,
++	.display_ops	= &vidi_display_ops,
++};
++
++static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
++{
++	struct exynos_drm_private *dev_priv = drm_dev->dev_private;
++	struct drm_pending_vblank_event *e, *t;
++	struct timeval now;
++	unsigned long flags;
++	bool is_checked = false;
++
++	spin_lock_irqsave(&drm_dev->event_lock, flags);
++
++	list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
++			base.link) {
++		/* if event's pipe isn't same as crtc then ignore it. */
++		if (crtc != e->pipe)
++			continue;
++
++		is_checked = true;
++
++		do_gettimeofday(&now);
++		e->event.sequence = 0;
++		e->event.tv_sec = now.tv_sec;
++		e->event.tv_usec = now.tv_usec;
++
++		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
++		wake_up_interruptible(&e->base.file_priv->event_wait);
++	}
++
++	if (is_checked) {
++		/*
++		 * call drm_vblank_put only in case that drm_vblank_get was
++		 * called.
++		 */
++		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
++			drm_vblank_put(drm_dev, crtc);
++
++		/*
++		 * don't off vblank if vblank_disable_allowed is 1,
++		 * because vblank would be off by timer handler.
++		 */
++		if (!drm_dev->vblank_disable_allowed)
++			drm_vblank_off(drm_dev, crtc);
++	}
++
++	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
++}
++
++static void vidi_fake_vblank_handler(struct work_struct *work)
++{
++	struct vidi_context *ctx = container_of(work, struct vidi_context,
++					work);
++	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
++	struct exynos_drm_manager *manager = subdrv->manager;
++
++	if (manager->pipe < 0)
++		return;
++
++	/* refresh rate is about 50Hz. */
++	usleep_range(16000, 20000);
++
++	drm_handle_vblank(subdrv->drm_dev, manager->pipe);
++	vidi_finish_pageflip(subdrv->drm_dev, manager->pipe);
++}
++
++static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
++{
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	/*
++	 * enable drm irq mode.
++	 * - with irq_enabled = 1, we can use the vblank feature.
++	 *
++	 * P.S. note that we wouldn't use drm irq handler but
++	 *	just specific driver own one instead because
++	 *	drm framework supports only one irq handler.
++	 */
++	drm_dev->irq_enabled = 1;
++
++	/*
++	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
++	 * by drm timer once a current process gives up ownership of
++	 * vblank event.(after drm_vblank_put function is called)
++	 */
++	drm_dev->vblank_disable_allowed = 1;
++
++	return 0;
++}
++
++static void vidi_subdrv_remove(struct drm_device *drm_dev)
++{
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	/* TODO. */
++}
++
++static int vidi_power_on(struct vidi_context *ctx, bool enable)
++{
++	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
++	struct device *dev = subdrv->dev;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (enable != false && enable != true)
++		return -EINVAL;
++
++	if (enable) {
++		ctx->suspended = false;
++
++		/* if vblank was enabled status, enable it again. */
++		if (test_and_clear_bit(0, &ctx->irq_flags))
++			vidi_enable_vblank(dev);
++
++		vidi_apply(dev);
++	} else {
++		ctx->suspended = true;
++	}
++
++	return 0;
++}
++
++static int vidi_show_connection(struct device *dev,
++				struct device_attribute *attr, char *buf)
++{
++	int rc;
++	struct vidi_context *ctx = get_vidi_context(dev);
++
++	mutex_lock(&ctx->lock);
++
++	rc = sprintf(buf, "%d\n", ctx->connected);
++
++	mutex_unlock(&ctx->lock);
++
++	return rc;
++}
++
++static int vidi_store_connection(struct device *dev,
++				struct device_attribute *attr,
++				const char *buf, size_t len)
++{
++	struct vidi_context *ctx = get_vidi_context(dev);
++	int ret;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	ret = kstrtoint(buf, 0, &ctx->connected);
++	if (ret)
++		return ret;
++
++	if (ctx->connected > 1)
++		return -EINVAL;
++
++	DRM_DEBUG_KMS("requested connection.\n");
++
++	drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);
++
++	return len;
++}
++
++static DEVICE_ATTR(connection, 0644, vidi_show_connection,
++			vidi_store_connection);
++
++int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
++				struct drm_file *file_priv)
++{
++	struct vidi_context *ctx = NULL;
++	struct drm_encoder *encoder;
++	struct exynos_drm_manager *manager;
++	struct exynos_drm_display_ops *display_ops;
++	struct drm_exynos_vidi_connection *vidi = data;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	if (!vidi) {
++		DRM_DEBUG_KMS("user data for vidi is null.\n");
++		return -EINVAL;
++	}
++
++	if (!vidi->edid) {
++		DRM_DEBUG_KMS("edid data is null.\n");
++		return -EINVAL;
++	}
++
++	if (vidi->connection > 1) {
++		DRM_DEBUG_KMS("connection should be 0 or 1.\n");
++		return -EINVAL;
++	}
++
++	list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list,
++								head) {
++		manager = exynos_drm_get_manager(encoder);
++		display_ops = manager->display_ops;
++
++		if (display_ops->type == EXYNOS_DISPLAY_TYPE_VIDI) {
++			ctx = get_vidi_context(manager->dev);
++			break;
++		}
++	}
++
++	if (!ctx) {
++		DRM_DEBUG_KMS("not found virtual device type encoder.\n");
++		return -EINVAL;
++	}
++
++	if (ctx->connected == vidi->connection) {
++		DRM_DEBUG_KMS("same connection request.\n");
++		return -EINVAL;
++	}
++
++	if (vidi->connection)
++		ctx->raw_edid = (struct edid *)vidi->edid;
++
++	ctx->connected = vidi->connection;
++	drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);
++
++	return 0;
++}
++
++static int __devinit vidi_probe(struct platform_device *pdev)
++{
++	struct device *dev = &pdev->dev;
++	struct vidi_context *ctx;
++	struct exynos_drm_subdrv *subdrv;
++	int ret;
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
++	if (!ctx)
++		return -ENOMEM;
++
++	ctx->default_win = 0;
++
++	INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
++
++	/* for test */
++	ctx->raw_edid = (struct edid *)fake_edid_info;
++
++	subdrv = &ctx->subdrv;
++	subdrv->dev = dev;
++	subdrv->manager = &vidi_manager;
++	subdrv->probe = vidi_subdrv_probe;
++	subdrv->remove = vidi_subdrv_remove;
++
++	mutex_init(&ctx->lock);
++
++	platform_set_drvdata(pdev, ctx);
++
++	ret = device_create_file(&pdev->dev, &dev_attr_connection);
++	if (ret < 0)
++		DRM_INFO("failed to create connection sysfs.\n");
++
++	exynos_drm_subdrv_register(subdrv);
++
++	return 0;
++}
++
++static int __devexit vidi_remove(struct platform_device *pdev)
++{
++	struct vidi_context *ctx = platform_get_drvdata(pdev);
++
++	DRM_DEBUG_KMS("%s\n", __FILE__);
++
++	exynos_drm_subdrv_unregister(&ctx->subdrv);
++
++	kfree(ctx);
++
++	return 0;
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int vidi_suspend(struct device *dev)
++{
++	struct vidi_context *ctx = get_vidi_context(dev);
++
++	return vidi_power_on(ctx, false);
++}
++
++static int vidi_resume(struct device *dev)
++{
++	struct vidi_context *ctx = get_vidi_context(dev);
++
++	return vidi_power_on(ctx, true);
++}
++#endif
++
++static const struct dev_pm_ops vidi_pm_ops = {
++	SET_SYSTEM_SLEEP_PM_OPS(vidi_suspend, vidi_resume)
++};
++
++struct platform_driver vidi_driver = {
++	.probe		= vidi_probe,
++	.remove		= __devexit_p(vidi_remove),
++	.driver		= {
++		.name	= "exynos-drm-vidi",
++		.owner	= THIS_MODULE,
++		.pm	= &vidi_pm_ops,
++	},
++};
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.h b/drivers/gpu/drm/exynos/exynos_drm_vidi.h
+new file mode 100644
+index 0000000..a4babe4
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.h
+@@ -0,0 +1,36 @@
++/* exynos_drm_vidi.h
++ *
++ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
++ * Author: Inki Dae <inki.dae at samsung.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _EXYNOS_DRM_VIDI_H_
++#define _EXYNOS_DRM_VIDI_H_
++
++#ifdef CONFIG_DRM_EXYNOS_VIDI
++int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
++				struct drm_file *file_priv);
++#else
++#define vidi_connection_ioctl	NULL
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
+new file mode 100644
+index 0000000..b003538
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
+@@ -0,0 +1,2389 @@
++/*
++ * Copyright (C) 2011 Samsung Electronics Co.Ltd
++ * Authors:
++ * Seung-Woo Kim <sw0312.kim at samsung.com>
++ *	Inki Dae <inki.dae at samsung.com>
++ *	Joonyoung Shim <jy0922.shim at samsung.com>
++ *
++ * Based on drivers/media/video/s5p-tv/hdmi_drv.c
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ *
++ */
++
++#include "drmP.h"
++#include "drm_edid.h"
++#include "drm_crtc_helper.h"
++
++#include "regs-hdmi.h"
++
++#include <linux/kernel.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++#include <linux/i2c.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/delay.h>
++#include <linux/pm_runtime.h>
++#include <linux/clk.h>
++#include <linux/regulator/consumer.h>
++
++#include <drm/exynos_drm.h>
++
++#include "exynos_drm_drv.h"
++#include "exynos_drm_hdmi.h"
++
++#include "exynos_hdmi.h"
++
++#define MAX_WIDTH		1920
++#define MAX_HEIGHT		1080
++#define get_hdmi_context(dev)	platform_get_drvdata(to_platform_device(dev))
++
++struct hdmi_resources {
++	struct clk			*hdmi;
++	struct clk			*sclk_hdmi;
++	struct clk			*sclk_pixel;
++	struct clk			*sclk_hdmiphy;
++	struct clk			*hdmiphy;
++	struct regulator_bulk_data	*regul_bulk;
++	int				regul_count;
++};
++
++struct hdmi_context {
++	struct device			*dev;
++	struct drm_device		*drm_dev;
++	struct fb_videomode		*default_timing;
++	unsigned int			is_v13:1;
++	unsigned int			default_win;
++	unsigned int			default_bpp;
++	bool				hpd_handle;
++	bool				enabled;
++
++	struct resource			*regs_res;
++	void __iomem			*regs;
++	unsigned int			irq;
++	struct workqueue_struct		*wq;
++	struct work_struct		hotplug_work;
++
++	struct i2c_client		*ddc_port;
++	struct i2c_client		*hdmiphy_port;
++
++	/* current hdmiphy conf index */
++	int cur_conf;
++
++	struct hdmi_resources		res;
++	void				*parent_ctx;
++};
++
++/* HDMI Version 1.3 */
++static const u8 hdmiphy_v13_conf27[32] = {
++	0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
++	0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
++	0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
++	0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
++};
++
++static const u8 hdmiphy_v13_conf27_027[32] = {
++	0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
++	0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
++	0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
++	0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
++};
++
++static const u8 hdmiphy_v13_conf74_175[32] = {
++	0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
++	0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
++	0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
++	0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
++};
++
++static const u8 hdmiphy_v13_conf74_25[32] = {
++	0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
++	0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
++	0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
++	0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
++};
++
++static const u8 hdmiphy_v13_conf148_5[32] = {
++	0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
++	0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
++	0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
++	0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
++};
++
++struct hdmi_v13_tg_regs {
++	u8 cmd;
++	u8 h_fsz_l;
++	u8 h_fsz_h;
++	u8 hact_st_l;
++	u8 hact_st_h;
++	u8 hact_sz_l;
++	u8 hact_sz_h;
++	u8 v_fsz_l;
++	u8 v_fsz_h;
++	u8 vsync_l;
++	u8 vsync_h;
++	u8 vsync2_l;
++	u8 vsync2_h;
++	u8 vact_st_l;
++	u8 vact_st_h;
++	u8 vact_sz_l;
++	u8 vact_sz_h;
++	u8 field_chg_l;
++	u8 field_chg_h;
++	u8 vact_st2_l;
++	u8 vact_st2_h;
++	u8 vsync_top_hdmi_l;
++	u8 vsync_top_hdmi_h;
++	u8 vsync_bot_hdmi_l;
++	u8 vsync_bot_hdmi_h;
++	u8 field_top_hdmi_l;
++	u8 field_top_hdmi_h;
++	u8 field_bot_hdmi_l;
++	u8 field_bot_hdmi_h;
++};
++
++struct hdmi_v13_core_regs {
++	u8 h_blank[2];
++	u8 v_blank[3];
++	u8 h_v_line[3];
++	u8 vsync_pol[1];
++	u8 int_pro_mode[1];
++	u8 v_blank_f[3];
++	u8 h_sync_gen[3];
++	u8 v_sync_gen1[3];
++	u8 v_sync_gen2[3];
++	u8 v_sync_gen3[3];
++};
++
++struct hdmi_v13_preset_conf {
++	struct hdmi_v13_core_regs core;
++	struct hdmi_v13_tg_regs tg;
++};
++
++struct hdmi_v13_conf {
++	int width;
++	int height;
++	int vrefresh;
++	bool interlace;
++	const u8 *hdmiphy_data;
++	const struct hdmi_v13_preset_conf *conf;
++};
++
++static const struct hdmi_v13_preset_conf hdmi_v13_conf_480p = {
++	.core = {
++		.h_blank = {0x8a, 0x00},
++		.v_blank = {0x0d, 0x6a, 0x01},
++		.h_v_line = {0x0d, 0xa2, 0x35},
++		.vsync_pol = {0x01},
++		.int_pro_mode = {0x00},
++		.v_blank_f = {0x00, 0x00, 0x00},
++		.h_sync_gen = {0x0e, 0x30, 0x11},
++		.v_sync_gen1 = {0x0f, 0x90, 0x00},
++		/* other don't care */
++	},
++	.tg = {
++		0x00, /* cmd */
++		0x5a, 0x03, /* h_fsz */
++		0x8a, 0x00, 0xd0, 0x02, /* hact */
++		0x0d, 0x02, /* v_fsz */
++		0x01, 0x00, 0x33, 0x02, /* vsync */
++		0x2d, 0x00, 0xe0, 0x01, /* vact */
++		0x33, 0x02, /* field_chg */
++		0x49, 0x02, /* vact_st2 */
++		0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
++		0x01, 0x00, 0x33, 0x02, /* field top/bot */
++	},
++};
++
++static const struct hdmi_v13_preset_conf hdmi_v13_conf_720p60 = {
++	.core = {
++		.h_blank = {0x72, 0x01},
++		.v_blank = {0xee, 0xf2, 0x00},
++		.h_v_line = {0xee, 0x22, 0x67},
++		.vsync_pol = {0x00},
++		.int_pro_mode = {0x00},
++		.v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
++		.h_sync_gen = {0x6c, 0x50, 0x02},
++		.v_sync_gen1 = {0x0a, 0x50, 0x00},
++		.v_sync_gen2 = {0x01, 0x10, 0x00},
++		.v_sync_gen3 = {0x01, 0x10, 0x00},
++		/* other don't care */
++	},
++	.tg = {
++		0x00, /* cmd */
++		0x72, 0x06, /* h_fsz */
++		0x71, 0x01, 0x01, 0x05, /* hact */
++		0xee, 0x02, /* v_fsz */
++		0x01, 0x00, 0x33, 0x02, /* vsync */
++		0x1e, 0x00, 0xd0, 0x02, /* vact */
++		0x33, 0x02, /* field_chg */
++		0x49, 0x02, /* vact_st2 */
++		0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
++		0x01, 0x00, 0x33, 0x02, /* field top/bot */
++	},
++};
++
++static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i50 = {
++	.core = {
++		.h_blank = {0xd0, 0x02},
++		.v_blank = {0x32, 0xB2, 0x00},
++		.h_v_line = {0x65, 0x04, 0xa5},
++		.vsync_pol = {0x00},
++		.int_pro_mode = {0x01},
++		.v_blank_f = {0x49, 0x2A, 0x23},
++		.h_sync_gen = {0x0E, 0xEA, 0x08},
++		.v_sync_gen1 = {0x07, 0x20, 0x00},
++		.v_sync_gen2 = {0x39, 0x42, 0x23},
++		.v_sync_gen3 = {0x38, 0x87, 0x73},
++		/* other don't care */
++	},
++	.tg = {
++		0x00, /* cmd */
++		0x50, 0x0A, /* h_fsz */
++		0xCF, 0x02, 0x81, 0x07, /* hact */
++		0x65, 0x04, /* v_fsz */
++		0x01, 0x00, 0x33, 0x02, /* vsync */
++		0x16, 0x00, 0x1c, 0x02, /* vact */
++		0x33, 0x02, /* field_chg */
++		0x49, 0x02, /* vact_st2 */
++		0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
++		0x01, 0x00, 0x33, 0x02, /* field top/bot */
++	},
++};
++
++static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p50 = {
++	.core = {
++		.h_blank = {0xd0, 0x02},
++		.v_blank = {0x65, 0x6c, 0x01},
++		.h_v_line = {0x65, 0x04, 0xa5},
++		.vsync_pol = {0x00},
++		.int_pro_mode = {0x00},
++		.v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
++		.h_sync_gen = {0x0e, 0xea, 0x08},
++		.v_sync_gen1 = {0x09, 0x40, 0x00},
++		.v_sync_gen2 = {0x01, 0x10, 0x00},
++		.v_sync_gen3 = {0x01, 0x10, 0x00},
++		/* other don't care */
++	},
++	.tg = {
++		0x00, /* cmd */
++		0x50, 0x0A, /* h_fsz */
++		0xCF, 0x02, 0x81, 0x07, /* hact */
++		0x65, 0x04, /* v_fsz */
++		0x01, 0x00, 0x33, 0x02, /* vsync */
++		0x2d, 0x00, 0x38, 0x04, /* vact */
++		0x33, 0x02, /* field_chg */
++		0x48, 0x02, /* vact_st2 */
++		0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
++		0x01, 0x00, 0x33, 0x02, /* field top/bot */
++	},
++};
++
++static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080i60 = {
++	.core = {
++		.h_blank = {0x18, 0x01},
++		.v_blank = {0x32, 0xB2, 0x00},
++		.h_v_line = {0x65, 0x84, 0x89},
++		.vsync_pol = {0x00},
++		.int_pro_mode = {0x01},
++		.v_blank_f = {0x49, 0x2A, 0x23},
++		.h_sync_gen = {0x56, 0x08, 0x02},
++		.v_sync_gen1 = {0x07, 0x20, 0x00},
++		.v_sync_gen2 = {0x39, 0x42, 0x23},
++		.v_sync_gen3 = {0xa4, 0x44, 0x4a},
++		/* other don't care */
++	},
++	.tg = {
++		0x00, /* cmd */
++		0x98, 0x08, /* h_fsz */
++		0x17, 0x01, 0x81, 0x07, /* hact */
++		0x65, 0x04, /* v_fsz */
++		0x01, 0x00, 0x33, 0x02, /* vsync */
++		0x16, 0x00, 0x1c, 0x02, /* vact */
++		0x33, 0x02, /* field_chg */
++		0x49, 0x02, /* vact_st2 */
++		0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
++		0x01, 0x00, 0x33, 0x02, /* field top/bot */
++	},
++};
++
++static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
++	.core = {
++		.h_blank = {0x18, 0x01},
++		.v_blank = {0x65, 0x6c, 0x01},
++		.h_v_line = {0x65, 0x84, 0x89},
++		.vsync_pol = {0x00},
++		.int_pro_mode = {0x00},
++		.v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
++		.h_sync_gen = {0x56, 0x08, 0x02},
++		.v_sync_gen1 = {0x09, 0x40, 0x00},
++		.v_sync_gen2 = {0x01, 0x10, 0x00},
++		.v_sync_gen3 = {0x01, 0x10, 0x00},
++		/* other don't care */
++	},
++	.tg = {
++		0x00, /* cmd */
++		0x98, 0x08, /* h_fsz */
++		0x17, 0x01, 0x81, 0x07, /* hact */
++		0x65, 0x04, /* v_fsz */
++		0x01, 0x00, 0x33, 0x02, /* vsync */
++		0x2d, 0x00, 0x38, 0x04, /* vact */
++		0x33, 0x02, /* field_chg */
++		0x48, 0x02, /* vact_st2 */
++		0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
++		0x01, 0x00, 0x33, 0x02, /* field top/bot */
++	},
++};
++
++static const struct hdmi_v13_conf hdmi_v13_confs[] = {
++	{ 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
++	{ 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 },
++	{ 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p },
++	{ 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 },
++	{ 1920, 1080, 50, false, hdmiphy_v13_conf148_5,
++				 &hdmi_v13_conf_1080p50 },
++	{ 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 },
++	{ 1920, 1080, 60, false, hdmiphy_v13_conf148_5,
++				 &hdmi_v13_conf_1080p60 },
++};
++
++/* HDMI Version 1.4 */
++static const u8 hdmiphy_conf27_027[32] = {
++	0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08,
++	0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
++	0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
++	0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
++};
++
++static const u8 hdmiphy_conf74_25[32] = {
++	0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
++	0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
++	0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
++	0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
++};
++
++static const u8 hdmiphy_conf148_5[32] = {
++	0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
++	0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
++	0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
++	0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
++};
++
++struct hdmi_tg_regs {
++	u8 cmd;
++	u8 h_fsz_l;
++	u8 h_fsz_h;
++	u8 hact_st_l;
++	u8 hact_st_h;
++	u8 hact_sz_l;
++	u8 hact_sz_h;
++	u8 v_fsz_l;
++	u8 v_fsz_h;
++	u8 vsync_l;
++	u8 vsync_h;
++	u8 vsync2_l;
++	u8 vsync2_h;
++	u8 vact_st_l;
++	u8 vact_st_h;
++	u8 vact_sz_l;
++	u8 vact_sz_h;
++	u8 field_chg_l;
++	u8 field_chg_h;
++	u8 vact_st2_l;
++	u8 vact_st2_h;
++	u8 vact_st3_l;
++	u8 vact_st3_h;
++	u8 vact_st4_l;
++	u8 vact_st4_h;
++	u8 vsync_top_hdmi_l;
++	u8 vsync_top_hdmi_h;
++	u8 vsync_bot_hdmi_l;
++	u8 vsync_bot_hdmi_h;
++	u8 field_top_hdmi_l;
++	u8 field_top_hdmi_h;
++	u8 field_bot_hdmi_l;
++	u8 field_bot_hdmi_h;
++	u8 tg_3d;
++};
++
++struct hdmi_core_regs {
++	u8 h_blank[2];
++	u8 v2_blank[2];
++	u8 v1_blank[2];
++	u8 v_line[2];
++	u8 h_line[2];
++	u8 hsync_pol[1];
++	u8 vsync_pol[1];
++	u8 int_pro_mode[1];
++	u8 v_blank_f0[2];
++	u8 v_blank_f1[2];
++	u8 h_sync_start[2];
++	u8 h_sync_end[2];
++	u8 v_sync_line_bef_2[2];
++	u8 v_sync_line_bef_1[2];
++	u8 v_sync_line_aft_2[2];
++	u8 v_sync_line_aft_1[2];
++	u8 v_sync_line_aft_pxl_2[2];
++	u8 v_sync_line_aft_pxl_1[2];
++	u8 v_blank_f2[2]; /* for 3D mode */
++	u8 v_blank_f3[2]; /* for 3D mode */
++	u8 v_blank_f4[2]; /* for 3D mode */
++	u8 v_blank_f5[2]; /* for 3D mode */
++	u8 v_sync_line_aft_3[2];
++	u8 v_sync_line_aft_4[2];
++	u8 v_sync_line_aft_5[2];
++	u8 v_sync_line_aft_6[2];
++	u8 v_sync_line_aft_pxl_3[2];
++	u8 v_sync_line_aft_pxl_4[2];
++	u8 v_sync_line_aft_pxl_5[2];
++	u8 v_sync_line_aft_pxl_6[2];
++	u8 vact_space_1[2];
++	u8 vact_space_2[2];
++	u8 vact_space_3[2];
++	u8 vact_space_4[2];
++	u8 vact_space_5[2];
++	u8 vact_space_6[2];
++};
++
++struct hdmi_preset_conf {
++	struct hdmi_core_regs core;
++	struct hdmi_tg_regs tg;
++};
++
++struct hdmi_conf {
++	int width;
++	int height;
++	int vrefresh;
++	bool interlace;
++	const u8 *hdmiphy_data;
++	const struct hdmi_preset_conf *conf;
++};
++
++static const struct hdmi_preset_conf hdmi_conf_480p60 = {
++	.core = {
++		.h_blank = {0x8a, 0x00},
++		.v2_blank = {0x0d, 0x02},
++		.v1_blank = {0x2d, 0x00},
++		.v_line = {0x0d, 0x02},
++		.h_line = {0x5a, 0x03},
++		.hsync_pol = {0x01},
++		.vsync_pol = {0x01},
++		.int_pro_mode = {0x00},
++		.v_blank_f0 = {0xff, 0xff},
++		.v_blank_f1 = {0xff, 0xff},
++		.h_sync_start = {0x0e, 0x00},
++		.h_sync_end = {0x4c, 0x00},
++		.v_sync_line_bef_2 = {0x0f, 0x00},
++		.v_sync_line_bef_1 = {0x09, 0x00},
++		.v_sync_line_aft_2 = {0xff, 0xff},
++		.v_sync_line_aft_1 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_2 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_1 = {0xff, 0xff},
++		.v_blank_f2 = {0xff, 0xff},
++		.v_blank_f3 = {0xff, 0xff},
++		.v_blank_f4 = {0xff, 0xff},
++		.v_blank_f5 = {0xff, 0xff},
++		.v_sync_line_aft_3 = {0xff, 0xff},
++		.v_sync_line_aft_4 = {0xff, 0xff},
++		.v_sync_line_aft_5 = {0xff, 0xff},
++		.v_sync_line_aft_6 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_3 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_4 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_5 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_6 = {0xff, 0xff},
++		.vact_space_1 = {0xff, 0xff},
++		.vact_space_2 = {0xff, 0xff},
++		.vact_space_3 = {0xff, 0xff},
++		.vact_space_4 = {0xff, 0xff},
++		.vact_space_5 = {0xff, 0xff},
++		.vact_space_6 = {0xff, 0xff},
++		/* other don't care */
++	},
++	.tg = {
++		0x00, /* cmd */
++		0x5a, 0x03, /* h_fsz */
++		0x8a, 0x00, 0xd0, 0x02, /* hact */
++		0x0d, 0x02, /* v_fsz */
++		0x01, 0x00, 0x33, 0x02, /* vsync */
++		0x2d, 0x00, 0xe0, 0x01, /* vact */
++		0x33, 0x02, /* field_chg */
++		0x48, 0x02, /* vact_st2 */
++		0x00, 0x00, /* vact_st3 */
++		0x00, 0x00, /* vact_st4 */
++		0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
++		0x01, 0x00, 0x33, 0x02, /* field top/bot */
++		0x00, /* 3d FP */
++	},
++};
++
++static const struct hdmi_preset_conf hdmi_conf_720p50 = {
++	.core = {
++		.h_blank = {0xbc, 0x02},
++		.v2_blank = {0xee, 0x02},
++		.v1_blank = {0x1e, 0x00},
++		.v_line = {0xee, 0x02},
++		.h_line = {0xbc, 0x07},
++		.hsync_pol = {0x00},
++		.vsync_pol = {0x00},
++		.int_pro_mode = {0x00},
++		.v_blank_f0 = {0xff, 0xff},
++		.v_blank_f1 = {0xff, 0xff},
++		.h_sync_start = {0xb6, 0x01},
++		.h_sync_end = {0xde, 0x01},
++		.v_sync_line_bef_2 = {0x0a, 0x00},
++		.v_sync_line_bef_1 = {0x05, 0x00},
++		.v_sync_line_aft_2 = {0xff, 0xff},
++		.v_sync_line_aft_1 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_2 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_1 = {0xff, 0xff},
++		.v_blank_f2 = {0xff, 0xff},
++		.v_blank_f3 = {0xff, 0xff},
++		.v_blank_f4 = {0xff, 0xff},
++		.v_blank_f5 = {0xff, 0xff},
++		.v_sync_line_aft_3 = {0xff, 0xff},
++		.v_sync_line_aft_4 = {0xff, 0xff},
++		.v_sync_line_aft_5 = {0xff, 0xff},
++		.v_sync_line_aft_6 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_3 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_4 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_5 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_6 = {0xff, 0xff},
++		.vact_space_1 = {0xff, 0xff},
++		.vact_space_2 = {0xff, 0xff},
++		.vact_space_3 = {0xff, 0xff},
++		.vact_space_4 = {0xff, 0xff},
++		.vact_space_5 = {0xff, 0xff},
++		.vact_space_6 = {0xff, 0xff},
++		/* other don't care */
++	},
++	.tg = {
++		0x00, /* cmd */
++		0xbc, 0x07, /* h_fsz */
++		0xbc, 0x02, 0x00, 0x05, /* hact */
++		0xee, 0x02, /* v_fsz */
++		0x01, 0x00, 0x33, 0x02, /* vsync */
++		0x1e, 0x00, 0xd0, 0x02, /* vact */
++		0x33, 0x02, /* field_chg */
++		0x48, 0x02, /* vact_st2 */
++		0x00, 0x00, /* vact_st3 */
++		0x00, 0x00, /* vact_st4 */
++		0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
++		0x01, 0x00, 0x33, 0x02, /* field top/bot */
++		0x00, /* 3d FP */
++	},
++};
++
++static const struct hdmi_preset_conf hdmi_conf_720p60 = {
++	.core = {
++		.h_blank = {0x72, 0x01},
++		.v2_blank = {0xee, 0x02},
++		.v1_blank = {0x1e, 0x00},
++		.v_line = {0xee, 0x02},
++		.h_line = {0x72, 0x06},
++		.hsync_pol = {0x00},
++		.vsync_pol = {0x00},
++		.int_pro_mode = {0x00},
++		.v_blank_f0 = {0xff, 0xff},
++		.v_blank_f1 = {0xff, 0xff},
++		.h_sync_start = {0x6c, 0x00},
++		.h_sync_end = {0x94, 0x00},
++		.v_sync_line_bef_2 = {0x0a, 0x00},
++		.v_sync_line_bef_1 = {0x05, 0x00},
++		.v_sync_line_aft_2 = {0xff, 0xff},
++		.v_sync_line_aft_1 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_2 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_1 = {0xff, 0xff},
++		.v_blank_f2 = {0xff, 0xff},
++		.v_blank_f3 = {0xff, 0xff},
++		.v_blank_f4 = {0xff, 0xff},
++		.v_blank_f5 = {0xff, 0xff},
++		.v_sync_line_aft_3 = {0xff, 0xff},
++		.v_sync_line_aft_4 = {0xff, 0xff},
++		.v_sync_line_aft_5 = {0xff, 0xff},
++		.v_sync_line_aft_6 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_3 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_4 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_5 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_6 = {0xff, 0xff},
++		.vact_space_1 = {0xff, 0xff},
++		.vact_space_2 = {0xff, 0xff},
++		.vact_space_3 = {0xff, 0xff},
++		.vact_space_4 = {0xff, 0xff},
++		.vact_space_5 = {0xff, 0xff},
++		.vact_space_6 = {0xff, 0xff},
++		/* other don't care */
++	},
++	.tg = {
++		0x00, /* cmd */
++		0x72, 0x06, /* h_fsz */
++		0x72, 0x01, 0x00, 0x05, /* hact */
++		0xee, 0x02, /* v_fsz */
++		0x01, 0x00, 0x33, 0x02, /* vsync */
++		0x1e, 0x00, 0xd0, 0x02, /* vact */
++		0x33, 0x02, /* field_chg */
++		0x48, 0x02, /* vact_st2 */
++		0x00, 0x00, /* vact_st3 */
++		0x00, 0x00, /* vact_st4 */
++		0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
++		0x01, 0x00, 0x33, 0x02, /* field top/bot */
++		0x00, /* 3d FP */
++	},
++};
++
++static const struct hdmi_preset_conf hdmi_conf_1080i50 = {
++	.core = {
++		.h_blank = {0xd0, 0x02},
++		.v2_blank = {0x32, 0x02},
++		.v1_blank = {0x16, 0x00},
++		.v_line = {0x65, 0x04},
++		.h_line = {0x50, 0x0a},
++		.hsync_pol = {0x00},
++		.vsync_pol = {0x00},
++		.int_pro_mode = {0x01},
++		.v_blank_f0 = {0x49, 0x02},
++		.v_blank_f1 = {0x65, 0x04},
++		.h_sync_start = {0x0e, 0x02},
++		.h_sync_end = {0x3a, 0x02},
++		.v_sync_line_bef_2 = {0x07, 0x00},
++		.v_sync_line_bef_1 = {0x02, 0x00},
++		.v_sync_line_aft_2 = {0x39, 0x02},
++		.v_sync_line_aft_1 = {0x34, 0x02},
++		.v_sync_line_aft_pxl_2 = {0x38, 0x07},
++		.v_sync_line_aft_pxl_1 = {0x38, 0x07},
++		.v_blank_f2 = {0xff, 0xff},
++		.v_blank_f3 = {0xff, 0xff},
++		.v_blank_f4 = {0xff, 0xff},
++		.v_blank_f5 = {0xff, 0xff},
++		.v_sync_line_aft_3 = {0xff, 0xff},
++		.v_sync_line_aft_4 = {0xff, 0xff},
++		.v_sync_line_aft_5 = {0xff, 0xff},
++		.v_sync_line_aft_6 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_3 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_4 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_5 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_6 = {0xff, 0xff},
++		.vact_space_1 = {0xff, 0xff},
++		.vact_space_2 = {0xff, 0xff},
++		.vact_space_3 = {0xff, 0xff},
++		.vact_space_4 = {0xff, 0xff},
++		.vact_space_5 = {0xff, 0xff},
++		.vact_space_6 = {0xff, 0xff},
++		/* other don't care */
++	},
++	.tg = {
++		0x00, /* cmd */
++		0x50, 0x0a, /* h_fsz */
++		0xd0, 0x02, 0x80, 0x07, /* hact */
++		0x65, 0x04, /* v_fsz */
++		0x01, 0x00, 0x33, 0x02, /* vsync */
++		0x16, 0x00, 0x1c, 0x02, /* vact */
++		0x33, 0x02, /* field_chg */
++		0x49, 0x02, /* vact_st2 */
++		0x00, 0x00, /* vact_st3 */
++		0x00, 0x00, /* vact_st4 */
++		0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
++		0x01, 0x00, 0x33, 0x02, /* field top/bot */
++		0x00, /* 3d FP */
++	},
++};
++
++static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
++	.core = {
++		.h_blank = {0x18, 0x01},
++		.v2_blank = {0x32, 0x02},
++		.v1_blank = {0x16, 0x00},
++		.v_line = {0x65, 0x04},
++		.h_line = {0x98, 0x08},
++		.hsync_pol = {0x00},
++		.vsync_pol = {0x00},
++		.int_pro_mode = {0x01},
++		.v_blank_f0 = {0x49, 0x02},
++		.v_blank_f1 = {0x65, 0x04},
++		.h_sync_start = {0x56, 0x00},
++		.h_sync_end = {0x82, 0x00},
++		.v_sync_line_bef_2 = {0x07, 0x00},
++		.v_sync_line_bef_1 = {0x02, 0x00},
++		.v_sync_line_aft_2 = {0x39, 0x02},
++		.v_sync_line_aft_1 = {0x34, 0x02},
++		.v_sync_line_aft_pxl_2 = {0xa4, 0x04},
++		.v_sync_line_aft_pxl_1 = {0xa4, 0x04},
++		.v_blank_f2 = {0xff, 0xff},
++		.v_blank_f3 = {0xff, 0xff},
++		.v_blank_f4 = {0xff, 0xff},
++		.v_blank_f5 = {0xff, 0xff},
++		.v_sync_line_aft_3 = {0xff, 0xff},
++		.v_sync_line_aft_4 = {0xff, 0xff},
++		.v_sync_line_aft_5 = {0xff, 0xff},
++		.v_sync_line_aft_6 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_3 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_4 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_5 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_6 = {0xff, 0xff},
++		.vact_space_1 = {0xff, 0xff},
++		.vact_space_2 = {0xff, 0xff},
++		.vact_space_3 = {0xff, 0xff},
++		.vact_space_4 = {0xff, 0xff},
++		.vact_space_5 = {0xff, 0xff},
++		.vact_space_6 = {0xff, 0xff},
++		/* other don't care */
++	},
++	.tg = {
++		0x00, /* cmd */
++		0x98, 0x08, /* h_fsz */
++		0x18, 0x01, 0x80, 0x07, /* hact */
++		0x65, 0x04, /* v_fsz */
++		0x01, 0x00, 0x33, 0x02, /* vsync */
++		0x16, 0x00, 0x1c, 0x02, /* vact */
++		0x33, 0x02, /* field_chg */
++		0x49, 0x02, /* vact_st2 */
++		0x00, 0x00, /* vact_st3 */
++		0x00, 0x00, /* vact_st4 */
++		0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
++		0x01, 0x00, 0x33, 0x02, /* field top/bot */
++		0x00, /* 3d FP */
++	},
++};
++
++static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
++	.core = {
++		.h_blank = {0xd0, 0x02},
++		.v2_blank = {0x65, 0x04},
++		.v1_blank = {0x2d, 0x00},
++		.v_line = {0x65, 0x04},
++		.h_line = {0x50, 0x0a},
++		.hsync_pol = {0x00},
++		.vsync_pol = {0x00},
++		.int_pro_mode = {0x00},
++		.v_blank_f0 = {0xff, 0xff},
++		.v_blank_f1 = {0xff, 0xff},
++		.h_sync_start = {0x0e, 0x02},
++		.h_sync_end = {0x3a, 0x02},
++		.v_sync_line_bef_2 = {0x09, 0x00},
++		.v_sync_line_bef_1 = {0x04, 0x00},
++		.v_sync_line_aft_2 = {0xff, 0xff},
++		.v_sync_line_aft_1 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_2 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_1 = {0xff, 0xff},
++		.v_blank_f2 = {0xff, 0xff},
++		.v_blank_f3 = {0xff, 0xff},
++		.v_blank_f4 = {0xff, 0xff},
++		.v_blank_f5 = {0xff, 0xff},
++		.v_sync_line_aft_3 = {0xff, 0xff},
++		.v_sync_line_aft_4 = {0xff, 0xff},
++		.v_sync_line_aft_5 = {0xff, 0xff},
++		.v_sync_line_aft_6 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_3 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_4 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_5 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_6 = {0xff, 0xff},
++		.vact_space_1 = {0xff, 0xff},
++		.vact_space_2 = {0xff, 0xff},
++		.vact_space_3 = {0xff, 0xff},
++		.vact_space_4 = {0xff, 0xff},
++		.vact_space_5 = {0xff, 0xff},
++		.vact_space_6 = {0xff, 0xff},
++		/* other don't care */
++	},
++	.tg = {
++		0x00, /* cmd */
++		0x50, 0x0a, /* h_fsz */
++		0xd0, 0x02, 0x80, 0x07, /* hact */
++		0x65, 0x04, /* v_fsz */
++		0x01, 0x00, 0x33, 0x02, /* vsync */
++		0x2d, 0x00, 0x38, 0x04, /* vact */
++		0x33, 0x02, /* field_chg */
++		0x48, 0x02, /* vact_st2 */
++		0x00, 0x00, /* vact_st3 */
++		0x00, 0x00, /* vact_st4 */
++		0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
++		0x01, 0x00, 0x33, 0x02, /* field top/bot */
++		0x00, /* 3d FP */
++	},
++};
++
++static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
++	.core = {
++		.h_blank = {0x18, 0x01},
++		.v2_blank = {0x65, 0x04},
++		.v1_blank = {0x2d, 0x00},
++		.v_line = {0x65, 0x04},
++		.h_line = {0x98, 0x08},
++		.hsync_pol = {0x00},
++		.vsync_pol = {0x00},
++		.int_pro_mode = {0x00},
++		.v_blank_f0 = {0xff, 0xff},
++		.v_blank_f1 = {0xff, 0xff},
++		.h_sync_start = {0x56, 0x00},
++		.h_sync_end = {0x82, 0x00},
++		.v_sync_line_bef_2 = {0x09, 0x00},
++		.v_sync_line_bef_1 = {0x04, 0x00},
++		.v_sync_line_aft_2 = {0xff, 0xff},
++		.v_sync_line_aft_1 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_2 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_1 = {0xff, 0xff},
++		.v_blank_f2 = {0xff, 0xff},
++		.v_blank_f3 = {0xff, 0xff},
++		.v_blank_f4 = {0xff, 0xff},
++		.v_blank_f5 = {0xff, 0xff},
++		.v_sync_line_aft_3 = {0xff, 0xff},
++		.v_sync_line_aft_4 = {0xff, 0xff},
++		.v_sync_line_aft_5 = {0xff, 0xff},
++		.v_sync_line_aft_6 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_3 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_4 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_5 = {0xff, 0xff},
++		.v_sync_line_aft_pxl_6 = {0xff, 0xff},
++		/* other don't care */
++	},
++	.tg = {
++		0x00, /* cmd */
++		0x98, 0x08, /* h_fsz */
++		0x18, 0x01, 0x80, 0x07, /* hact */
++		0x65, 0x04, /* v_fsz */
++		0x01, 0x00, 0x33, 0x02, /* vsync */
++		0x2d, 0x00, 0x38, 0x04, /* vact */
++		0x33, 0x02, /* field_chg */
++		0x48, 0x02, /* vact_st2 */
++		0x00, 0x00, /* vact_st3 */
++		0x00, 0x00, /* vact_st4 */
++		0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
++		0x01, 0x00, 0x33, 0x02, /* field top/bot */
++		0x00, /* 3d FP */
++	},
++};
++
++static const struct hdmi_conf hdmi_confs[] = {
++	{ 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 },
++	{ 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 },
++	{ 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
++	{ 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
++	{ 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
++	{ 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
++	{ 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
++};
++
++
++static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
++{
++	return readl(hdata->regs + reg_id);
++}
++
++static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
++				 u32 reg_id, u8 value)
++{
++	writeb(value, hdata->regs + reg_id);
++}
++
++static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
++				 u32 reg_id, u32 value, u32 mask)
++{
++	u32 old = readl(hdata->regs + reg_id);
++	value = (value & mask) | (old & ~mask);
++	writel(value, hdata->regs + reg_id);
++}
++
++static void hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix)
++{
++#define DUMPREG(reg_id) \
++	DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
++	readl(hdata->regs + reg_id))
++	DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
++	DUMPREG(HDMI_INTC_FLAG);
++	DUMPREG(HDMI_INTC_CON);
++	DUMPREG(HDMI_HPD_STATUS);
++	DUMPREG(HDMI_V13_PHY_RSTOUT);
++	DUMPREG(HDMI_V13_PHY_VPLL);
++	DUMPREG(HDMI_V13_PHY_CMU);
++	DUMPREG(HDMI_V13_CORE_RSTOUT);
++
++	DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
++	DUMPREG(HDMI_CON_0);
++	DUMPREG(HDMI_CON_1);
++	DUMPREG(HDMI_CON_2);
++	DUMPREG(HDMI_SYS_STATUS);
++	DUMPREG(HDMI_V13_PHY_STATUS);
++	DUMPREG(HDMI_STATUS_EN);
++	DUMPREG(HDMI_HPD);
++	DUMPREG(HDMI_MODE_SEL);
++	DUMPREG(HDMI_V13_HPD_GEN);
++	DUMPREG(HDMI_V13_DC_CONTROL);
++	DUMPREG(HDMI_V13_VIDEO_PATTERN_GEN);
++
++	DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
++	DUMPREG(HDMI_H_BLANK_0);
++	DUMPREG(HDMI_H_BLANK_1);
++	DUMPREG(HDMI_V13_V_BLANK_0);
++	DUMPREG(HDMI_V13_V_BLANK_1);
++	DUMPREG(HDMI_V13_V_BLANK_2);
++	DUMPREG(HDMI_V13_H_V_LINE_0);
++	DUMPREG(HDMI_V13_H_V_LINE_1);
++	DUMPREG(HDMI_V13_H_V_LINE_2);
++	DUMPREG(HDMI_VSYNC_POL);
++	DUMPREG(HDMI_INT_PRO_MODE);
++	DUMPREG(HDMI_V13_V_BLANK_F_0);
++	DUMPREG(HDMI_V13_V_BLANK_F_1);
++	DUMPREG(HDMI_V13_V_BLANK_F_2);
++	DUMPREG(HDMI_V13_H_SYNC_GEN_0);
++	DUMPREG(HDMI_V13_H_SYNC_GEN_1);
++	DUMPREG(HDMI_V13_H_SYNC_GEN_2);
++	DUMPREG(HDMI_V13_V_SYNC_GEN_1_0);
++	DUMPREG(HDMI_V13_V_SYNC_GEN_1_1);
++	DUMPREG(HDMI_V13_V_SYNC_GEN_1_2);
++	DUMPREG(HDMI_V13_V_SYNC_GEN_2_0);
++	DUMPREG(HDMI_V13_V_SYNC_GEN_2_1);
++	DUMPREG(HDMI_V13_V_SYNC_GEN_2_2);
++	DUMPREG(HDMI_V13_V_SYNC_GEN_3_0);
++	DUMPREG(HDMI_V13_V_SYNC_GEN_3_1);
++	DUMPREG(HDMI_V13_V_SYNC_GEN_3_2);
++
++	DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
++	DUMPREG(HDMI_TG_CMD);
++	DUMPREG(HDMI_TG_H_FSZ_L);
++	DUMPREG(HDMI_TG_H_FSZ_H);
++	DUMPREG(HDMI_TG_HACT_ST_L);
++	DUMPREG(HDMI_TG_HACT_ST_H);
++	DUMPREG(HDMI_TG_HACT_SZ_L);
++	DUMPREG(HDMI_TG_HACT_SZ_H);
++	DUMPREG(HDMI_TG_V_FSZ_L);
++	DUMPREG(HDMI_TG_V_FSZ_H);
++	DUMPREG(HDMI_TG_VSYNC_L);
++	DUMPREG(HDMI_TG_VSYNC_H);
++	DUMPREG(HDMI_TG_VSYNC2_L);
++	DUMPREG(HDMI_TG_VSYNC2_H);
++	DUMPREG(HDMI_TG_VACT_ST_L);
++	DUMPREG(HDMI_TG_VACT_ST_H);
++	DUMPREG(HDMI_TG_VACT_SZ_L);
++	DUMPREG(HDMI_TG_VACT_SZ_H);
++	DUMPREG(HDMI_TG_FIELD_CHG_L);
++	DUMPREG(HDMI_TG_FIELD_CHG_H);
++	DUMPREG(HDMI_TG_VACT_ST2_L);
++	DUMPREG(HDMI_TG_VACT_ST2_H);
++	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
++	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
++	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
++	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
++	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
++	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
++	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
++	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
++#undef DUMPREG
++}
++
++static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix)
++{
++	int i;
++
++#define DUMPREG(reg_id) \
++	DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
++	readl(hdata->regs + reg_id))
++
++	DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
++	DUMPREG(HDMI_INTC_CON);
++	DUMPREG(HDMI_INTC_FLAG);
++	DUMPREG(HDMI_HPD_STATUS);
++	DUMPREG(HDMI_INTC_CON_1);
++	DUMPREG(HDMI_INTC_FLAG_1);
++	DUMPREG(HDMI_PHY_STATUS_0);
++	DUMPREG(HDMI_PHY_STATUS_PLL);
++	DUMPREG(HDMI_PHY_CON_0);
++	DUMPREG(HDMI_PHY_RSTOUT);
++	DUMPREG(HDMI_PHY_VPLL);
++	DUMPREG(HDMI_PHY_CMU);
++	DUMPREG(HDMI_CORE_RSTOUT);
++
++	DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
++	DUMPREG(HDMI_CON_0);
++	DUMPREG(HDMI_CON_1);
++	DUMPREG(HDMI_CON_2);
++	DUMPREG(HDMI_SYS_STATUS);
++	DUMPREG(HDMI_PHY_STATUS_0);
++	DUMPREG(HDMI_STATUS_EN);
++	DUMPREG(HDMI_HPD);
++	DUMPREG(HDMI_MODE_SEL);
++	DUMPREG(HDMI_ENC_EN);
++	DUMPREG(HDMI_DC_CONTROL);
++	DUMPREG(HDMI_VIDEO_PATTERN_GEN);
++
++	DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
++	DUMPREG(HDMI_H_BLANK_0);
++	DUMPREG(HDMI_H_BLANK_1);
++	DUMPREG(HDMI_V2_BLANK_0);
++	DUMPREG(HDMI_V2_BLANK_1);
++	DUMPREG(HDMI_V1_BLANK_0);
++	DUMPREG(HDMI_V1_BLANK_1);
++	DUMPREG(HDMI_V_LINE_0);
++	DUMPREG(HDMI_V_LINE_1);
++	DUMPREG(HDMI_H_LINE_0);
++	DUMPREG(HDMI_H_LINE_1);
++	DUMPREG(HDMI_HSYNC_POL);
++
++	DUMPREG(HDMI_VSYNC_POL);
++	DUMPREG(HDMI_INT_PRO_MODE);
++	DUMPREG(HDMI_V_BLANK_F0_0);
++	DUMPREG(HDMI_V_BLANK_F0_1);
++	DUMPREG(HDMI_V_BLANK_F1_0);
++	DUMPREG(HDMI_V_BLANK_F1_1);
++
++	DUMPREG(HDMI_H_SYNC_START_0);
++	DUMPREG(HDMI_H_SYNC_START_1);
++	DUMPREG(HDMI_H_SYNC_END_0);
++	DUMPREG(HDMI_H_SYNC_END_1);
++
++	DUMPREG(HDMI_V_SYNC_LINE_BEF_2_0);
++	DUMPREG(HDMI_V_SYNC_LINE_BEF_2_1);
++	DUMPREG(HDMI_V_SYNC_LINE_BEF_1_0);
++	DUMPREG(HDMI_V_SYNC_LINE_BEF_1_1);
++
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_2_0);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_2_1);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_1_0);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_1_1);
++
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_0);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_1);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_0);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_1);
++
++	DUMPREG(HDMI_V_BLANK_F2_0);
++	DUMPREG(HDMI_V_BLANK_F2_1);
++	DUMPREG(HDMI_V_BLANK_F3_0);
++	DUMPREG(HDMI_V_BLANK_F3_1);
++	DUMPREG(HDMI_V_BLANK_F4_0);
++	DUMPREG(HDMI_V_BLANK_F4_1);
++	DUMPREG(HDMI_V_BLANK_F5_0);
++	DUMPREG(HDMI_V_BLANK_F5_1);
++
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_3_0);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_3_1);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_4_0);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_4_1);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_5_0);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_5_1);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_6_0);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_6_1);
++
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_0);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_1);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_0);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_1);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_0);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_1);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_0);
++	DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_1);
++
++	DUMPREG(HDMI_VACT_SPACE_1_0);
++	DUMPREG(HDMI_VACT_SPACE_1_1);
++	DUMPREG(HDMI_VACT_SPACE_2_0);
++	DUMPREG(HDMI_VACT_SPACE_2_1);
++	DUMPREG(HDMI_VACT_SPACE_3_0);
++	DUMPREG(HDMI_VACT_SPACE_3_1);
++	DUMPREG(HDMI_VACT_SPACE_4_0);
++	DUMPREG(HDMI_VACT_SPACE_4_1);
++	DUMPREG(HDMI_VACT_SPACE_5_0);
++	DUMPREG(HDMI_VACT_SPACE_5_1);
++	DUMPREG(HDMI_VACT_SPACE_6_0);
++	DUMPREG(HDMI_VACT_SPACE_6_1);
++
++	DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
++	DUMPREG(HDMI_TG_CMD);
++	DUMPREG(HDMI_TG_H_FSZ_L);
++	DUMPREG(HDMI_TG_H_FSZ_H);
++	DUMPREG(HDMI_TG_HACT_ST_L);
++	DUMPREG(HDMI_TG_HACT_ST_H);
++	DUMPREG(HDMI_TG_HACT_SZ_L);
++	DUMPREG(HDMI_TG_HACT_SZ_H);
++	DUMPREG(HDMI_TG_V_FSZ_L);
++	DUMPREG(HDMI_TG_V_FSZ_H);
++	DUMPREG(HDMI_TG_VSYNC_L);
++	DUMPREG(HDMI_TG_VSYNC_H);
++	DUMPREG(HDMI_TG_VSYNC2_L);
++	DUMPREG(HDMI_TG_VSYNC2_H);
++	DUMPREG(HDMI_TG_VACT_ST_L);
++	DUMPREG(HDMI_TG_VACT_ST_H);
++	DUMPREG(HDMI_TG_VACT_SZ_L);
++	DUMPREG(HDMI_TG_VACT_SZ_H);
++	DUMPREG(HDMI_TG_FIELD_CHG_L);
++	DUMPREG(HDMI_TG_FIELD_CHG_H);
++	DUMPREG(HDMI_TG_VACT_ST2_L);
++	DUMPREG(HDMI_TG_VACT_ST2_H);
++	DUMPREG(HDMI_TG_VACT_ST3_L);
++	DUMPREG(HDMI_TG_VACT_ST3_H);
++	DUMPREG(HDMI_TG_VACT_ST4_L);
++	DUMPREG(HDMI_TG_VACT_ST4_H);
++	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
++	DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
++	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
++	DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
++	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
++	DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
++	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
++	DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
++	DUMPREG(HDMI_TG_3D);
++
++	DRM_DEBUG_KMS("%s: ---- PACKET REGISTERS ----\n", prefix);
++	DUMPREG(HDMI_AVI_CON);
++	DUMPREG(HDMI_AVI_HEADER0);
++	DUMPREG(HDMI_AVI_HEADER1);
++	DUMPREG(HDMI_AVI_HEADER2);
++	DUMPREG(HDMI_AVI_CHECK_SUM);
++	DUMPREG(HDMI_VSI_CON);
++	DUMPREG(HDMI_VSI_HEADER0);
++	DUMPREG(HDMI_VSI_HEADER1);
++	DUMPREG(HDMI_VSI_HEADER2);
++	for (i = 0; i < 7; ++i)
++		DUMPREG(HDMI_VSI_DATA(i));
++
++#undef DUMPREG
++}
++
++static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
++{
++	if (hdata->is_v13)
++		hdmi_v13_regs_dump(hdata, prefix);
++	else
++		hdmi_v14_regs_dump(hdata, prefix);
++}
++
++static int hdmi_v13_conf_index(struct drm_display_mode *mode)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i)
++		if (hdmi_v13_confs[i].width == mode->hdisplay &&
++				hdmi_v13_confs[i].height == mode->vdisplay &&
++				hdmi_v13_confs[i].vrefresh == mode->vrefresh &&
++				hdmi_v13_confs[i].interlace ==
++				((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
++				 true : false))
++			return i;
++
++	return -EINVAL;
++}
++
++static int hdmi_v14_conf_index(struct drm_display_mode *mode)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
++		if (hdmi_confs[i].width == mode->hdisplay &&
++				hdmi_confs[i].height == mode->vdisplay &&
++				hdmi_confs[i].vrefresh == mode->vrefresh &&
++				hdmi_confs[i].interlace ==
++				((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
++				 true : false))
++			return i;
++
++	return -EINVAL;
++}
++
++static int hdmi_conf_index(struct hdmi_context *hdata,
++			   struct drm_display_mode *mode)
++{
++	if (hdata->is_v13)
++		return hdmi_v13_conf_index(mode);
++
++	return hdmi_v14_conf_index(mode);
++}
++
++static bool hdmi_is_connected(void *ctx)
++{
++	struct hdmi_context *hdata = ctx;
++	u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
++
++	if (val)
++		return true;
++
++	return false;
++}
++
++static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
++				u8 *edid, int len)
++{
++	struct edid *raw_edid;
++	struct hdmi_context *hdata = ctx;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	if (!hdata->ddc_port)
++		return -ENODEV;
++
++	raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
++	if (raw_edid) {
++		memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
++					* EDID_LENGTH, len));
++		DRM_DEBUG_KMS("width[%d] x height[%d]\n",
++				raw_edid->width_cm, raw_edid->height_cm);
++	} else {
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
++static int hdmi_v13_check_timing(struct fb_videomode *check_timing)
++{
++	int i;
++
++	DRM_DEBUG_KMS("valid mode : xres=%d, yres=%d, refresh=%d, intl=%d\n",
++			check_timing->xres, check_timing->yres,
++			check_timing->refresh, (check_timing->vmode &
++			FB_VMODE_INTERLACED) ? true : false);
++
++	for (i = 0; i < ARRAY_SIZE(hdmi_v13_confs); ++i)
++		if (hdmi_v13_confs[i].width == check_timing->xres &&
++			hdmi_v13_confs[i].height == check_timing->yres &&
++			hdmi_v13_confs[i].vrefresh == check_timing->refresh &&
++			hdmi_v13_confs[i].interlace ==
++			((check_timing->vmode & FB_VMODE_INTERLACED) ?
++			 true : false))
++				return 0;
++
++	/* TODO */
++
++	return -EINVAL;
++}
++
++static int hdmi_v14_check_timing(struct fb_videomode *check_timing)
++{
++	int i;
++
++	DRM_DEBUG_KMS("valid mode : xres=%d, yres=%d, refresh=%d, intl=%d\n",
++			check_timing->xres, check_timing->yres,
++			check_timing->refresh, (check_timing->vmode &
++			FB_VMODE_INTERLACED) ? true : false);
++
++	for (i = 0; i < ARRAY_SIZE(hdmi_confs); i++)
++		if (hdmi_confs[i].width == check_timing->xres &&
++			hdmi_confs[i].height == check_timing->yres &&
++			hdmi_confs[i].vrefresh == check_timing->refresh &&
++			hdmi_confs[i].interlace ==
++			((check_timing->vmode & FB_VMODE_INTERLACED) ?
++			 true : false))
++				return 0;
++
++	/* TODO */
++
++	return -EINVAL;
++}
++
++static int hdmi_check_timing(void *ctx, void *timing)
++{
++	struct hdmi_context *hdata = ctx;
++	struct fb_videomode *check_timing = timing;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres,
++			check_timing->yres, check_timing->refresh,
++			check_timing->vmode);
++
++	if (hdata->is_v13)
++		return hdmi_v13_check_timing(check_timing);
++	else
++		return hdmi_v14_check_timing(check_timing);
++}
++
++static int hdmi_display_power_on(void *ctx, int mode)
++{
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	switch (mode) {
++	case DRM_MODE_DPMS_ON:
++		DRM_DEBUG_KMS("hdmi [on]\n");
++		break;
++	case DRM_MODE_DPMS_STANDBY:
++		break;
++	case DRM_MODE_DPMS_SUSPEND:
++		break;
++	case DRM_MODE_DPMS_OFF:
++		DRM_DEBUG_KMS("hdmi [off]\n");
++		break;
++	default:
++		break;
++	}
++
++	return 0;
++}
++
++static void hdmi_set_acr(u32 freq, u8 *acr)
++{
++	u32 n, cts;
++
++	switch (freq) {
++	case 32000:
++		n = 4096;
++		cts = 27000;
++		break;
++	case 44100:
++		n = 6272;
++		cts = 30000;
++		break;
++	case 88200:
++		n = 12544;
++		cts = 30000;
++		break;
++	case 176400:
++		n = 25088;
++		cts = 30000;
++		break;
++	case 48000:
++		n = 6144;
++		cts = 27000;
++		break;
++	case 96000:
++		n = 12288;
++		cts = 27000;
++		break;
++	case 192000:
++		n = 24576;
++		cts = 27000;
++		break;
++	default:
++		n = 0;
++		cts = 0;
++		break;
++	}
++
++	acr[1] = cts >> 16;
++	acr[2] = cts >> 8 & 0xff;
++	acr[3] = cts & 0xff;
++
++	acr[4] = n >> 16;
++	acr[5] = n >> 8 & 0xff;
++	acr[6] = n & 0xff;
++}
++
++static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr)
++{
++	hdmi_reg_writeb(hdata, HDMI_ACR_N0, acr[6]);
++	hdmi_reg_writeb(hdata, HDMI_ACR_N1, acr[5]);
++	hdmi_reg_writeb(hdata, HDMI_ACR_N2, acr[4]);
++	hdmi_reg_writeb(hdata, HDMI_ACR_MCTS0, acr[3]);
++	hdmi_reg_writeb(hdata, HDMI_ACR_MCTS1, acr[2]);
++	hdmi_reg_writeb(hdata, HDMI_ACR_MCTS2, acr[1]);
++	hdmi_reg_writeb(hdata, HDMI_ACR_CTS0, acr[3]);
++	hdmi_reg_writeb(hdata, HDMI_ACR_CTS1, acr[2]);
++	hdmi_reg_writeb(hdata, HDMI_ACR_CTS2, acr[1]);
++
++	if (hdata->is_v13)
++		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 4);
++	else
++		hdmi_reg_writeb(hdata, HDMI_ACR_CON, 4);
++}
++
++static void hdmi_audio_init(struct hdmi_context *hdata)
++{
++	u32 sample_rate, bits_per_sample, frame_size_code;
++	u32 data_num, bit_ch, sample_frq;
++	u32 val;
++	u8 acr[7];
++
++	sample_rate = 44100;
++	bits_per_sample = 16;
++	frame_size_code = 0;
++
++	switch (bits_per_sample) {
++	case 20:
++		data_num = 2;
++		bit_ch  = 1;
++		break;
++	case 24:
++		data_num = 3;
++		bit_ch  = 1;
++		break;
++	default:
++		data_num = 1;
++		bit_ch  = 0;
++		break;
++	}
++
++	hdmi_set_acr(sample_rate, acr);
++	hdmi_reg_acr(hdata, acr);
++
++	hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CON, HDMI_I2S_IN_DISABLE
++				| HDMI_I2S_AUD_I2S | HDMI_I2S_CUV_I2S_ENABLE
++				| HDMI_I2S_MUX_ENABLE);
++
++	hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CH, HDMI_I2S_CH0_EN
++			| HDMI_I2S_CH1_EN | HDMI_I2S_CH2_EN);
++
++	hdmi_reg_writeb(hdata, HDMI_I2S_MUX_CUV, HDMI_I2S_CUV_RL_EN);
++
++	sample_frq = (sample_rate == 44100) ? 0 :
++			(sample_rate == 48000) ? 2 :
++			(sample_rate == 32000) ? 3 :
++			(sample_rate == 96000) ? 0xa : 0x0;
++
++	hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_DIS);
++	hdmi_reg_writeb(hdata, HDMI_I2S_CLK_CON, HDMI_I2S_CLK_EN);
++
++	val = hdmi_reg_read(hdata, HDMI_I2S_DSD_CON) | 0x01;
++	hdmi_reg_writeb(hdata, HDMI_I2S_DSD_CON, val);
++
++	/* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */
++	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5)
++			| HDMI_I2S_SEL_LRCK(6));
++	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1)
++			| HDMI_I2S_SEL_SDATA2(4));
++	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1)
++			| HDMI_I2S_SEL_SDATA2(2));
++	hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0));
++
++	/* I2S_CON_1 & 2 */
++	hdmi_reg_writeb(hdata, HDMI_I2S_CON_1, HDMI_I2S_SCLK_FALLING_EDGE
++			| HDMI_I2S_L_CH_LOW_POL);
++	hdmi_reg_writeb(hdata, HDMI_I2S_CON_2, HDMI_I2S_MSB_FIRST_MODE
++			| HDMI_I2S_SET_BIT_CH(bit_ch)
++			| HDMI_I2S_SET_SDATA_BIT(data_num)
++			| HDMI_I2S_BASIC_FORMAT);
++
++	/* Configure register related to CUV information */
++	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_0, HDMI_I2S_CH_STATUS_MODE_0
++			| HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH
++			| HDMI_I2S_COPYRIGHT
++			| HDMI_I2S_LINEAR_PCM
++			| HDMI_I2S_CONSUMER_FORMAT);
++	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_1, HDMI_I2S_CD_PLAYER);
++	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_2, HDMI_I2S_SET_SOURCE_NUM(0));
++	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_3, HDMI_I2S_CLK_ACCUR_LEVEL_2
++			| HDMI_I2S_SET_SMP_FREQ(sample_frq));
++	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_4,
++			HDMI_I2S_ORG_SMP_FREQ_44_1
++			| HDMI_I2S_WORD_LEN_MAX24_24BITS
++			| HDMI_I2S_WORD_LEN_MAX_24BITS);
++
++	hdmi_reg_writeb(hdata, HDMI_I2S_CH_ST_CON, HDMI_I2S_CH_STATUS_RELOAD);
++}
++
++static void hdmi_audio_control(struct hdmi_context *hdata, bool onoff)
++{
++	u32 mod;
++
++	mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
++	if (mod & HDMI_DVI_MODE_EN)
++		return;
++
++	hdmi_reg_writeb(hdata, HDMI_AUI_CON, onoff ? 2 : 0);
++	hdmi_reg_writemask(hdata, HDMI_CON_0, onoff ?
++			HDMI_ASP_EN : HDMI_ASP_DIS, HDMI_ASP_MASK);
++}
++
++static void hdmi_conf_reset(struct hdmi_context *hdata)
++{
++	u32 reg;
++
++	/* disable hpd handle for drm */
++	hdata->hpd_handle = false;
++
++	if (hdata->is_v13)
++		reg = HDMI_V13_CORE_RSTOUT;
++	else
++		reg = HDMI_CORE_RSTOUT;
++
++	/* resetting HDMI core */
++	hdmi_reg_writemask(hdata, reg,  0, HDMI_CORE_SW_RSTOUT);
++	mdelay(10);
++	hdmi_reg_writemask(hdata, reg, ~0, HDMI_CORE_SW_RSTOUT);
++	mdelay(10);
++
++	/* enable hpd handle for drm */
++	hdata->hpd_handle = true;
++}
++
++static void hdmi_conf_init(struct hdmi_context *hdata)
++{
++	/* disable hpd handle for drm */
++	hdata->hpd_handle = false;
++
++	/* enable HPD interrupts */
++	hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
++		HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
++	mdelay(10);
++	hdmi_reg_writemask(hdata, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
++		HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
++
++	/* choose HDMI mode */
++	hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
++		HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
++	/* disable bluescreen */
++	hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
++
++	if (hdata->is_v13) {
++		/* choose bluescreen (fecal) color */
++		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_0, 0x12);
++		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_1, 0x34);
++		hdmi_reg_writeb(hdata, HDMI_V13_BLUE_SCREEN_2, 0x56);
++
++		/* enable AVI packet every vsync, fixes purple line problem */
++		hdmi_reg_writeb(hdata, HDMI_V13_AVI_CON, 0x02);
++		/* force RGB, look to CEA-861-D, table 7 for more detail */
++		hdmi_reg_writeb(hdata, HDMI_V13_AVI_BYTE(0), 0 << 5);
++		hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5);
++
++		hdmi_reg_writeb(hdata, HDMI_V13_SPD_CON, 0x02);
++		hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
++		hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
++	} else {
++		/* enable AVI packet every vsync, fixes purple line problem */
++		hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
++		hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
++		hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
++	}
++
++	/* enable hpd handle for drm */
++	hdata->hpd_handle = true;
++}
++
++static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
++{
++	const struct hdmi_v13_preset_conf *conf =
++		hdmi_v13_confs[hdata->cur_conf].conf;
++	const struct hdmi_v13_core_regs *core = &conf->core;
++	const struct hdmi_v13_tg_regs *tg = &conf->tg;
++	int tries;
++
++	/* setting core registers */
++	hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
++	hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_0, core->v_blank[0]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_1, core->v_blank[1]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_2, core->v_blank[2]);
++	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_0, core->h_v_line[0]);
++	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_1, core->h_v_line[1]);
++	hdmi_reg_writeb(hdata, HDMI_V13_H_V_LINE_2, core->h_v_line[2]);
++	hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
++	hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_0, core->v_blank_f[0]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_1, core->v_blank_f[1]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_BLANK_F_2, core->v_blank_f[2]);
++	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_0, core->h_sync_gen[0]);
++	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_1, core->h_sync_gen[1]);
++	hdmi_reg_writeb(hdata, HDMI_V13_H_SYNC_GEN_2, core->h_sync_gen[2]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
++	hdmi_reg_writeb(hdata, HDMI_V13_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
++	/* Timing generator registers */
++	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
++
++	/* waiting for HDMIPHY's PLL to get to steady state */
++	for (tries = 100; tries; --tries) {
++		u32 val = hdmi_reg_read(hdata, HDMI_V13_PHY_STATUS);
++		if (val & HDMI_PHY_STATUS_READY)
++			break;
++		mdelay(1);
++	}
++	/* steady state not achieved */
++	if (tries == 0) {
++		DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
++		hdmi_regs_dump(hdata, "timing apply");
++	}
++
++	clk_disable(hdata->res.sclk_hdmi);
++	clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy);
++	clk_enable(hdata->res.sclk_hdmi);
++
++	/* enable HDMI and timing generator */
++	hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
++	if (core->int_pro_mode[0])
++		hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
++				HDMI_FIELD_EN);
++	else
++		hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
++}
++
++static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
++{
++	const struct hdmi_preset_conf *conf = hdmi_confs[hdata->cur_conf].conf;
++	const struct hdmi_core_regs *core = &conf->core;
++	const struct hdmi_tg_regs *tg = &conf->tg;
++	int tries;
++
++	/* setting core registers */
++	hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
++	hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
++	hdmi_reg_writeb(hdata, HDMI_V2_BLANK_0, core->v2_blank[0]);
++	hdmi_reg_writeb(hdata, HDMI_V2_BLANK_1, core->v2_blank[1]);
++	hdmi_reg_writeb(hdata, HDMI_V1_BLANK_0, core->v1_blank[0]);
++	hdmi_reg_writeb(hdata, HDMI_V1_BLANK_1, core->v1_blank[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_LINE_0, core->v_line[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_LINE_1, core->v_line[1]);
++	hdmi_reg_writeb(hdata, HDMI_H_LINE_0, core->h_line[0]);
++	hdmi_reg_writeb(hdata, HDMI_H_LINE_1, core->h_line[1]);
++	hdmi_reg_writeb(hdata, HDMI_HSYNC_POL, core->hsync_pol[0]);
++	hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
++	hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_0, core->v_blank_f0[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F0_1, core->v_blank_f0[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_0, core->v_blank_f1[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F1_1, core->v_blank_f1[1]);
++	hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_0, core->h_sync_start[0]);
++	hdmi_reg_writeb(hdata, HDMI_H_SYNC_START_1, core->h_sync_start[1]);
++	hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_0, core->h_sync_end[0]);
++	hdmi_reg_writeb(hdata, HDMI_H_SYNC_END_1, core->h_sync_end[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_0,
++			core->v_sync_line_bef_2[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_2_1,
++			core->v_sync_line_bef_2[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_0,
++			core->v_sync_line_bef_1[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_BEF_1_1,
++			core->v_sync_line_bef_1[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_0,
++			core->v_sync_line_aft_2[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_2_1,
++			core->v_sync_line_aft_2[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_0,
++			core->v_sync_line_aft_1[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_1_1,
++			core->v_sync_line_aft_1[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_0,
++			core->v_sync_line_aft_pxl_2[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_2_1,
++			core->v_sync_line_aft_pxl_2[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_0,
++			core->v_sync_line_aft_pxl_1[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_1_1,
++			core->v_sync_line_aft_pxl_1[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_0, core->v_blank_f2[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F2_1, core->v_blank_f2[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_0, core->v_blank_f3[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F3_1, core->v_blank_f3[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_0, core->v_blank_f4[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F4_1, core->v_blank_f4[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_0, core->v_blank_f5[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_BLANK_F5_1, core->v_blank_f5[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_0,
++			core->v_sync_line_aft_3[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_3_1,
++			core->v_sync_line_aft_3[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_0,
++			core->v_sync_line_aft_4[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_4_1,
++			core->v_sync_line_aft_4[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_0,
++			core->v_sync_line_aft_5[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_5_1,
++			core->v_sync_line_aft_5[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_0,
++			core->v_sync_line_aft_6[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_6_1,
++			core->v_sync_line_aft_6[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_0,
++			core->v_sync_line_aft_pxl_3[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_3_1,
++			core->v_sync_line_aft_pxl_3[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_0,
++			core->v_sync_line_aft_pxl_4[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_4_1,
++			core->v_sync_line_aft_pxl_4[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0,
++			core->v_sync_line_aft_pxl_5[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_1,
++			core->v_sync_line_aft_pxl_5[1]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0,
++			core->v_sync_line_aft_pxl_6[0]);
++	hdmi_reg_writeb(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_1,
++			core->v_sync_line_aft_pxl_6[1]);
++	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_0, core->vact_space_1[0]);
++	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_1_1, core->vact_space_1[1]);
++	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_0, core->vact_space_2[0]);
++	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_2_1, core->vact_space_2[1]);
++	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_0, core->vact_space_3[0]);
++	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_3_1, core->vact_space_3[1]);
++	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_0, core->vact_space_4[0]);
++	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_4_1, core->vact_space_4[1]);
++	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_0, core->vact_space_5[0]);
++	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_5_1, core->vact_space_5[1]);
++	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_0, core->vact_space_6[0]);
++	hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]);
++
++	/* Timing generator registers */
++	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
++	hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
++	hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d);
++
++	/* waiting for HDMIPHY's PLL to get to steady state */
++	for (tries = 100; tries; --tries) {
++		u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS_0);
++		if (val & HDMI_PHY_STATUS_READY)
++			break;
++		mdelay(1);
++	}
++	/* steady state not achieved */
++	if (tries == 0) {
++		DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
++		hdmi_regs_dump(hdata, "timing apply");
++	}
++
++	clk_disable(hdata->res.sclk_hdmi);
++	clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy);
++	clk_enable(hdata->res.sclk_hdmi);
++
++	/* enable HDMI and timing generator */
++	hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
++	if (core->int_pro_mode[0])
++		hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
++				HDMI_FIELD_EN);
++	else
++		hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
++}
++
++static void hdmi_timing_apply(struct hdmi_context *hdata)
++{
++	if (hdata->is_v13)
++		hdmi_v13_timing_apply(hdata);
++	else
++		hdmi_v14_timing_apply(hdata);
++}
++
++static void hdmiphy_conf_reset(struct hdmi_context *hdata)
++{
++	u8 buffer[2];
++	u32 reg;
++
++	clk_disable(hdata->res.sclk_hdmi);
++	clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_pixel);
++	clk_enable(hdata->res.sclk_hdmi);
++
++	/* operation mode */
++	buffer[0] = 0x1f;
++	buffer[1] = 0x00;
++
++	if (hdata->hdmiphy_port)
++		i2c_master_send(hdata->hdmiphy_port, buffer, 2);
++
++	if (hdata->is_v13)
++		reg = HDMI_V13_PHY_RSTOUT;
++	else
++		reg = HDMI_PHY_RSTOUT;
++
++	/* reset hdmiphy */
++	hdmi_reg_writemask(hdata, reg, ~0, HDMI_PHY_SW_RSTOUT);
++	mdelay(10);
++	hdmi_reg_writemask(hdata, reg,  0, HDMI_PHY_SW_RSTOUT);
++	mdelay(10);
++}
++
++static void hdmiphy_conf_apply(struct hdmi_context *hdata)
++{
++	const u8 *hdmiphy_data;
++	u8 buffer[32];
++	u8 operation[2];
++	u8 read_buffer[32] = {0, };
++	int ret;
++	int i;
++
++	if (!hdata->hdmiphy_port) {
++		DRM_ERROR("hdmiphy is not attached\n");
++		return;
++	}
++
++	/* pixel clock */
++	if (hdata->is_v13)
++		hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data;
++	else
++		hdmiphy_data = hdmi_confs[hdata->cur_conf].hdmiphy_data;
++
++	memcpy(buffer, hdmiphy_data, 32);
++	ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
++	if (ret != 32) {
++		DRM_ERROR("failed to configure HDMIPHY via I2C\n");
++		return;
++	}
++
++	mdelay(10);
++
++	/* operation mode */
++	operation[0] = 0x1f;
++	operation[1] = 0x80;
++
++	ret = i2c_master_send(hdata->hdmiphy_port, operation, 2);
++	if (ret != 2) {
++		DRM_ERROR("failed to enable hdmiphy\n");
++		return;
++	}
++
++	ret = i2c_master_recv(hdata->hdmiphy_port, read_buffer, 32);
++	if (ret < 0) {
++		DRM_ERROR("failed to read hdmiphy config\n");
++		return;
++	}
++
++	for (i = 0; i < ret; i++)
++		DRM_DEBUG_KMS("hdmiphy[0x%02x] write[0x%02x] - "
++			"recv [0x%02x]\n", i, buffer[i], read_buffer[i]);
++}
++
++static void hdmi_conf_apply(struct hdmi_context *hdata)
++{
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	hdmiphy_conf_reset(hdata);
++	hdmiphy_conf_apply(hdata);
++
++	hdmi_conf_reset(hdata);
++	hdmi_conf_init(hdata);
++	hdmi_audio_init(hdata);
++
++	/* setting core registers */
++	hdmi_timing_apply(hdata);
++	hdmi_audio_control(hdata, true);
++
++	hdmi_regs_dump(hdata, "start");
++}
++
++static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
++				struct drm_display_mode *mode,
++				struct drm_display_mode *adjusted_mode)
++{
++	struct drm_display_mode *m;
++	struct hdmi_context *hdata = ctx;
++	int index;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	drm_mode_set_crtcinfo(adjusted_mode, 0);
++
++	if (hdata->is_v13)
++		index = hdmi_v13_conf_index(adjusted_mode);
++	else
++		index = hdmi_v14_conf_index(adjusted_mode);
++
++	/* just return if user desired mode exists. */
++	if (index >= 0)
++		return;
++
++	/*
++	 * otherwise, find the most suitable mode among modes and change it
++	 * to adjusted_mode.
++	 */
++	list_for_each_entry(m, &connector->modes, head) {
++		if (hdata->is_v13)
++			index = hdmi_v13_conf_index(m);
++		else
++			index = hdmi_v14_conf_index(m);
++
++		if (index >= 0) {
++			DRM_INFO("desired mode doesn't exist so\n");
++			DRM_INFO("use the most suitable mode among modes.\n");
++			memcpy(adjusted_mode, m, sizeof(*m));
++			break;
++		}
++	}
++}
++
++static void hdmi_mode_set(void *ctx, void *mode)
++{
++	struct hdmi_context *hdata = ctx;
++	int conf_idx;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	conf_idx = hdmi_conf_index(hdata, mode);
++	if (conf_idx >= 0)
++		hdata->cur_conf = conf_idx;
++	else
++		DRM_DEBUG_KMS("not supported mode\n");
++}
++
++static void hdmi_get_max_resol(void *ctx, unsigned int *width,
++					unsigned int *height)
++{
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	*width = MAX_WIDTH;
++	*height = MAX_HEIGHT;
++}
++
++static void hdmi_commit(void *ctx)
++{
++	struct hdmi_context *hdata = ctx;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	hdmi_conf_apply(hdata);
++
++	hdata->enabled = true;
++}
++
++static void hdmi_disable(void *ctx)
++{
++	struct hdmi_context *hdata = ctx;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	if (hdata->enabled) {
++		hdmi_audio_control(hdata, false);
++		hdmiphy_conf_reset(hdata);
++		hdmi_conf_reset(hdata);
++	}
++}
++
++static struct exynos_hdmi_ops hdmi_ops = {
++	/* display */
++	.is_connected	= hdmi_is_connected,
++	.get_edid	= hdmi_get_edid,
++	.check_timing	= hdmi_check_timing,
++	.power_on	= hdmi_display_power_on,
++
++	/* manager */
++	.mode_fixup	= hdmi_mode_fixup,
++	.mode_set	= hdmi_mode_set,
++	.get_max_resol	= hdmi_get_max_resol,
++	.commit		= hdmi_commit,
++	.disable	= hdmi_disable,
++};
++
++/*
++ * Handle hotplug events outside the interrupt handler proper.
++ */
++static void hdmi_hotplug_func(struct work_struct *work)
++{
++	struct hdmi_context *hdata =
++		container_of(work, struct hdmi_context, hotplug_work);
++	struct exynos_drm_hdmi_context *ctx =
++		(struct exynos_drm_hdmi_context *)hdata->parent_ctx;
++
++	drm_helper_hpd_irq_event(ctx->drm_dev);
++}
++
++static irqreturn_t hdmi_irq_handler(int irq, void *arg)
++{
++	struct exynos_drm_hdmi_context *ctx = arg;
++	struct hdmi_context *hdata = ctx->ctx;
++	u32 intc_flag;
++
++	intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
++	/* clearing flags for HPD plug/unplug */
++	if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
++		DRM_DEBUG_KMS("unplugged, handling:%d\n", hdata->hpd_handle);
++		hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
++			HDMI_INTC_FLAG_HPD_UNPLUG);
++	}
++	if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
++		DRM_DEBUG_KMS("plugged, handling:%d\n", hdata->hpd_handle);
++		hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
++			HDMI_INTC_FLAG_HPD_PLUG);
++	}
++
++	if (ctx->drm_dev && hdata->hpd_handle)
++		queue_work(hdata->wq, &hdata->hotplug_work);
++
++	return IRQ_HANDLED;
++}
++
++static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
++{
++	struct device *dev = hdata->dev;
++	struct hdmi_resources *res = &hdata->res;
++	static char *supply[] = {
++		"hdmi-en",
++		"vdd",
++		"vdd_osc",
++		"vdd_pll",
++	};
++	int i, ret;
++
++	DRM_DEBUG_KMS("HDMI resource init\n");
++
++	memset(res, 0, sizeof *res);
++
++	/* get clocks, power */
++	res->hdmi = clk_get(dev, "hdmi");
++	if (IS_ERR_OR_NULL(res->hdmi)) {
++		DRM_ERROR("failed to get clock 'hdmi'\n");
++		goto fail;
++	}
++	res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
++	if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
++		DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
++		goto fail;
++	}
++	res->sclk_pixel = clk_get(dev, "sclk_pixel");
++	if (IS_ERR_OR_NULL(res->sclk_pixel)) {
++		DRM_ERROR("failed to get clock 'sclk_pixel'\n");
++		goto fail;
++	}
++	res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
++	if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
++		DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
++		goto fail;
++	}
++	res->hdmiphy = clk_get(dev, "hdmiphy");
++	if (IS_ERR_OR_NULL(res->hdmiphy)) {
++		DRM_ERROR("failed to get clock 'hdmiphy'\n");
++		goto fail;
++	}
++
++	clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
++
++	res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
++		sizeof res->regul_bulk[0], GFP_KERNEL);
++	if (!res->regul_bulk) {
++		DRM_ERROR("failed to get memory for regulators\n");
++		goto fail;
++	}
++	for (i = 0; i < ARRAY_SIZE(supply); ++i) {
++		res->regul_bulk[i].supply = supply[i];
++		res->regul_bulk[i].consumer = NULL;
++	}
++	ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
++	if (ret) {
++		DRM_ERROR("failed to get regulators\n");
++		goto fail;
++	}
++	res->regul_count = ARRAY_SIZE(supply);
++
++	return 0;
++fail:
++	DRM_ERROR("HDMI resource init - failed\n");
++	return -ENODEV;
++}
++
++static int hdmi_resources_cleanup(struct hdmi_context *hdata)
++{
++	struct hdmi_resources *res = &hdata->res;
++
++	regulator_bulk_free(res->regul_count, res->regul_bulk);
++	/* kfree is NULL-safe */
++	kfree(res->regul_bulk);
++	if (!IS_ERR_OR_NULL(res->hdmiphy))
++		clk_put(res->hdmiphy);
++	if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
++		clk_put(res->sclk_hdmiphy);
++	if (!IS_ERR_OR_NULL(res->sclk_pixel))
++		clk_put(res->sclk_pixel);
++	if (!IS_ERR_OR_NULL(res->sclk_hdmi))
++		clk_put(res->sclk_hdmi);
++	if (!IS_ERR_OR_NULL(res->hdmi))
++		clk_put(res->hdmi);
++	memset(res, 0, sizeof *res);
++
++	return 0;
++}
++
++static void hdmi_resource_poweron(struct hdmi_context *hdata)
++{
++	struct hdmi_resources *res = &hdata->res;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	/* turn HDMI power on */
++	regulator_bulk_enable(res->regul_count, res->regul_bulk);
++	/* power-on hdmi physical interface */
++	clk_enable(res->hdmiphy);
++	/* turn clocks on */
++	clk_enable(res->hdmi);
++	clk_enable(res->sclk_hdmi);
++
++	hdmiphy_conf_reset(hdata);
++	hdmi_conf_reset(hdata);
++	hdmi_conf_init(hdata);
++	hdmi_audio_init(hdata);
++}
++
++static void hdmi_resource_poweroff(struct hdmi_context *hdata)
++{
++	struct hdmi_resources *res = &hdata->res;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	/* turn clocks off */
++	clk_disable(res->sclk_hdmi);
++	clk_disable(res->hdmi);
++	/* power-off hdmiphy */
++	clk_disable(res->hdmiphy);
++	/* turn HDMI power off */
++	regulator_bulk_disable(res->regul_count, res->regul_bulk);
++}
++
++static int hdmi_runtime_suspend(struct device *dev)
++{
++	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
++
++	DRM_DEBUG_KMS("%s\n", __func__);
++
++	hdmi_resource_poweroff(ctx->ctx);
++
++	return 0;
++}
++
++static int hdmi_runtime_resume(struct device *dev)
++{
++	struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
++
++	DRM_DEBUG_KMS("%s\n", __func__);
++
++	hdmi_resource_poweron(ctx->ctx);
++
++	return 0;
++}
++
++static const struct dev_pm_ops hdmi_pm_ops = {
++	.runtime_suspend = hdmi_runtime_suspend,
++	.runtime_resume	 = hdmi_runtime_resume,
++};
++
++static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
++
++void hdmi_attach_ddc_client(struct i2c_client *ddc)
++{
++	if (ddc)
++		hdmi_ddc = ddc;
++}
++
++void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
++{
++	if (hdmiphy)
++		hdmi_hdmiphy = hdmiphy;
++}
++
++static int __devinit hdmi_probe(struct platform_device *pdev)
++{
++	struct device *dev = &pdev->dev;
++	struct exynos_drm_hdmi_context *drm_hdmi_ctx;
++	struct hdmi_context *hdata;
++	struct exynos_drm_hdmi_pdata *pdata;
++	struct resource *res;
++	int ret;
++
++	DRM_DEBUG_KMS("[%d]\n", __LINE__);
++
++	pdata = pdev->dev.platform_data;
++	if (!pdata) {
++		DRM_ERROR("no platform data specified\n");
++		return -EINVAL;
++	}
++
++	drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
++	if (!drm_hdmi_ctx) {
++		DRM_ERROR("failed to allocate common hdmi context.\n");
++		return -ENOMEM;
++	}
++
++	hdata = kzalloc(sizeof(struct hdmi_context), GFP_KERNEL);
++	if (!hdata) {
++		DRM_ERROR("out of memory\n");
++		kfree(drm_hdmi_ctx);
++		return -ENOMEM;
++	}
++
++	drm_hdmi_ctx->ctx = (void *)hdata;
++	hdata->parent_ctx = (void *)drm_hdmi_ctx;
++
++	platform_set_drvdata(pdev, drm_hdmi_ctx);
++
++	hdata->is_v13 = pdata->is_v13;
++	hdata->default_win = pdata->default_win;
++	hdata->default_timing = &pdata->timing;
++	hdata->default_bpp = pdata->bpp;
++	hdata->dev = dev;
++
++	ret = hdmi_resources_init(hdata);
++	if (ret) {
++		ret = -EINVAL;
++		goto err_data;
++	}
++
++	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (!res) {
++		DRM_ERROR("failed to find registers\n");
++		ret = -ENOENT;
++		goto err_resource;
++	}
++
++	hdata->regs_res = request_mem_region(res->start, resource_size(res),
++					   dev_name(dev));
++	if (!hdata->regs_res) {
++		DRM_ERROR("failed to claim register region\n");
++		ret = -ENOENT;
++		goto err_resource;
++	}
++
++	hdata->regs = ioremap(res->start, resource_size(res));
++	if (!hdata->regs) {
++		DRM_ERROR("failed to map registers\n");
++		ret = -ENXIO;
++		goto err_req_region;
++	}
++
++	/* DDC i2c driver */
++	if (i2c_add_driver(&ddc_driver)) {
++		DRM_ERROR("failed to register ddc i2c driver\n");
++		ret = -ENOENT;
++		goto err_iomap;
++	}
++
++	hdata->ddc_port = hdmi_ddc;
++
++	/* hdmiphy i2c driver */
++	if (i2c_add_driver(&hdmiphy_driver)) {
++		DRM_ERROR("failed to register hdmiphy i2c driver\n");
++		ret = -ENOENT;
++		goto err_ddc;
++	}
++
++	hdata->hdmiphy_port = hdmi_hdmiphy;
++
++	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++	if (res == NULL) {
++		DRM_ERROR("get interrupt resource failed.\n");
++		ret = -ENXIO;
++		goto err_hdmiphy;
++	}
++
++	/* create workqueue and hotplug work */
++	hdata->wq = alloc_workqueue("exynos-drm-hdmi",
++			WQ_UNBOUND | WQ_NON_REENTRANT, 1);
++	if (hdata->wq == NULL) {
++		DRM_ERROR("Failed to create workqueue.\n");
++		ret = -ENOMEM;
++		goto err_hdmiphy;
++	}
++	INIT_WORK(&hdata->hotplug_work, hdmi_hotplug_func);
++
++	/* register hpd interrupt */
++	ret = request_irq(res->start, hdmi_irq_handler, 0, "drm_hdmi",
++				drm_hdmi_ctx);
++	if (ret) {
++		DRM_ERROR("request interrupt failed.\n");
++		goto err_workqueue;
++	}
++	hdata->irq = res->start;
++
++	/* register specific callbacks to common hdmi. */
++	exynos_hdmi_ops_register(&hdmi_ops);
++
++	hdmi_resource_poweron(hdata);
++
++	return 0;
++
++err_workqueue:
++	destroy_workqueue(hdata->wq);
++err_hdmiphy:
++	i2c_del_driver(&hdmiphy_driver);
++err_ddc:
++	i2c_del_driver(&ddc_driver);
++err_iomap:
++	iounmap(hdata->regs);
++err_req_region:
++	release_mem_region(hdata->regs_res->start,
++			resource_size(hdata->regs_res));
++err_resource:
++	hdmi_resources_cleanup(hdata);
++err_data:
++	kfree(hdata);
++	kfree(drm_hdmi_ctx);
++	return ret;
++}
++
++static int __devexit hdmi_remove(struct platform_device *pdev)
++{
++	struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
++	struct hdmi_context *hdata = ctx->ctx;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	hdmi_resource_poweroff(hdata);
++
++	disable_irq(hdata->irq);
++	free_irq(hdata->irq, hdata);
++
++	cancel_work_sync(&hdata->hotplug_work);
++	destroy_workqueue(hdata->wq);
++
++	hdmi_resources_cleanup(hdata);
++
++	iounmap(hdata->regs);
++
++	release_mem_region(hdata->regs_res->start,
++			resource_size(hdata->regs_res));
++
++	/* hdmiphy i2c driver */
++	i2c_del_driver(&hdmiphy_driver);
++	/* DDC i2c driver */
++	i2c_del_driver(&ddc_driver);
++
++	kfree(hdata);
++
++	return 0;
++}
++
++struct platform_driver hdmi_driver = {
++	.probe		= hdmi_probe,
++	.remove		= __devexit_p(hdmi_remove),
++	.driver		= {
++		.name	= "exynos4-hdmi",
++		.owner	= THIS_MODULE,
++		.pm = &hdmi_pm_ops,
++	},
++};
+diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h
+new file mode 100644
+index 0000000..1c3b6d8
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/exynos_hdmi.h
+@@ -0,0 +1,37 @@
++/*
++ *
++ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
++ * Authors:
++ *	Inki Dae <inki.dae at samsung.com>
++ *	Seung-Woo Kim <sw0312.kim at samsung.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _EXYNOS_HDMI_H_
++#define _EXYNOS_HDMI_H_
++
++void hdmi_attach_ddc_client(struct i2c_client *ddc);
++void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy);
++
++extern struct i2c_driver hdmiphy_driver;
++extern struct i2c_driver ddc_driver;
++
++#endif
+diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+new file mode 100644
+index 0000000..9fe2995
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+@@ -0,0 +1,58 @@
++/*
++ * Copyright (C) 2011 Samsung Electronics Co.Ltd
++ * Authors:
++ *	Seung-Woo Kim <sw0312.kim at samsung.com>
++ *	Inki Dae <inki.dae at samsung.com>
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ *
++ */
++
++#include "drmP.h"
++
++#include <linux/kernel.h>
++#include <linux/i2c.h>
++#include <linux/module.h>
++
++#include "exynos_drm_drv.h"
++#include "exynos_hdmi.h"
++
++
++static int hdmiphy_probe(struct i2c_client *client,
++	const struct i2c_device_id *id)
++{
++	hdmi_attach_hdmiphy_client(client);
++
++	dev_info(&client->adapter->dev, "attached s5p_hdmiphy "
++		"into i2c adapter successfully\n");
++
++	return 0;
++}
++
++static int hdmiphy_remove(struct i2c_client *client)
++{
++	dev_info(&client->adapter->dev, "detached s5p_hdmiphy "
++		"from i2c adapter successfully\n");
++
++	return 0;
++}
++
++static const struct i2c_device_id hdmiphy_id[] = {
++	{ "s5p_hdmiphy", 0 },
++	{ },
++};
++
++struct i2c_driver hdmiphy_driver = {
++	.driver = {
++		.name	= "s5p-hdmiphy",
++		.owner	= THIS_MODULE,
++	},
++	.id_table = hdmiphy_id,
++	.probe		= hdmiphy_probe,
++	.remove		= __devexit_p(hdmiphy_remove),
++	.command		= NULL,
++};
++EXPORT_SYMBOL(hdmiphy_driver);
+diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
+new file mode 100644
+index 0000000..e15438c
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/exynos_mixer.c
+@@ -0,0 +1,1112 @@
++/*
++ * Copyright (C) 2011 Samsung Electronics Co.Ltd
++ * Authors:
++ * Seung-Woo Kim <sw0312.kim at samsung.com>
++ *	Inki Dae <inki.dae at samsung.com>
++ *	Joonyoung Shim <jy0922.shim at samsung.com>
++ *
++ * Based on drivers/media/video/s5p-tv/mixer_reg.c
++ *
++ * This program is free software; you can redistribute  it and/or modify it
++ * under  the terms of  the GNU General  Public License as published by the
++ * Free Software Foundation;  either version 2 of the  License, or (at your
++ * option) any later version.
++ *
++ */
++
++#include "drmP.h"
++
++#include "regs-mixer.h"
++#include "regs-vp.h"
++
++#include <linux/kernel.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++#include <linux/i2c.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/delay.h>
++#include <linux/pm_runtime.h>
++#include <linux/clk.h>
++#include <linux/regulator/consumer.h>
++
++#include <drm/exynos_drm.h>
++
++#include "exynos_drm_drv.h"
++#include "exynos_drm_hdmi.h"
++
++#define MIXER_WIN_NR		3
++#define MIXER_DEFAULT_WIN	0
++
++#define get_mixer_context(dev)	platform_get_drvdata(to_platform_device(dev))
++
++struct hdmi_win_data {
++	dma_addr_t		dma_addr;
++	void __iomem		*vaddr;
++	dma_addr_t		chroma_dma_addr;
++	void __iomem		*chroma_vaddr;
++	uint32_t		pixel_format;
++	unsigned int		bpp;
++	unsigned int		crtc_x;
++	unsigned int		crtc_y;
++	unsigned int		crtc_width;
++	unsigned int		crtc_height;
++	unsigned int		fb_x;
++	unsigned int		fb_y;
++	unsigned int		fb_width;
++	unsigned int		fb_height;
++	unsigned int		mode_width;
++	unsigned int		mode_height;
++	unsigned int		scan_flags;
++};
++
++struct mixer_resources {
++	struct device		*dev;
++	int			irq;
++	void __iomem		*mixer_regs;
++	void __iomem		*vp_regs;
++	spinlock_t		reg_slock;
++	struct clk		*mixer;
++	struct clk		*vp;
++	struct clk		*sclk_mixer;
++	struct clk		*sclk_hdmi;
++	struct clk		*sclk_dac;
++};
++
++struct mixer_context {
++	unsigned int		irq;
++	int			pipe;
++	bool			interlace;
++
++	struct mixer_resources	mixer_res;
++	struct hdmi_win_data	win_data[MIXER_WIN_NR];
++};
++
++static const u8 filter_y_horiz_tap8[] = {
++	0,	-1,	-1,	-1,	-1,	-1,	-1,	-1,
++	-1,	-1,	-1,	-1,	-1,	0,	0,	0,
++	0,	2,	4,	5,	6,	6,	6,	6,
++	6,	5,	5,	4,	3,	2,	1,	1,
++	0,	-6,	-12,	-16,	-18,	-20,	-21,	-20,
++	-20,	-18,	-16,	-13,	-10,	-8,	-5,	-2,
++	127,	126,	125,	121,	114,	107,	99,	89,
++	79,	68,	57,	46,	35,	25,	16,	8,
++};
++
++static const u8 filter_y_vert_tap4[] = {
++	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
++	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
++	127,	126,	124,	118,	111,	102,	92,	81,
++	70,	59,	48,	37,	27,	19,	11,	5,
++	0,	5,	11,	19,	27,	37,	48,	59,
++	70,	81,	92,	102,	111,	118,	124,	126,
++	0,	0,	-1,	-1,	-2,	-3,	-4,	-5,
++	-6,	-7,	-8,	-8,	-8,	-8,	-6,	-3,
++};
++
++static const u8 filter_cr_horiz_tap4[] = {
++	0,	-3,	-6,	-8,	-8,	-8,	-8,	-7,
++	-6,	-5,	-4,	-3,	-2,	-1,	-1,	0,
++	127,	126,	124,	118,	111,	102,	92,	81,
++	70,	59,	48,	37,	27,	19,	11,	5,
++};
++
++static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id)
++{
++	return readl(res->vp_regs + reg_id);
++}
++
++static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id,
++				 u32 val)
++{
++	writel(val, res->vp_regs + reg_id);
++}
++
++static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id,
++				 u32 val, u32 mask)
++{
++	u32 old = vp_reg_read(res, reg_id);
++
++	val = (val & mask) | (old & ~mask);
++	writel(val, res->vp_regs + reg_id);
++}
++
++static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id)
++{
++	return readl(res->mixer_regs + reg_id);
++}
++
++static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id,
++				 u32 val)
++{
++	writel(val, res->mixer_regs + reg_id);
++}
++
++static inline void mixer_reg_writemask(struct mixer_resources *res,
++				 u32 reg_id, u32 val, u32 mask)
++{
++	u32 old = mixer_reg_read(res, reg_id);
++
++	val = (val & mask) | (old & ~mask);
++	writel(val, res->mixer_regs + reg_id);
++}
++
++static void mixer_regs_dump(struct mixer_context *ctx)
++{
++#define DUMPREG(reg_id) \
++do { \
++	DRM_DEBUG_KMS(#reg_id " = %08x\n", \
++		(u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \
++} while (0)
++
++	DUMPREG(MXR_STATUS);
++	DUMPREG(MXR_CFG);
++	DUMPREG(MXR_INT_EN);
++	DUMPREG(MXR_INT_STATUS);
++
++	DUMPREG(MXR_LAYER_CFG);
++	DUMPREG(MXR_VIDEO_CFG);
++
++	DUMPREG(MXR_GRAPHIC0_CFG);
++	DUMPREG(MXR_GRAPHIC0_BASE);
++	DUMPREG(MXR_GRAPHIC0_SPAN);
++	DUMPREG(MXR_GRAPHIC0_WH);
++	DUMPREG(MXR_GRAPHIC0_SXY);
++	DUMPREG(MXR_GRAPHIC0_DXY);
++
++	DUMPREG(MXR_GRAPHIC1_CFG);
++	DUMPREG(MXR_GRAPHIC1_BASE);
++	DUMPREG(MXR_GRAPHIC1_SPAN);
++	DUMPREG(MXR_GRAPHIC1_WH);
++	DUMPREG(MXR_GRAPHIC1_SXY);
++	DUMPREG(MXR_GRAPHIC1_DXY);
++#undef DUMPREG
++}
++
++static void vp_regs_dump(struct mixer_context *ctx)
++{
++#define DUMPREG(reg_id) \
++do { \
++	DRM_DEBUG_KMS(#reg_id " = %08x\n", \
++		(u32) readl(ctx->mixer_res.vp_regs + reg_id)); \
++} while (0)
++
++	DUMPREG(VP_ENABLE);
++	DUMPREG(VP_SRESET);
++	DUMPREG(VP_SHADOW_UPDATE);
++	DUMPREG(VP_FIELD_ID);
++	DUMPREG(VP_MODE);
++	DUMPREG(VP_IMG_SIZE_Y);
++	DUMPREG(VP_IMG_SIZE_C);
++	DUMPREG(VP_PER_RATE_CTRL);
++	DUMPREG(VP_TOP_Y_PTR);
++	DUMPREG(VP_BOT_Y_PTR);
++	DUMPREG(VP_TOP_C_PTR);
++	DUMPREG(VP_BOT_C_PTR);
++	DUMPREG(VP_ENDIAN_MODE);
++	DUMPREG(VP_SRC_H_POSITION);
++	DUMPREG(VP_SRC_V_POSITION);
++	DUMPREG(VP_SRC_WIDTH);
++	DUMPREG(VP_SRC_HEIGHT);
++	DUMPREG(VP_DST_H_POSITION);
++	DUMPREG(VP_DST_V_POSITION);
++	DUMPREG(VP_DST_WIDTH);
++	DUMPREG(VP_DST_HEIGHT);
++	DUMPREG(VP_H_RATIO);
++	DUMPREG(VP_V_RATIO);
++
++#undef DUMPREG
++}
++
++static inline void vp_filter_set(struct mixer_resources *res,
++		int reg_id, const u8 *data, unsigned int size)
++{
++	/* assure 4-byte align */
++	BUG_ON(size & 3);
++	for (; size; size -= 4, reg_id += 4, data += 4) {
++		u32 val = (data[0] << 24) |  (data[1] << 16) |
++			(data[2] << 8) | data[3];
++		vp_reg_write(res, reg_id, val);
++	}
++}
++
++static void vp_default_filter(struct mixer_resources *res)
++{
++	vp_filter_set(res, VP_POLY8_Y0_LL,
++		filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
++	vp_filter_set(res, VP_POLY4_Y0_LL,
++		filter_y_vert_tap4, sizeof filter_y_vert_tap4);
++	vp_filter_set(res, VP_POLY4_C0_LL,
++		filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
++}
++
++static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
++{
++	struct mixer_resources *res = &ctx->mixer_res;
++
++	/* block update on vsync */
++	mixer_reg_writemask(res, MXR_STATUS, enable ?
++			MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
++
++	vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
++			VP_SHADOW_UPDATE_ENABLE : 0);
++}
++
++static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
++{
++	struct mixer_resources *res = &ctx->mixer_res;
++	u32 val;
++
++	/* choosing between interlace and progressive mode */
++	val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
++				MXR_CFG_SCAN_PROGRASSIVE);
++
++	/* choosing between porper HD and SD mode */
++	if (height == 480)
++		val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
++	else if (height == 576)
++		val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
++	else if (height == 720)
++		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
++	else if (height == 1080)
++		val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
++	else
++		val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
++
++	mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
++}
++
++static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
++{
++	struct mixer_resources *res = &ctx->mixer_res;
++	u32 val;
++
++	if (height == 480) {
++		val = MXR_CFG_RGB601_0_255;
++	} else if (height == 576) {
++		val = MXR_CFG_RGB601_0_255;
++	} else if (height == 720) {
++		val = MXR_CFG_RGB709_16_235;
++		mixer_reg_write(res, MXR_CM_COEFF_Y,
++				(1 << 30) | (94 << 20) | (314 << 10) |
++				(32 << 0));
++		mixer_reg_write(res, MXR_CM_COEFF_CB,
++				(972 << 20) | (851 << 10) | (225 << 0));
++		mixer_reg_write(res, MXR_CM_COEFF_CR,
++				(225 << 20) | (820 << 10) | (1004 << 0));
++	} else if (height == 1080) {
++		val = MXR_CFG_RGB709_16_235;
++		mixer_reg_write(res, MXR_CM_COEFF_Y,
++				(1 << 30) | (94 << 20) | (314 << 10) |
++				(32 << 0));
++		mixer_reg_write(res, MXR_CM_COEFF_CB,
++				(972 << 20) | (851 << 10) | (225 << 0));
++		mixer_reg_write(res, MXR_CM_COEFF_CR,
++				(225 << 20) | (820 << 10) | (1004 << 0));
++	} else {
++		val = MXR_CFG_RGB709_16_235;
++		mixer_reg_write(res, MXR_CM_COEFF_Y,
++				(1 << 30) | (94 << 20) | (314 << 10) |
++				(32 << 0));
++		mixer_reg_write(res, MXR_CM_COEFF_CB,
++				(972 << 20) | (851 << 10) | (225 << 0));
++		mixer_reg_write(res, MXR_CM_COEFF_CR,
++				(225 << 20) | (820 << 10) | (1004 << 0));
++	}
++
++	mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
++}
++
++static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
++{
++	struct mixer_resources *res = &ctx->mixer_res;
++	u32 val = enable ? ~0 : 0;
++
++	switch (win) {
++	case 0:
++		mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
++		break;
++	case 1:
++		mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
++		break;
++	case 2:
++		vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
++		mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_VP_ENABLE);
++		break;
++	}
++}
++
++static void mixer_run(struct mixer_context *ctx)
++{
++	struct mixer_resources *res = &ctx->mixer_res;
++
++	mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
++
++	mixer_regs_dump(ctx);
++}
++
++static void vp_video_buffer(struct mixer_context *ctx, int win)
++{
++	struct mixer_resources *res = &ctx->mixer_res;
++	unsigned long flags;
++	struct hdmi_win_data *win_data;
++	unsigned int full_width, full_height, width, height;
++	unsigned int x_ratio, y_ratio;
++	unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
++	unsigned int mode_width, mode_height;
++	unsigned int buf_num;
++	dma_addr_t luma_addr[2], chroma_addr[2];
++	bool tiled_mode = false;
++	bool crcb_mode = false;
++	u32 val;
++
++	win_data = &ctx->win_data[win];
++
++	switch (win_data->pixel_format) {
++	case DRM_FORMAT_NV12MT:
++		tiled_mode = true;
++	case DRM_FORMAT_NV12M:
++		crcb_mode = false;
++		buf_num = 2;
++		break;
++	/* TODO: single buffer format NV12, NV21 */
++	default:
++		/* ignore pixel format at disable time */
++		if (!win_data->dma_addr)
++			break;
++
++		DRM_ERROR("pixel format for vp is wrong [%d].\n",
++				win_data->pixel_format);
++		return;
++	}
++
++	full_width = win_data->fb_width;
++	full_height = win_data->fb_height;
++	width = win_data->crtc_width;
++	height = win_data->crtc_height;
++	mode_width = win_data->mode_width;
++	mode_height = win_data->mode_height;
++
++	/* scaling feature: (src << 16) / dst */
++	x_ratio = (width << 16) / width;
++	y_ratio = (height << 16) / height;
++
++	src_x_offset = win_data->fb_x;
++	src_y_offset = win_data->fb_y;
++	dst_x_offset = win_data->crtc_x;
++	dst_y_offset = win_data->crtc_y;
++
++	if (buf_num == 2) {
++		luma_addr[0] = win_data->dma_addr;
++		chroma_addr[0] = win_data->chroma_dma_addr;
++	} else {
++		luma_addr[0] = win_data->dma_addr;
++		chroma_addr[0] = win_data->dma_addr
++			+ (full_width * full_height);
++	}
++
++	if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
++		ctx->interlace = true;
++		if (tiled_mode) {
++			luma_addr[1] = luma_addr[0] + 0x40;
++			chroma_addr[1] = chroma_addr[0] + 0x40;
++		} else {
++			luma_addr[1] = luma_addr[0] + full_width;
++			chroma_addr[1] = chroma_addr[0] + full_width;
++		}
++	} else {
++		ctx->interlace = false;
++		luma_addr[1] = 0;
++		chroma_addr[1] = 0;
++	}
++
++	spin_lock_irqsave(&res->reg_slock, flags);
++	mixer_vsync_set_update(ctx, false);
++
++	/* interlace or progressive scan mode */
++	val = (ctx->interlace ? ~0 : 0);
++	vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
++
++	/* setup format */
++	val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
++	val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
++	vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
++
++	/* setting size of input image */
++	vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(full_width) |
++		VP_IMG_VSIZE(full_height));
++	/* chroma height has to reduced by 2 to avoid chroma distorions */
++	vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(full_width) |
++		VP_IMG_VSIZE(full_height / 2));
++
++	vp_reg_write(res, VP_SRC_WIDTH, width);
++	vp_reg_write(res, VP_SRC_HEIGHT, height);
++	vp_reg_write(res, VP_SRC_H_POSITION,
++			VP_SRC_H_POSITION_VAL(src_x_offset));
++	vp_reg_write(res, VP_SRC_V_POSITION, src_y_offset);
++
++	vp_reg_write(res, VP_DST_WIDTH, width);
++	vp_reg_write(res, VP_DST_H_POSITION, dst_x_offset);
++	if (ctx->interlace) {
++		vp_reg_write(res, VP_DST_HEIGHT, height / 2);
++		vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset / 2);
++	} else {
++		vp_reg_write(res, VP_DST_HEIGHT, height);
++		vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset);
++	}
++
++	vp_reg_write(res, VP_H_RATIO, x_ratio);
++	vp_reg_write(res, VP_V_RATIO, y_ratio);
++
++	vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
++
++	/* set buffer address to vp */
++	vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]);
++	vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]);
++	vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
++	vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
++
++	mixer_cfg_scan(ctx, mode_height);
++	mixer_cfg_rgb_fmt(ctx, mode_height);
++	mixer_cfg_layer(ctx, win, true);
++	mixer_run(ctx);
++
++	mixer_vsync_set_update(ctx, true);
++	spin_unlock_irqrestore(&res->reg_slock, flags);
++
++	vp_regs_dump(ctx);
++}
++
++static void mixer_graph_buffer(struct mixer_context *ctx, int win)
++{
++	struct mixer_resources *res = &ctx->mixer_res;
++	unsigned long flags;
++	struct hdmi_win_data *win_data;
++	unsigned int full_width, width, height;
++	unsigned int x_ratio, y_ratio;
++	unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
++	unsigned int mode_width, mode_height;
++	dma_addr_t dma_addr;
++	unsigned int fmt;
++	u32 val;
++
++	win_data = &ctx->win_data[win];
++
++	#define RGB565 4
++	#define ARGB1555 5
++	#define ARGB4444 6
++	#define ARGB8888 7
++
++	switch (win_data->bpp) {
++	case 16:
++		fmt = ARGB4444;
++		break;
++	case 32:
++		fmt = ARGB8888;
++		break;
++	default:
++		fmt = ARGB8888;
++	}
++
++	dma_addr = win_data->dma_addr;
++	full_width = win_data->fb_width;
++	width = win_data->crtc_width;
++	height = win_data->crtc_height;
++	mode_width = win_data->mode_width;
++	mode_height = win_data->mode_height;
++
++	/* 2x scaling feature */
++	x_ratio = 0;
++	y_ratio = 0;
++
++	src_x_offset = win_data->fb_x;
++	src_y_offset = win_data->fb_y;
++	dst_x_offset = win_data->crtc_x;
++	dst_y_offset = win_data->crtc_y;
++
++	/* converting dma address base and source offset */
++	dma_addr = dma_addr
++		+ (src_x_offset * win_data->bpp >> 3)
++		+ (src_y_offset * full_width * win_data->bpp >> 3);
++	src_x_offset = 0;
++	src_y_offset = 0;
++
++	if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE)
++		ctx->interlace = true;
++	else
++		ctx->interlace = false;
++
++	spin_lock_irqsave(&res->reg_slock, flags);
++	mixer_vsync_set_update(ctx, false);
++
++	/* setup format */
++	mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
++		MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
++
++	/* setup geometry */
++	mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), full_width);
++
++	val  = MXR_GRP_WH_WIDTH(width);
++	val |= MXR_GRP_WH_HEIGHT(height);
++	val |= MXR_GRP_WH_H_SCALE(x_ratio);
++	val |= MXR_GRP_WH_V_SCALE(y_ratio);
++	mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
++
++	/* setup offsets in source image */
++	val  = MXR_GRP_SXY_SX(src_x_offset);
++	val |= MXR_GRP_SXY_SY(src_y_offset);
++	mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);
++
++	/* setup offsets in display image */
++	val  = MXR_GRP_DXY_DX(dst_x_offset);
++	val |= MXR_GRP_DXY_DY(dst_y_offset);
++	mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val);
++
++	/* set buffer address to mixer */
++	mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
++
++	mixer_cfg_scan(ctx, mode_height);
++	mixer_cfg_rgb_fmt(ctx, mode_height);
++	mixer_cfg_layer(ctx, win, true);
++	mixer_run(ctx);
++
++	mixer_vsync_set_update(ctx, true);
++	spin_unlock_irqrestore(&res->reg_slock, flags);
++}
++
++static void vp_win_reset(struct mixer_context *ctx)
++{
++	struct mixer_resources *res = &ctx->mixer_res;
++	int tries = 100;
++
++	vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
++	for (tries = 100; tries; --tries) {
++		/* waiting until VP_SRESET_PROCESSING is 0 */
++		if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
++			break;
++		mdelay(10);
++	}
++	WARN(tries == 0, "failed to reset Video Processor\n");
++}
++
++static int mixer_enable_vblank(void *ctx, int pipe)
++{
++	struct mixer_context *mixer_ctx = ctx;
++	struct mixer_resources *res = &mixer_ctx->mixer_res;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	mixer_ctx->pipe = pipe;
++
++	/* enable vsync interrupt */
++	mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
++			MXR_INT_EN_VSYNC);
++
++	return 0;
++}
++
++static void mixer_disable_vblank(void *ctx)
++{
++	struct mixer_context *mixer_ctx = ctx;
++	struct mixer_resources *res = &mixer_ctx->mixer_res;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	/* disable vsync interrupt */
++	mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
++}
++
++static void mixer_win_mode_set(void *ctx,
++			      struct exynos_drm_overlay *overlay)
++{
++	struct mixer_context *mixer_ctx = ctx;
++	struct hdmi_win_data *win_data;
++	int win;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	if (!overlay) {
++		DRM_ERROR("overlay is NULL\n");
++		return;
++	}
++
++	DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n",
++				 overlay->fb_width, overlay->fb_height,
++				 overlay->fb_x, overlay->fb_y,
++				 overlay->crtc_width, overlay->crtc_height,
++				 overlay->crtc_x, overlay->crtc_y);
++
++	win = overlay->zpos;
++	if (win == DEFAULT_ZPOS)
++		win = MIXER_DEFAULT_WIN;
++
++	if (win < 0 || win > MIXER_WIN_NR) {
++		DRM_ERROR("overlay plane[%d] is wrong\n", win);
++		return;
++	}
++
++	win_data = &mixer_ctx->win_data[win];
++
++	win_data->dma_addr = overlay->dma_addr[0];
++	win_data->vaddr = overlay->vaddr[0];
++	win_data->chroma_dma_addr = overlay->dma_addr[1];
++	win_data->chroma_vaddr = overlay->vaddr[1];
++	win_data->pixel_format = overlay->pixel_format;
++	win_data->bpp = overlay->bpp;
++
++	win_data->crtc_x = overlay->crtc_x;
++	win_data->crtc_y = overlay->crtc_y;
++	win_data->crtc_width = overlay->crtc_width;
++	win_data->crtc_height = overlay->crtc_height;
++
++	win_data->fb_x = overlay->fb_x;
++	win_data->fb_y = overlay->fb_y;
++	win_data->fb_width = overlay->fb_width;
++	win_data->fb_height = overlay->fb_height;
++
++	win_data->mode_width = overlay->mode_width;
++	win_data->mode_height = overlay->mode_height;
++
++	win_data->scan_flags = overlay->scan_flag;
++}
++
++static void mixer_win_commit(void *ctx, int zpos)
++{
++	struct mixer_context *mixer_ctx = ctx;
++	int win = zpos;
++
++	DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
++
++	if (win == DEFAULT_ZPOS)
++		win = MIXER_DEFAULT_WIN;
++
++	if (win < 0 || win > MIXER_WIN_NR) {
++		DRM_ERROR("overlay plane[%d] is wrong\n", win);
++		return;
++	}
++
++	if (win > 1)
++		vp_video_buffer(mixer_ctx, win);
++	else
++		mixer_graph_buffer(mixer_ctx, win);
++}
++
++static void mixer_win_disable(void *ctx, int zpos)
++{
++	struct mixer_context *mixer_ctx = ctx;
++	struct mixer_resources *res = &mixer_ctx->mixer_res;
++	unsigned long flags;
++	int win = zpos;
++
++	DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
++
++	if (win == DEFAULT_ZPOS)
++		win = MIXER_DEFAULT_WIN;
++
++	if (win < 0 || win > MIXER_WIN_NR) {
++		DRM_ERROR("overlay plane[%d] is wrong\n", win);
++		return;
++	}
++
++	spin_lock_irqsave(&res->reg_slock, flags);
++	mixer_vsync_set_update(mixer_ctx, false);
++
++	mixer_cfg_layer(mixer_ctx, win, false);
++
++	mixer_vsync_set_update(mixer_ctx, true);
++	spin_unlock_irqrestore(&res->reg_slock, flags);
++}
++
++static struct exynos_mixer_ops mixer_ops = {
++	/* manager */
++	.enable_vblank		= mixer_enable_vblank,
++	.disable_vblank		= mixer_disable_vblank,
++
++	/* overlay */
++	.win_mode_set		= mixer_win_mode_set,
++	.win_commit		= mixer_win_commit,
++	.win_disable		= mixer_win_disable,
++};
++
++/* for pageflip event */
++static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
++{
++	struct exynos_drm_private *dev_priv = drm_dev->dev_private;
++	struct drm_pending_vblank_event *e, *t;
++	struct timeval now;
++	unsigned long flags;
++	bool is_checked = false;
++
++	spin_lock_irqsave(&drm_dev->event_lock, flags);
++
++	list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
++			base.link) {
++		/* if event's pipe isn't same as crtc then ignore it. */
++		if (crtc != e->pipe)
++			continue;
++
++		is_checked = true;
++		do_gettimeofday(&now);
++		e->event.sequence = 0;
++		e->event.tv_sec = now.tv_sec;
++		e->event.tv_usec = now.tv_usec;
++
++		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
++		wake_up_interruptible(&e->base.file_priv->event_wait);
++	}
++
++	if (is_checked)
++		/*
++		 * call drm_vblank_put only in case that drm_vblank_get was
++		 * called.
++		 */
++		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
++			drm_vblank_put(drm_dev, crtc);
++
++	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
++}
++
++static irqreturn_t mixer_irq_handler(int irq, void *arg)
++{
++	struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
++	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
++	struct mixer_resources *res = &ctx->mixer_res;
++	u32 val, val_base;
++
++	spin_lock(&res->reg_slock);
++
++	/* read interrupt status for handling and clearing flags for VSYNC */
++	val = mixer_reg_read(res, MXR_INT_STATUS);
++
++	/* handling VSYNC */
++	if (val & MXR_INT_STATUS_VSYNC) {
++		/* interlace scan need to check shadow register */
++		if (ctx->interlace) {
++			val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
++			if (ctx->win_data[0].dma_addr != val_base)
++				goto out;
++
++			val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
++			if (ctx->win_data[1].dma_addr != val_base)
++				goto out;
++		}
++
++		drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
++		mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
++	}
++
++out:
++	/* clear interrupts */
++	if (~val & MXR_INT_EN_VSYNC) {
++		/* vsync interrupt use different bit for read and clear */
++		val &= ~MXR_INT_EN_VSYNC;
++		val |= MXR_INT_CLEAR_VSYNC;
++	}
++	mixer_reg_write(res, MXR_INT_STATUS, val);
++
++	spin_unlock(&res->reg_slock);
++
++	return IRQ_HANDLED;
++}
++
++static void mixer_win_reset(struct mixer_context *ctx)
++{
++	struct mixer_resources *res = &ctx->mixer_res;
++	unsigned long flags;
++	u32 val; /* value stored to register */
++
++	spin_lock_irqsave(&res->reg_slock, flags);
++	mixer_vsync_set_update(ctx, false);
++
++	mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
++
++	/* set output in RGB888 mode */
++	mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
++
++	/* 16 beat burst in DMA */
++	mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
++		MXR_STATUS_BURST_MASK);
++
++	/* setting default layer priority: layer1 > layer0 > video
++	 * because typical usage scenario would be
++	 * layer1 - OSD
++	 * layer0 - framebuffer
++	 * video - video overlay
++	 */
++	val = MXR_LAYER_CFG_GRP1_VAL(3);
++	val |= MXR_LAYER_CFG_GRP0_VAL(2);
++	val |= MXR_LAYER_CFG_VP_VAL(1);
++	mixer_reg_write(res, MXR_LAYER_CFG, val);
++
++	/* setting background color */
++	mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
++	mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
++	mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
++
++	/* setting graphical layers */
++
++	val  = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
++	val |= MXR_GRP_CFG_WIN_BLEND_EN;
++	val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
++
++	/* the same configuration for both layers */
++	mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
++
++	val |= MXR_GRP_CFG_BLEND_PRE_MUL;
++	val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
++	mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
++
++	/* configuration of Video Processor Registers */
++	vp_win_reset(ctx);
++	vp_default_filter(res);
++
++	/* disable all layers */
++	mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
++	mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
++	mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
++
++	mixer_vsync_set_update(ctx, true);
++	spin_unlock_irqrestore(&res->reg_slock, flags);
++}
++
++static void mixer_resource_poweron(struct mixer_context *ctx)
++{
++	struct mixer_resources *res = &ctx->mixer_res;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	clk_enable(res->mixer);
++	clk_enable(res->vp);
++	clk_enable(res->sclk_mixer);
++
++	mixer_win_reset(ctx);
++}
++
++static void mixer_resource_poweroff(struct mixer_context *ctx)
++{
++	struct mixer_resources *res = &ctx->mixer_res;
++
++	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
++
++	clk_disable(res->mixer);
++	clk_disable(res->vp);
++	clk_disable(res->sclk_mixer);
++}
++
++static int mixer_runtime_resume(struct device *dev)
++{
++	struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
++
++	DRM_DEBUG_KMS("resume - start\n");
++
++	mixer_resource_poweron(ctx->ctx);
++
++	return 0;
++}
++
++static int mixer_runtime_suspend(struct device *dev)
++{
++	struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
++
++	DRM_DEBUG_KMS("suspend - start\n");
++
++	mixer_resource_poweroff(ctx->ctx);
++
++	return 0;
++}
++
++static const struct dev_pm_ops mixer_pm_ops = {
++	.runtime_suspend = mixer_runtime_suspend,
++	.runtime_resume	 = mixer_runtime_resume,
++};
++
++static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
++				 struct platform_device *pdev)
++{
++	struct mixer_context *mixer_ctx = ctx->ctx;
++	struct device *dev = &pdev->dev;
++	struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
++	struct resource *res;
++	int ret;
++
++	mixer_res->dev = dev;
++	spin_lock_init(&mixer_res->reg_slock);
++
++	mixer_res->mixer = clk_get(dev, "mixer");
++	if (IS_ERR_OR_NULL(mixer_res->mixer)) {
++		dev_err(dev, "failed to get clock 'mixer'\n");
++		ret = -ENODEV;
++		goto fail;
++	}
++	mixer_res->vp = clk_get(dev, "vp");
++	if (IS_ERR_OR_NULL(mixer_res->vp)) {
++		dev_err(dev, "failed to get clock 'vp'\n");
++		ret = -ENODEV;
++		goto fail;
++	}
++	mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
++	if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
++		dev_err(dev, "failed to get clock 'sclk_mixer'\n");
++		ret = -ENODEV;
++		goto fail;
++	}
++	mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
++	if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
++		dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
++		ret = -ENODEV;
++		goto fail;
++	}
++	mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
++	if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
++		dev_err(dev, "failed to get clock 'sclk_dac'\n");
++		ret = -ENODEV;
++		goto fail;
++	}
++	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
++	if (res == NULL) {
++		dev_err(dev, "get memory resource failed.\n");
++		ret = -ENXIO;
++		goto fail;
++	}
++
++	clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
++
++	mixer_res->mixer_regs = ioremap(res->start, resource_size(res));
++	if (mixer_res->mixer_regs == NULL) {
++		dev_err(dev, "register mapping failed.\n");
++		ret = -ENXIO;
++		goto fail;
++	}
++
++	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
++	if (res == NULL) {
++		dev_err(dev, "get memory resource failed.\n");
++		ret = -ENXIO;
++		goto fail_mixer_regs;
++	}
++
++	mixer_res->vp_regs = ioremap(res->start, resource_size(res));
++	if (mixer_res->vp_regs == NULL) {
++		dev_err(dev, "register mapping failed.\n");
++		ret = -ENXIO;
++		goto fail_mixer_regs;
++	}
++
++	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
++	if (res == NULL) {
++		dev_err(dev, "get interrupt resource failed.\n");
++		ret = -ENXIO;
++		goto fail_vp_regs;
++	}
++
++	ret = request_irq(res->start, mixer_irq_handler, 0, "drm_mixer", ctx);
++	if (ret) {
++		dev_err(dev, "request interrupt failed.\n");
++		goto fail_vp_regs;
++	}
++	mixer_res->irq = res->start;
++
++	return 0;
++
++fail_vp_regs:
++	iounmap(mixer_res->vp_regs);
++
++fail_mixer_regs:
++	iounmap(mixer_res->mixer_regs);
++
++fail:
++	if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
++		clk_put(mixer_res->sclk_dac);
++	if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
++		clk_put(mixer_res->sclk_hdmi);
++	if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
++		clk_put(mixer_res->sclk_mixer);
++	if (!IS_ERR_OR_NULL(mixer_res->vp))
++		clk_put(mixer_res->vp);
++	if (!IS_ERR_OR_NULL(mixer_res->mixer))
++		clk_put(mixer_res->mixer);
++	mixer_res->dev = NULL;
++	return ret;
++}
++
++static void mixer_resources_cleanup(struct mixer_context *ctx)
++{
++	struct mixer_resources *res = &ctx->mixer_res;
++
++	disable_irq(res->irq);
++	free_irq(res->irq, ctx);
++
++	iounmap(res->vp_regs);
++	iounmap(res->mixer_regs);
++}
++
++static int __devinit mixer_probe(struct platform_device *pdev)
++{
++	struct device *dev = &pdev->dev;
++	struct exynos_drm_hdmi_context *drm_hdmi_ctx;
++	struct mixer_context *ctx;
++	int ret;
++
++	dev_info(dev, "probe start\n");
++
++	drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
++	if (!drm_hdmi_ctx) {
++		DRM_ERROR("failed to allocate common hdmi context.\n");
++		return -ENOMEM;
++	}
++
++	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
++	if (!ctx) {
++		DRM_ERROR("failed to alloc mixer context.\n");
++		kfree(drm_hdmi_ctx);
++		return -ENOMEM;
++	}
++
++	drm_hdmi_ctx->ctx = (void *)ctx;
++
++	platform_set_drvdata(pdev, drm_hdmi_ctx);
++
++	/* acquire resources: regs, irqs, clocks */
++	ret = mixer_resources_init(drm_hdmi_ctx, pdev);
++	if (ret)
++		goto fail;
++
++	/* register specific callback point to common hdmi. */
++	exynos_mixer_ops_register(&mixer_ops);
++
++	mixer_resource_poweron(ctx);
++
++	return 0;
++
++
++fail:
++	dev_info(dev, "probe failed\n");
++	return ret;
++}
++
++static int mixer_remove(struct platform_device *pdev)
++{
++	struct device *dev = &pdev->dev;
++	struct exynos_drm_hdmi_context *drm_hdmi_ctx =
++					platform_get_drvdata(pdev);
++	struct mixer_context *ctx = drm_hdmi_ctx->ctx;
++
++	dev_info(dev, "remove successful\n");
++
++	mixer_resource_poweroff(ctx);
++	mixer_resources_cleanup(ctx);
++
++	return 0;
++}
++
++struct platform_driver mixer_driver = {
++	.driver = {
++		.name = "s5p-mixer",
++		.owner = THIS_MODULE,
++		.pm = &mixer_pm_ops,
++	},
++	.probe = mixer_probe,
++	.remove = __devexit_p(mixer_remove),
++};
+diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
+new file mode 100644
+index 0000000..3c04bea
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/regs-hdmi.h
+@@ -0,0 +1,561 @@
++/*
++ *
++ *  Cloned from drivers/media/video/s5p-tv/regs-hdmi.h
++ *
++ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
++ * http://www.samsung.com/
++ *
++ * HDMI register header file for Samsung TVOUT driver
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++*/
++
++#ifndef SAMSUNG_REGS_HDMI_H
++#define SAMSUNG_REGS_HDMI_H
++
++/*
++ * Register part
++*/
++
++/* HDMI Version 1.3 & Common */
++#define HDMI_CTRL_BASE(x)		((x) + 0x00000000)
++#define HDMI_CORE_BASE(x)		((x) + 0x00010000)
++#define HDMI_I2S_BASE(x)		((x) + 0x00040000)
++#define HDMI_TG_BASE(x)			((x) + 0x00050000)
++
++/* Control registers */
++#define HDMI_INTC_CON			HDMI_CTRL_BASE(0x0000)
++#define HDMI_INTC_FLAG			HDMI_CTRL_BASE(0x0004)
++#define HDMI_HPD_STATUS			HDMI_CTRL_BASE(0x000C)
++#define HDMI_V13_PHY_RSTOUT		HDMI_CTRL_BASE(0x0014)
++#define HDMI_V13_PHY_VPLL		HDMI_CTRL_BASE(0x0018)
++#define HDMI_V13_PHY_CMU		HDMI_CTRL_BASE(0x001C)
++#define HDMI_V13_CORE_RSTOUT		HDMI_CTRL_BASE(0x0020)
++
++/* Core registers */
++#define HDMI_CON_0			HDMI_CORE_BASE(0x0000)
++#define HDMI_CON_1			HDMI_CORE_BASE(0x0004)
++#define HDMI_CON_2			HDMI_CORE_BASE(0x0008)
++#define HDMI_SYS_STATUS			HDMI_CORE_BASE(0x0010)
++#define HDMI_V13_PHY_STATUS		HDMI_CORE_BASE(0x0014)
++#define HDMI_STATUS_EN			HDMI_CORE_BASE(0x0020)
++#define HDMI_HPD			HDMI_CORE_BASE(0x0030)
++#define HDMI_MODE_SEL			HDMI_CORE_BASE(0x0040)
++#define HDMI_ENC_EN			HDMI_CORE_BASE(0x0044)
++#define HDMI_V13_BLUE_SCREEN_0		HDMI_CORE_BASE(0x0050)
++#define HDMI_V13_BLUE_SCREEN_1		HDMI_CORE_BASE(0x0054)
++#define HDMI_V13_BLUE_SCREEN_2		HDMI_CORE_BASE(0x0058)
++#define HDMI_H_BLANK_0			HDMI_CORE_BASE(0x00A0)
++#define HDMI_H_BLANK_1			HDMI_CORE_BASE(0x00A4)
++#define HDMI_V13_V_BLANK_0		HDMI_CORE_BASE(0x00B0)
++#define HDMI_V13_V_BLANK_1		HDMI_CORE_BASE(0x00B4)
++#define HDMI_V13_V_BLANK_2		HDMI_CORE_BASE(0x00B8)
++#define HDMI_V13_H_V_LINE_0		HDMI_CORE_BASE(0x00C0)
++#define HDMI_V13_H_V_LINE_1		HDMI_CORE_BASE(0x00C4)
++#define HDMI_V13_H_V_LINE_2		HDMI_CORE_BASE(0x00C8)
++#define HDMI_VSYNC_POL			HDMI_CORE_BASE(0x00E4)
++#define HDMI_INT_PRO_MODE		HDMI_CORE_BASE(0x00E8)
++#define HDMI_V13_V_BLANK_F_0		HDMI_CORE_BASE(0x0110)
++#define HDMI_V13_V_BLANK_F_1		HDMI_CORE_BASE(0x0114)
++#define HDMI_V13_V_BLANK_F_2		HDMI_CORE_BASE(0x0118)
++#define HDMI_V13_H_SYNC_GEN_0		HDMI_CORE_BASE(0x0120)
++#define HDMI_V13_H_SYNC_GEN_1		HDMI_CORE_BASE(0x0124)
++#define HDMI_V13_H_SYNC_GEN_2		HDMI_CORE_BASE(0x0128)
++#define HDMI_V13_V_SYNC_GEN_1_0		HDMI_CORE_BASE(0x0130)
++#define HDMI_V13_V_SYNC_GEN_1_1		HDMI_CORE_BASE(0x0134)
++#define HDMI_V13_V_SYNC_GEN_1_2		HDMI_CORE_BASE(0x0138)
++#define HDMI_V13_V_SYNC_GEN_2_0		HDMI_CORE_BASE(0x0140)
++#define HDMI_V13_V_SYNC_GEN_2_1		HDMI_CORE_BASE(0x0144)
++#define HDMI_V13_V_SYNC_GEN_2_2		HDMI_CORE_BASE(0x0148)
++#define HDMI_V13_V_SYNC_GEN_3_0		HDMI_CORE_BASE(0x0150)
++#define HDMI_V13_V_SYNC_GEN_3_1		HDMI_CORE_BASE(0x0154)
++#define HDMI_V13_V_SYNC_GEN_3_2		HDMI_CORE_BASE(0x0158)
++#define HDMI_V13_ACR_CON		HDMI_CORE_BASE(0x0180)
++#define HDMI_V13_AVI_CON		HDMI_CORE_BASE(0x0300)
++#define HDMI_V13_AVI_BYTE(n)		HDMI_CORE_BASE(0x0320 + 4 * (n))
++#define HDMI_V13_DC_CONTROL		HDMI_CORE_BASE(0x05C0)
++#define HDMI_V13_VIDEO_PATTERN_GEN	HDMI_CORE_BASE(0x05C4)
++#define HDMI_V13_HPD_GEN		HDMI_CORE_BASE(0x05C8)
++#define HDMI_V13_AUI_CON		HDMI_CORE_BASE(0x0360)
++#define HDMI_V13_SPD_CON		HDMI_CORE_BASE(0x0400)
++
++/* Timing generator registers */
++#define HDMI_TG_CMD			HDMI_TG_BASE(0x0000)
++#define HDMI_TG_H_FSZ_L			HDMI_TG_BASE(0x0018)
++#define HDMI_TG_H_FSZ_H			HDMI_TG_BASE(0x001C)
++#define HDMI_TG_HACT_ST_L		HDMI_TG_BASE(0x0020)
++#define HDMI_TG_HACT_ST_H		HDMI_TG_BASE(0x0024)
++#define HDMI_TG_HACT_SZ_L		HDMI_TG_BASE(0x0028)
++#define HDMI_TG_HACT_SZ_H		HDMI_TG_BASE(0x002C)
++#define HDMI_TG_V_FSZ_L			HDMI_TG_BASE(0x0030)
++#define HDMI_TG_V_FSZ_H			HDMI_TG_BASE(0x0034)
++#define HDMI_TG_VSYNC_L			HDMI_TG_BASE(0x0038)
++#define HDMI_TG_VSYNC_H			HDMI_TG_BASE(0x003C)
++#define HDMI_TG_VSYNC2_L		HDMI_TG_BASE(0x0040)
++#define HDMI_TG_VSYNC2_H		HDMI_TG_BASE(0x0044)
++#define HDMI_TG_VACT_ST_L		HDMI_TG_BASE(0x0048)
++#define HDMI_TG_VACT_ST_H		HDMI_TG_BASE(0x004C)
++#define HDMI_TG_VACT_SZ_L		HDMI_TG_BASE(0x0050)
++#define HDMI_TG_VACT_SZ_H		HDMI_TG_BASE(0x0054)
++#define HDMI_TG_FIELD_CHG_L		HDMI_TG_BASE(0x0058)
++#define HDMI_TG_FIELD_CHG_H		HDMI_TG_BASE(0x005C)
++#define HDMI_TG_VACT_ST2_L		HDMI_TG_BASE(0x0060)
++#define HDMI_TG_VACT_ST2_H		HDMI_TG_BASE(0x0064)
++#define HDMI_TG_VSYNC_TOP_HDMI_L	HDMI_TG_BASE(0x0078)
++#define HDMI_TG_VSYNC_TOP_HDMI_H	HDMI_TG_BASE(0x007C)
++#define HDMI_TG_VSYNC_BOT_HDMI_L	HDMI_TG_BASE(0x0080)
++#define HDMI_TG_VSYNC_BOT_HDMI_H	HDMI_TG_BASE(0x0084)
++#define HDMI_TG_FIELD_TOP_HDMI_L	HDMI_TG_BASE(0x0088)
++#define HDMI_TG_FIELD_TOP_HDMI_H	HDMI_TG_BASE(0x008C)
++#define HDMI_TG_FIELD_BOT_HDMI_L	HDMI_TG_BASE(0x0090)
++#define HDMI_TG_FIELD_BOT_HDMI_H	HDMI_TG_BASE(0x0094)
++
++/*
++ * Bit definition part
++ */
++
++/* HDMI_INTC_CON */
++#define HDMI_INTC_EN_GLOBAL		(1 << 6)
++#define HDMI_INTC_EN_HPD_PLUG		(1 << 3)
++#define HDMI_INTC_EN_HPD_UNPLUG		(1 << 2)
++
++/* HDMI_INTC_FLAG */
++#define HDMI_INTC_FLAG_HPD_PLUG		(1 << 3)
++#define HDMI_INTC_FLAG_HPD_UNPLUG	(1 << 2)
++
++/* HDMI_PHY_RSTOUT */
++#define HDMI_PHY_SW_RSTOUT		(1 << 0)
++
++/* HDMI_CORE_RSTOUT */
++#define HDMI_CORE_SW_RSTOUT		(1 << 0)
++
++/* HDMI_CON_0 */
++#define HDMI_BLUE_SCR_EN		(1 << 5)
++#define HDMI_ASP_EN			(1 << 2)
++#define HDMI_ASP_DIS			(0 << 2)
++#define HDMI_ASP_MASK			(1 << 2)
++#define HDMI_EN				(1 << 0)
++
++/* HDMI_PHY_STATUS */
++#define HDMI_PHY_STATUS_READY		(1 << 0)
++
++/* HDMI_MODE_SEL */
++#define HDMI_MODE_HDMI_EN		(1 << 1)
++#define HDMI_MODE_DVI_EN		(1 << 0)
++#define HDMI_DVI_MODE_EN		(1)
++#define HDMI_DVI_MODE_DIS		(0)
++#define HDMI_MODE_MASK			(3 << 0)
++
++/* HDMI_TG_CMD */
++#define HDMI_TG_EN			(1 << 0)
++#define HDMI_FIELD_EN			(1 << 1)
++
++
++/* HDMI Version 1.4 */
++/* Control registers */
++/* #define HDMI_INTC_CON		HDMI_CTRL_BASE(0x0000) */
++/* #define HDMI_INTC_FLAG		HDMI_CTRL_BASE(0x0004) */
++#define HDMI_HDCP_KEY_LOAD		HDMI_CTRL_BASE(0x0008)
++/* #define HDMI_HPD_STATUS		HDMI_CTRL_BASE(0x000C) */
++#define HDMI_INTC_CON_1			HDMI_CTRL_BASE(0x0010)
++#define HDMI_INTC_FLAG_1		HDMI_CTRL_BASE(0x0014)
++#define HDMI_PHY_STATUS_0		HDMI_CTRL_BASE(0x0020)
++#define HDMI_PHY_STATUS_CMU		HDMI_CTRL_BASE(0x0024)
++#define HDMI_PHY_STATUS_PLL		HDMI_CTRL_BASE(0x0028)
++#define HDMI_PHY_CON_0			HDMI_CTRL_BASE(0x0030)
++#define HDMI_HPD_CTRL			HDMI_CTRL_BASE(0x0040)
++#define HDMI_HPD_ST			HDMI_CTRL_BASE(0x0044)
++#define HDMI_HPD_TH_X			HDMI_CTRL_BASE(0x0050)
++#define HDMI_AUDIO_CLKSEL		HDMI_CTRL_BASE(0x0070)
++#define HDMI_PHY_RSTOUT			HDMI_CTRL_BASE(0x0074)
++#define HDMI_PHY_VPLL			HDMI_CTRL_BASE(0x0078)
++#define HDMI_PHY_CMU			HDMI_CTRL_BASE(0x007C)
++#define HDMI_CORE_RSTOUT		HDMI_CTRL_BASE(0x0080)
++
++/* Video related registers */
++#define HDMI_YMAX			HDMI_CORE_BASE(0x0060)
++#define HDMI_YMIN			HDMI_CORE_BASE(0x0064)
++#define HDMI_CMAX			HDMI_CORE_BASE(0x0068)
++#define HDMI_CMIN			HDMI_CORE_BASE(0x006C)
++
++#define HDMI_V2_BLANK_0			HDMI_CORE_BASE(0x00B0)
++#define HDMI_V2_BLANK_1			HDMI_CORE_BASE(0x00B4)
++#define HDMI_V1_BLANK_0			HDMI_CORE_BASE(0x00B8)
++#define HDMI_V1_BLANK_1			HDMI_CORE_BASE(0x00BC)
++
++#define HDMI_V_LINE_0			HDMI_CORE_BASE(0x00C0)
++#define HDMI_V_LINE_1			HDMI_CORE_BASE(0x00C4)
++#define HDMI_H_LINE_0			HDMI_CORE_BASE(0x00C8)
++#define HDMI_H_LINE_1			HDMI_CORE_BASE(0x00CC)
++
++#define HDMI_HSYNC_POL			HDMI_CORE_BASE(0x00E0)
++
++#define HDMI_V_BLANK_F0_0		HDMI_CORE_BASE(0x0110)
++#define HDMI_V_BLANK_F0_1		HDMI_CORE_BASE(0x0114)
++#define HDMI_V_BLANK_F1_0		HDMI_CORE_BASE(0x0118)
++#define HDMI_V_BLANK_F1_1		HDMI_CORE_BASE(0x011C)
++
++#define HDMI_H_SYNC_START_0		HDMI_CORE_BASE(0x0120)
++#define HDMI_H_SYNC_START_1		HDMI_CORE_BASE(0x0124)
++#define HDMI_H_SYNC_END_0		HDMI_CORE_BASE(0x0128)
++#define HDMI_H_SYNC_END_1		HDMI_CORE_BASE(0x012C)
++
++#define HDMI_V_SYNC_LINE_BEF_2_0	HDMI_CORE_BASE(0x0130)
++#define HDMI_V_SYNC_LINE_BEF_2_1	HDMI_CORE_BASE(0x0134)
++#define HDMI_V_SYNC_LINE_BEF_1_0	HDMI_CORE_BASE(0x0138)
++#define HDMI_V_SYNC_LINE_BEF_1_1	HDMI_CORE_BASE(0x013C)
++
++#define HDMI_V_SYNC_LINE_AFT_2_0	HDMI_CORE_BASE(0x0140)
++#define HDMI_V_SYNC_LINE_AFT_2_1	HDMI_CORE_BASE(0x0144)
++#define HDMI_V_SYNC_LINE_AFT_1_0	HDMI_CORE_BASE(0x0148)
++#define HDMI_V_SYNC_LINE_AFT_1_1	HDMI_CORE_BASE(0x014C)
++
++#define HDMI_V_SYNC_LINE_AFT_PXL_2_0	HDMI_CORE_BASE(0x0150)
++#define HDMI_V_SYNC_LINE_AFT_PXL_2_1	HDMI_CORE_BASE(0x0154)
++#define HDMI_V_SYNC_LINE_AFT_PXL_1_0	HDMI_CORE_BASE(0x0158)
++#define HDMI_V_SYNC_LINE_AFT_PXL_1_1	HDMI_CORE_BASE(0x015C)
++
++#define HDMI_V_BLANK_F2_0		HDMI_CORE_BASE(0x0160)
++#define HDMI_V_BLANK_F2_1		HDMI_CORE_BASE(0x0164)
++#define HDMI_V_BLANK_F3_0		HDMI_CORE_BASE(0x0168)
++#define HDMI_V_BLANK_F3_1		HDMI_CORE_BASE(0x016C)
++#define HDMI_V_BLANK_F4_0		HDMI_CORE_BASE(0x0170)
++#define HDMI_V_BLANK_F4_1		HDMI_CORE_BASE(0x0174)
++#define HDMI_V_BLANK_F5_0		HDMI_CORE_BASE(0x0178)
++#define HDMI_V_BLANK_F5_1		HDMI_CORE_BASE(0x017C)
++
++#define HDMI_V_SYNC_LINE_AFT_3_0	HDMI_CORE_BASE(0x0180)
++#define HDMI_V_SYNC_LINE_AFT_3_1	HDMI_CORE_BASE(0x0184)
++#define HDMI_V_SYNC_LINE_AFT_4_0	HDMI_CORE_BASE(0x0188)
++#define HDMI_V_SYNC_LINE_AFT_4_1	HDMI_CORE_BASE(0x018C)
++#define HDMI_V_SYNC_LINE_AFT_5_0	HDMI_CORE_BASE(0x0190)
++#define HDMI_V_SYNC_LINE_AFT_5_1	HDMI_CORE_BASE(0x0194)
++#define HDMI_V_SYNC_LINE_AFT_6_0	HDMI_CORE_BASE(0x0198)
++#define HDMI_V_SYNC_LINE_AFT_6_1	HDMI_CORE_BASE(0x019C)
++
++#define HDMI_V_SYNC_LINE_AFT_PXL_3_0	HDMI_CORE_BASE(0x01A0)
++#define HDMI_V_SYNC_LINE_AFT_PXL_3_1	HDMI_CORE_BASE(0x01A4)
++#define HDMI_V_SYNC_LINE_AFT_PXL_4_0	HDMI_CORE_BASE(0x01A8)
++#define HDMI_V_SYNC_LINE_AFT_PXL_4_1	HDMI_CORE_BASE(0x01AC)
++#define HDMI_V_SYNC_LINE_AFT_PXL_5_0	HDMI_CORE_BASE(0x01B0)
++#define HDMI_V_SYNC_LINE_AFT_PXL_5_1	HDMI_CORE_BASE(0x01B4)
++#define HDMI_V_SYNC_LINE_AFT_PXL_6_0	HDMI_CORE_BASE(0x01B8)
++#define HDMI_V_SYNC_LINE_AFT_PXL_6_1	HDMI_CORE_BASE(0x01BC)
++
++#define HDMI_VACT_SPACE_1_0		HDMI_CORE_BASE(0x01C0)
++#define HDMI_VACT_SPACE_1_1		HDMI_CORE_BASE(0x01C4)
++#define HDMI_VACT_SPACE_2_0		HDMI_CORE_BASE(0x01C8)
++#define HDMI_VACT_SPACE_2_1		HDMI_CORE_BASE(0x01CC)
++#define HDMI_VACT_SPACE_3_0		HDMI_CORE_BASE(0x01D0)
++#define HDMI_VACT_SPACE_3_1		HDMI_CORE_BASE(0x01D4)
++#define HDMI_VACT_SPACE_4_0		HDMI_CORE_BASE(0x01D8)
++#define HDMI_VACT_SPACE_4_1		HDMI_CORE_BASE(0x01DC)
++#define HDMI_VACT_SPACE_5_0		HDMI_CORE_BASE(0x01E0)
++#define HDMI_VACT_SPACE_5_1		HDMI_CORE_BASE(0x01E4)
++#define HDMI_VACT_SPACE_6_0		HDMI_CORE_BASE(0x01E8)
++#define HDMI_VACT_SPACE_6_1		HDMI_CORE_BASE(0x01EC)
++
++#define HDMI_GCP_CON			HDMI_CORE_BASE(0x0200)
++#define HDMI_GCP_BYTE1			HDMI_CORE_BASE(0x0210)
++#define HDMI_GCP_BYTE2			HDMI_CORE_BASE(0x0214)
++#define HDMI_GCP_BYTE3			HDMI_CORE_BASE(0x0218)
++
++/* Audio related registers */
++#define HDMI_ASP_CON			HDMI_CORE_BASE(0x0300)
++#define HDMI_ASP_SP_FLAT		HDMI_CORE_BASE(0x0304)
++#define HDMI_ASP_CHCFG0			HDMI_CORE_BASE(0x0310)
++#define HDMI_ASP_CHCFG1			HDMI_CORE_BASE(0x0314)
++#define HDMI_ASP_CHCFG2			HDMI_CORE_BASE(0x0318)
++#define HDMI_ASP_CHCFG3			HDMI_CORE_BASE(0x031C)
++
++#define HDMI_ACR_CON			HDMI_CORE_BASE(0x0400)
++#define HDMI_ACR_MCTS0			HDMI_CORE_BASE(0x0410)
++#define HDMI_ACR_MCTS1			HDMI_CORE_BASE(0x0414)
++#define HDMI_ACR_MCTS2			HDMI_CORE_BASE(0x0418)
++#define HDMI_ACR_CTS0			HDMI_CORE_BASE(0x0420)
++#define HDMI_ACR_CTS1			HDMI_CORE_BASE(0x0424)
++#define HDMI_ACR_CTS2			HDMI_CORE_BASE(0x0428)
++#define HDMI_ACR_N0			HDMI_CORE_BASE(0x0430)
++#define HDMI_ACR_N1			HDMI_CORE_BASE(0x0434)
++#define HDMI_ACR_N2			HDMI_CORE_BASE(0x0438)
++
++/* Packet related registers */
++#define HDMI_ACP_CON			HDMI_CORE_BASE(0x0500)
++#define HDMI_ACP_TYPE			HDMI_CORE_BASE(0x0514)
++#define HDMI_ACP_DATA(n)		HDMI_CORE_BASE(0x0520 + 4 * (n))
++
++#define HDMI_ISRC_CON			HDMI_CORE_BASE(0x0600)
++#define HDMI_ISRC1_HEADER1		HDMI_CORE_BASE(0x0614)
++#define HDMI_ISRC1_DATA(n)		HDMI_CORE_BASE(0x0620 + 4 * (n))
++#define HDMI_ISRC2_DATA(n)		HDMI_CORE_BASE(0x06A0 + 4 * (n))
++
++#define HDMI_AVI_CON			HDMI_CORE_BASE(0x0700)
++#define HDMI_AVI_HEADER0		HDMI_CORE_BASE(0x0710)
++#define HDMI_AVI_HEADER1		HDMI_CORE_BASE(0x0714)
++#define HDMI_AVI_HEADER2		HDMI_CORE_BASE(0x0718)
++#define HDMI_AVI_CHECK_SUM		HDMI_CORE_BASE(0x071C)
++#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0720 + 4 * (n))
++
++#define HDMI_AUI_CON			HDMI_CORE_BASE(0x0800)
++#define HDMI_AUI_HEADER0		HDMI_CORE_BASE(0x0810)
++#define HDMI_AUI_HEADER1		HDMI_CORE_BASE(0x0814)
++#define HDMI_AUI_HEADER2		HDMI_CORE_BASE(0x0818)
++#define HDMI_AUI_CHECK_SUM		HDMI_CORE_BASE(0x081C)
++#define HDMI_AUI_BYTE(n)		HDMI_CORE_BASE(0x0820 + 4 * (n))
++
++#define HDMI_MPG_CON			HDMI_CORE_BASE(0x0900)
++#define HDMI_MPG_CHECK_SUM		HDMI_CORE_BASE(0x091C)
++#define HDMI_MPG_DATA(n)		HDMI_CORE_BASE(0x0920 + 4 * (n))
++
++#define HDMI_SPD_CON			HDMI_CORE_BASE(0x0A00)
++#define HDMI_SPD_HEADER0		HDMI_CORE_BASE(0x0A10)
++#define HDMI_SPD_HEADER1		HDMI_CORE_BASE(0x0A14)
++#define HDMI_SPD_HEADER2		HDMI_CORE_BASE(0x0A18)
++#define HDMI_SPD_DATA(n)		HDMI_CORE_BASE(0x0A20 + 4 * (n))
++
++#define HDMI_GAMUT_CON			HDMI_CORE_BASE(0x0B00)
++#define HDMI_GAMUT_HEADER0		HDMI_CORE_BASE(0x0B10)
++#define HDMI_GAMUT_HEADER1		HDMI_CORE_BASE(0x0B14)
++#define HDMI_GAMUT_HEADER2		HDMI_CORE_BASE(0x0B18)
++#define HDMI_GAMUT_METADATA(n)		HDMI_CORE_BASE(0x0B20 + 4 * (n))
++
++#define HDMI_VSI_CON			HDMI_CORE_BASE(0x0C00)
++#define HDMI_VSI_HEADER0		HDMI_CORE_BASE(0x0C10)
++#define HDMI_VSI_HEADER1		HDMI_CORE_BASE(0x0C14)
++#define HDMI_VSI_HEADER2		HDMI_CORE_BASE(0x0C18)
++#define HDMI_VSI_DATA(n)		HDMI_CORE_BASE(0x0C20 + 4 * (n))
++
++#define HDMI_DC_CONTROL			HDMI_CORE_BASE(0x0D00)
++#define HDMI_VIDEO_PATTERN_GEN		HDMI_CORE_BASE(0x0D04)
++
++#define HDMI_AN_SEED_SEL		HDMI_CORE_BASE(0x0E48)
++#define HDMI_AN_SEED_0			HDMI_CORE_BASE(0x0E58)
++#define HDMI_AN_SEED_1			HDMI_CORE_BASE(0x0E5C)
++#define HDMI_AN_SEED_2			HDMI_CORE_BASE(0x0E60)
++#define HDMI_AN_SEED_3			HDMI_CORE_BASE(0x0E64)
++
++/* HDCP related registers */
++#define HDMI_HDCP_SHA1(n)		HDMI_CORE_BASE(0x7000 + 4 * (n))
++#define HDMI_HDCP_KSV_LIST(n)		HDMI_CORE_BASE(0x7050 + 4 * (n))
++
++#define HDMI_HDCP_KSV_LIST_CON		HDMI_CORE_BASE(0x7064)
++#define HDMI_HDCP_SHA_RESULT		HDMI_CORE_BASE(0x7070)
++#define HDMI_HDCP_CTRL1			HDMI_CORE_BASE(0x7080)
++#define HDMI_HDCP_CTRL2			HDMI_CORE_BASE(0x7084)
++#define HDMI_HDCP_CHECK_RESULT		HDMI_CORE_BASE(0x7090)
++#define HDMI_HDCP_BKSV(n)		HDMI_CORE_BASE(0x70A0 + 4 * (n))
++#define HDMI_HDCP_AKSV(n)		HDMI_CORE_BASE(0x70C0 + 4 * (n))
++#define HDMI_HDCP_AN(n)			HDMI_CORE_BASE(0x70E0 + 4 * (n))
++
++#define HDMI_HDCP_BCAPS			HDMI_CORE_BASE(0x7100)
++#define HDMI_HDCP_BSTATUS_0		HDMI_CORE_BASE(0x7110)
++#define HDMI_HDCP_BSTATUS_1		HDMI_CORE_BASE(0x7114)
++#define HDMI_HDCP_RI_0			HDMI_CORE_BASE(0x7140)
++#define HDMI_HDCP_RI_1			HDMI_CORE_BASE(0x7144)
++#define HDMI_HDCP_I2C_INT		HDMI_CORE_BASE(0x7180)
++#define HDMI_HDCP_AN_INT		HDMI_CORE_BASE(0x7190)
++#define HDMI_HDCP_WDT_INT		HDMI_CORE_BASE(0x71A0)
++#define HDMI_HDCP_RI_INT		HDMI_CORE_BASE(0x71B0)
++#define HDMI_HDCP_RI_COMPARE_0		HDMI_CORE_BASE(0x71D0)
++#define HDMI_HDCP_RI_COMPARE_1		HDMI_CORE_BASE(0x71D4)
++#define HDMI_HDCP_FRAME_COUNT		HDMI_CORE_BASE(0x71E0)
++
++#define HDMI_RGB_ROUND_EN		HDMI_CORE_BASE(0xD500)
++#define HDMI_VACT_SPACE_R_0		HDMI_CORE_BASE(0xD504)
++#define HDMI_VACT_SPACE_R_1		HDMI_CORE_BASE(0xD508)
++#define HDMI_VACT_SPACE_G_0		HDMI_CORE_BASE(0xD50C)
++#define HDMI_VACT_SPACE_G_1		HDMI_CORE_BASE(0xD510)
++#define HDMI_VACT_SPACE_B_0		HDMI_CORE_BASE(0xD514)
++#define HDMI_VACT_SPACE_B_1		HDMI_CORE_BASE(0xD518)
++
++#define HDMI_BLUE_SCREEN_B_0		HDMI_CORE_BASE(0xD520)
++#define HDMI_BLUE_SCREEN_B_1		HDMI_CORE_BASE(0xD524)
++#define HDMI_BLUE_SCREEN_G_0		HDMI_CORE_BASE(0xD528)
++#define HDMI_BLUE_SCREEN_G_1		HDMI_CORE_BASE(0xD52C)
++#define HDMI_BLUE_SCREEN_R_0		HDMI_CORE_BASE(0xD530)
++#define HDMI_BLUE_SCREEN_R_1		HDMI_CORE_BASE(0xD534)
++
++/* HDMI I2S register */
++#define HDMI_I2S_CLK_CON		HDMI_I2S_BASE(0x000)
++#define HDMI_I2S_CON_1			HDMI_I2S_BASE(0x004)
++#define HDMI_I2S_CON_2			HDMI_I2S_BASE(0x008)
++#define HDMI_I2S_PIN_SEL_0		HDMI_I2S_BASE(0x00c)
++#define HDMI_I2S_PIN_SEL_1		HDMI_I2S_BASE(0x010)
++#define HDMI_I2S_PIN_SEL_2		HDMI_I2S_BASE(0x014)
++#define HDMI_I2S_PIN_SEL_3		HDMI_I2S_BASE(0x018)
++#define HDMI_I2S_DSD_CON		HDMI_I2S_BASE(0x01c)
++#define HDMI_I2S_MUX_CON		HDMI_I2S_BASE(0x020)
++#define HDMI_I2S_CH_ST_CON		HDMI_I2S_BASE(0x024)
++#define HDMI_I2S_CH_ST_0		HDMI_I2S_BASE(0x028)
++#define HDMI_I2S_CH_ST_1		HDMI_I2S_BASE(0x02c)
++#define HDMI_I2S_CH_ST_2		HDMI_I2S_BASE(0x030)
++#define HDMI_I2S_CH_ST_3		HDMI_I2S_BASE(0x034)
++#define HDMI_I2S_CH_ST_4		HDMI_I2S_BASE(0x038)
++#define HDMI_I2S_CH_ST_SH_0		HDMI_I2S_BASE(0x03c)
++#define HDMI_I2S_CH_ST_SH_1		HDMI_I2S_BASE(0x040)
++#define HDMI_I2S_CH_ST_SH_2		HDMI_I2S_BASE(0x044)
++#define HDMI_I2S_CH_ST_SH_3		HDMI_I2S_BASE(0x048)
++#define HDMI_I2S_CH_ST_SH_4		HDMI_I2S_BASE(0x04c)
++#define HDMI_I2S_MUX_CH			HDMI_I2S_BASE(0x054)
++#define HDMI_I2S_MUX_CUV		HDMI_I2S_BASE(0x058)
++
++/* I2S bit definition */
++
++/* I2S_CLK_CON */
++#define HDMI_I2S_CLK_DIS		(0)
++#define HDMI_I2S_CLK_EN			(1)
++
++/* I2S_CON_1 */
++#define HDMI_I2S_SCLK_FALLING_EDGE	(0 << 1)
++#define HDMI_I2S_SCLK_RISING_EDGE	(1 << 1)
++#define HDMI_I2S_L_CH_LOW_POL		(0)
++#define HDMI_I2S_L_CH_HIGH_POL		(1)
++
++/* I2S_CON_2 */
++#define HDMI_I2S_MSB_FIRST_MODE		(0 << 6)
++#define HDMI_I2S_LSB_FIRST_MODE		(1 << 6)
++#define HDMI_I2S_BIT_CH_32FS		(0 << 4)
++#define HDMI_I2S_BIT_CH_48FS		(1 << 4)
++#define HDMI_I2S_BIT_CH_RESERVED	(2 << 4)
++#define HDMI_I2S_SDATA_16BIT		(1 << 2)
++#define HDMI_I2S_SDATA_20BIT		(2 << 2)
++#define HDMI_I2S_SDATA_24BIT		(3 << 2)
++#define HDMI_I2S_BASIC_FORMAT		(0)
++#define HDMI_I2S_L_JUST_FORMAT		(2)
++#define HDMI_I2S_R_JUST_FORMAT		(3)
++#define HDMI_I2S_CON_2_CLR		(~(0xFF))
++#define HDMI_I2S_SET_BIT_CH(x)		(((x) & 0x7) << 4)
++#define HDMI_I2S_SET_SDATA_BIT(x)	(((x) & 0x7) << 2)
++
++/* I2S_PIN_SEL_0 */
++#define HDMI_I2S_SEL_SCLK(x)		(((x) & 0x7) << 4)
++#define HDMI_I2S_SEL_LRCK(x)		((x) & 0x7)
++
++/* I2S_PIN_SEL_1 */
++#define HDMI_I2S_SEL_SDATA1(x)		(((x) & 0x7) << 4)
++#define HDMI_I2S_SEL_SDATA2(x)		((x) & 0x7)
++
++/* I2S_PIN_SEL_2 */
++#define HDMI_I2S_SEL_SDATA3(x)		(((x) & 0x7) << 4)
++#define HDMI_I2S_SEL_SDATA2(x)		((x) & 0x7)
++
++/* I2S_PIN_SEL_3 */
++#define HDMI_I2S_SEL_DSD(x)		((x) & 0x7)
++
++/* I2S_DSD_CON */
++#define HDMI_I2S_DSD_CLK_RI_EDGE	(1 << 1)
++#define HDMI_I2S_DSD_CLK_FA_EDGE	(0 << 1)
++#define HDMI_I2S_DSD_ENABLE		(1)
++#define HDMI_I2S_DSD_DISABLE		(0)
++
++/* I2S_MUX_CON */
++#define HDMI_I2S_NOISE_FILTER_ZERO	(0 << 5)
++#define HDMI_I2S_NOISE_FILTER_2_STAGE	(1 << 5)
++#define HDMI_I2S_NOISE_FILTER_3_STAGE	(2 << 5)
++#define HDMI_I2S_NOISE_FILTER_4_STAGE	(3 << 5)
++#define HDMI_I2S_NOISE_FILTER_5_STAGE	(4 << 5)
++#define HDMI_I2S_IN_DISABLE		(1 << 4)
++#define HDMI_I2S_IN_ENABLE		(0 << 4)
++#define HDMI_I2S_AUD_SPDIF		(0 << 2)
++#define HDMI_I2S_AUD_I2S		(1 << 2)
++#define HDMI_I2S_AUD_DSD		(2 << 2)
++#define HDMI_I2S_CUV_SPDIF_ENABLE	(0 << 1)
++#define HDMI_I2S_CUV_I2S_ENABLE		(1 << 1)
++#define HDMI_I2S_MUX_DISABLE		(0)
++#define HDMI_I2S_MUX_ENABLE		(1)
++#define HDMI_I2S_MUX_CON_CLR		(~(0xFF))
++
++/* I2S_CH_ST_CON */
++#define HDMI_I2S_CH_STATUS_RELOAD	(1)
++#define HDMI_I2S_CH_ST_CON_CLR		(~(1))
++
++/* I2S_CH_ST_0 / I2S_CH_ST_SH_0 */
++#define HDMI_I2S_CH_STATUS_MODE_0	(0 << 6)
++#define HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH	(0 << 3)
++#define HDMI_I2S_2AUD_CH_WITH_PREEMPH	(1 << 3)
++#define HDMI_I2S_DEFAULT_EMPHASIS	(0 << 3)
++#define HDMI_I2S_COPYRIGHT		(0 << 2)
++#define HDMI_I2S_NO_COPYRIGHT		(1 << 2)
++#define HDMI_I2S_LINEAR_PCM		(0 << 1)
++#define HDMI_I2S_NO_LINEAR_PCM		(1 << 1)
++#define HDMI_I2S_CONSUMER_FORMAT	(0)
++#define HDMI_I2S_PROF_FORMAT		(1)
++#define HDMI_I2S_CH_ST_0_CLR		(~(0xFF))
++
++/* I2S_CH_ST_1 / I2S_CH_ST_SH_1 */
++#define HDMI_I2S_CD_PLAYER		(0x00)
++#define HDMI_I2S_DAT_PLAYER		(0x03)
++#define HDMI_I2S_DCC_PLAYER		(0x43)
++#define HDMI_I2S_MINI_DISC_PLAYER	(0x49)
++
++/* I2S_CH_ST_2 / I2S_CH_ST_SH_2 */
++#define HDMI_I2S_CHANNEL_NUM_MASK	(0xF << 4)
++#define HDMI_I2S_SOURCE_NUM_MASK	(0xF)
++#define HDMI_I2S_SET_CHANNEL_NUM(x)	(((x) & (0xF)) << 4)
++#define HDMI_I2S_SET_SOURCE_NUM(x)	((x) & (0xF))
++
++/* I2S_CH_ST_3 / I2S_CH_ST_SH_3 */
++#define HDMI_I2S_CLK_ACCUR_LEVEL_1	(1 << 4)
++#define HDMI_I2S_CLK_ACCUR_LEVEL_2	(0 << 4)
++#define HDMI_I2S_CLK_ACCUR_LEVEL_3	(2 << 4)
++#define HDMI_I2S_SMP_FREQ_44_1		(0x0)
++#define HDMI_I2S_SMP_FREQ_48		(0x2)
++#define HDMI_I2S_SMP_FREQ_32		(0x3)
++#define HDMI_I2S_SMP_FREQ_96		(0xA)
++#define HDMI_I2S_SET_SMP_FREQ(x)	((x) & (0xF))
++
++/* I2S_CH_ST_4 / I2S_CH_ST_SH_4 */
++#define HDMI_I2S_ORG_SMP_FREQ_44_1	(0xF << 4)
++#define HDMI_I2S_ORG_SMP_FREQ_88_2	(0x7 << 4)
++#define HDMI_I2S_ORG_SMP_FREQ_22_05	(0xB << 4)
++#define HDMI_I2S_ORG_SMP_FREQ_176_4	(0x3 << 4)
++#define HDMI_I2S_WORD_LEN_NOT_DEFINE	(0x0 << 1)
++#define HDMI_I2S_WORD_LEN_MAX24_20BITS	(0x1 << 1)
++#define HDMI_I2S_WORD_LEN_MAX24_22BITS	(0x2 << 1)
++#define HDMI_I2S_WORD_LEN_MAX24_23BITS	(0x4 << 1)
++#define HDMI_I2S_WORD_LEN_MAX24_24BITS	(0x5 << 1)
++#define HDMI_I2S_WORD_LEN_MAX24_21BITS	(0x6 << 1)
++#define HDMI_I2S_WORD_LEN_MAX20_16BITS	(0x1 << 1)
++#define HDMI_I2S_WORD_LEN_MAX20_18BITS	(0x2 << 1)
++#define HDMI_I2S_WORD_LEN_MAX20_19BITS	(0x4 << 1)
++#define HDMI_I2S_WORD_LEN_MAX20_20BITS	(0x5 << 1)
++#define HDMI_I2S_WORD_LEN_MAX20_17BITS	(0x6 << 1)
++#define HDMI_I2S_WORD_LEN_MAX_24BITS	(1)
++#define HDMI_I2S_WORD_LEN_MAX_20BITS	(0)
++
++/* I2S_MUX_CH */
++#define HDMI_I2S_CH3_R_EN		(1 << 7)
++#define HDMI_I2S_CH3_L_EN		(1 << 6)
++#define HDMI_I2S_CH3_EN			(3 << 6)
++#define HDMI_I2S_CH2_R_EN		(1 << 5)
++#define HDMI_I2S_CH2_L_EN		(1 << 4)
++#define HDMI_I2S_CH2_EN			(3 << 4)
++#define HDMI_I2S_CH1_R_EN		(1 << 3)
++#define HDMI_I2S_CH1_L_EN		(1 << 2)
++#define HDMI_I2S_CH1_EN			(3 << 2)
++#define HDMI_I2S_CH0_R_EN		(1 << 1)
++#define HDMI_I2S_CH0_L_EN		(1)
++#define HDMI_I2S_CH0_EN			(3)
++#define HDMI_I2S_CH_ALL_EN		(0xFF)
++#define HDMI_I2S_MUX_CH_CLR		(~HDMI_I2S_CH_ALL_EN)
++
++/* I2S_MUX_CUV */
++#define HDMI_I2S_CUV_R_EN		(1 << 1)
++#define HDMI_I2S_CUV_L_EN		(1)
++#define HDMI_I2S_CUV_RL_EN		(0x03)
++
++/* I2S_CUV_L_R */
++#define HDMI_I2S_CUV_R_DATA_MASK	(0x7 << 4)
++#define HDMI_I2S_CUV_L_DATA_MASK	(0x7)
++
++/* Timing generator registers */
++/* TG configure/status registers */
++#define HDMI_TG_VACT_ST3_L		HDMI_TG_BASE(0x0068)
++#define HDMI_TG_VACT_ST3_H		HDMI_TG_BASE(0x006c)
++#define HDMI_TG_VACT_ST4_L		HDMI_TG_BASE(0x0070)
++#define HDMI_TG_VACT_ST4_H		HDMI_TG_BASE(0x0074)
++#define HDMI_TG_3D			HDMI_TG_BASE(0x00F0)
++
++#endif /* SAMSUNG_REGS_HDMI_H */
+diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
+new file mode 100644
+index 0000000..fd2f4d1
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/regs-mixer.h
+@@ -0,0 +1,141 @@
++/*
++ *
++ *  Cloned from drivers/media/video/s5p-tv/regs-mixer.h
++ *
++ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
++ * http://www.samsung.com/
++ *
++ * Mixer register header file for Samsung Mixer driver
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++*/
++#ifndef SAMSUNG_REGS_MIXER_H
++#define SAMSUNG_REGS_MIXER_H
++
++/*
++ * Register part
++ */
++#define MXR_STATUS			0x0000
++#define MXR_CFG				0x0004
++#define MXR_INT_EN			0x0008
++#define MXR_INT_STATUS			0x000C
++#define MXR_LAYER_CFG			0x0010
++#define MXR_VIDEO_CFG			0x0014
++#define MXR_GRAPHIC0_CFG		0x0020
++#define MXR_GRAPHIC0_BASE		0x0024
++#define MXR_GRAPHIC0_SPAN		0x0028
++#define MXR_GRAPHIC0_SXY		0x002C
++#define MXR_GRAPHIC0_WH			0x0030
++#define MXR_GRAPHIC0_DXY		0x0034
++#define MXR_GRAPHIC0_BLANK		0x0038
++#define MXR_GRAPHIC1_CFG		0x0040
++#define MXR_GRAPHIC1_BASE		0x0044
++#define MXR_GRAPHIC1_SPAN		0x0048
++#define MXR_GRAPHIC1_SXY		0x004C
++#define MXR_GRAPHIC1_WH			0x0050
++#define MXR_GRAPHIC1_DXY		0x0054
++#define MXR_GRAPHIC1_BLANK		0x0058
++#define MXR_BG_CFG			0x0060
++#define MXR_BG_COLOR0			0x0064
++#define MXR_BG_COLOR1			0x0068
++#define MXR_BG_COLOR2			0x006C
++#define MXR_CM_COEFF_Y			0x0080
++#define MXR_CM_COEFF_CB			0x0084
++#define MXR_CM_COEFF_CR			0x0088
++#define MXR_GRAPHIC0_BASE_S		0x2024
++#define MXR_GRAPHIC1_BASE_S		0x2044
++
++/* for parametrized access to layer registers */
++#define MXR_GRAPHIC_CFG(i)		(0x0020 + (i) * 0x20)
++#define MXR_GRAPHIC_BASE(i)		(0x0024 + (i) * 0x20)
++#define MXR_GRAPHIC_SPAN(i)		(0x0028 + (i) * 0x20)
++#define MXR_GRAPHIC_SXY(i)		(0x002C + (i) * 0x20)
++#define MXR_GRAPHIC_WH(i)		(0x0030 + (i) * 0x20)
++#define MXR_GRAPHIC_DXY(i)		(0x0034 + (i) * 0x20)
++#define MXR_GRAPHIC_BLANK(i)		(0x0038 + (i) * 0x20)
++#define MXR_GRAPHIC_BASE_S(i)		(0x2024 + (i) * 0x20)
++
++/*
++ * Bit definition part
++ */
++
++/* generates mask for range of bits */
++#define MXR_MASK(high_bit, low_bit) \
++	(((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
++
++#define MXR_MASK_VAL(val, high_bit, low_bit) \
++	(((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
++
++/* bits for MXR_STATUS */
++#define MXR_STATUS_16_BURST		(1 << 7)
++#define MXR_STATUS_BURST_MASK		(1 << 7)
++#define MXR_STATUS_BIG_ENDIAN		(1 << 3)
++#define MXR_STATUS_ENDIAN_MASK		(1 << 3)
++#define MXR_STATUS_SYNC_ENABLE		(1 << 2)
++#define MXR_STATUS_REG_RUN		(1 << 0)
++
++/* bits for MXR_CFG */
++#define MXR_CFG_RGB601_0_255		(0 << 9)
++#define MXR_CFG_RGB601_16_235		(1 << 9)
++#define MXR_CFG_RGB709_0_255		(2 << 9)
++#define MXR_CFG_RGB709_16_235		(3 << 9)
++#define MXR_CFG_RGB_FMT_MASK		0x600
++#define MXR_CFG_OUT_YUV444		(0 << 8)
++#define MXR_CFG_OUT_RGB888		(1 << 8)
++#define MXR_CFG_OUT_MASK		(1 << 8)
++#define MXR_CFG_DST_SDO			(0 << 7)
++#define MXR_CFG_DST_HDMI		(1 << 7)
++#define MXR_CFG_DST_MASK		(1 << 7)
++#define MXR_CFG_SCAN_HD_720		(0 << 6)
++#define MXR_CFG_SCAN_HD_1080		(1 << 6)
++#define MXR_CFG_GRP1_ENABLE		(1 << 5)
++#define MXR_CFG_GRP0_ENABLE		(1 << 4)
++#define MXR_CFG_VP_ENABLE		(1 << 3)
++#define MXR_CFG_SCAN_INTERLACE		(0 << 2)
++#define MXR_CFG_SCAN_PROGRASSIVE	(1 << 2)
++#define MXR_CFG_SCAN_NTSC		(0 << 1)
++#define MXR_CFG_SCAN_PAL		(1 << 1)
++#define MXR_CFG_SCAN_SD			(0 << 0)
++#define MXR_CFG_SCAN_HD			(1 << 0)
++#define MXR_CFG_SCAN_MASK		0x47
++
++/* bits for MXR_GRAPHICn_CFG */
++#define MXR_GRP_CFG_COLOR_KEY_DISABLE	(1 << 21)
++#define MXR_GRP_CFG_BLEND_PRE_MUL	(1 << 20)
++#define MXR_GRP_CFG_WIN_BLEND_EN	(1 << 17)
++#define MXR_GRP_CFG_PIXEL_BLEND_EN	(1 << 16)
++#define MXR_GRP_CFG_FORMAT_VAL(x)	MXR_MASK_VAL(x, 11, 8)
++#define MXR_GRP_CFG_FORMAT_MASK		MXR_GRP_CFG_FORMAT_VAL(~0)
++#define MXR_GRP_CFG_ALPHA_VAL(x)	MXR_MASK_VAL(x, 7, 0)
++
++/* bits for MXR_GRAPHICn_WH */
++#define MXR_GRP_WH_H_SCALE(x)		MXR_MASK_VAL(x, 28, 28)
++#define MXR_GRP_WH_V_SCALE(x)		MXR_MASK_VAL(x, 12, 12)
++#define MXR_GRP_WH_WIDTH(x)		MXR_MASK_VAL(x, 26, 16)
++#define MXR_GRP_WH_HEIGHT(x)		MXR_MASK_VAL(x, 10, 0)
++
++/* bits for MXR_GRAPHICn_SXY */
++#define MXR_GRP_SXY_SX(x)		MXR_MASK_VAL(x, 26, 16)
++#define MXR_GRP_SXY_SY(x)		MXR_MASK_VAL(x, 10, 0)
++
++/* bits for MXR_GRAPHICn_DXY */
++#define MXR_GRP_DXY_DX(x)		MXR_MASK_VAL(x, 26, 16)
++#define MXR_GRP_DXY_DY(x)		MXR_MASK_VAL(x, 10, 0)
++
++/* bits for MXR_INT_EN */
++#define MXR_INT_EN_VSYNC		(1 << 11)
++#define MXR_INT_EN_ALL			(0x0f << 8)
++
++/* bit for MXR_INT_STATUS */
++#define MXR_INT_CLEAR_VSYNC		(1 << 11)
++#define MXR_INT_STATUS_VSYNC		(1 << 0)
++
++/* bit for MXR_LAYER_CFG */
++#define MXR_LAYER_CFG_GRP1_VAL(x)	MXR_MASK_VAL(x, 11, 8)
++#define MXR_LAYER_CFG_GRP0_VAL(x)	MXR_MASK_VAL(x, 7, 4)
++#define MXR_LAYER_CFG_VP_VAL(x)		MXR_MASK_VAL(x, 3, 0)
++
++#endif /* SAMSUNG_REGS_MIXER_H */
++
+diff --git a/drivers/gpu/drm/exynos/regs-vp.h b/drivers/gpu/drm/exynos/regs-vp.h
+new file mode 100644
+index 0000000..10b737a
+--- /dev/null
++++ b/drivers/gpu/drm/exynos/regs-vp.h
+@@ -0,0 +1,91 @@
++/*
++ *
++ *  Cloned from drivers/media/video/s5p-tv/regs-vp.h
++ *
++ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
++ *		http://www.samsung.com/
++ *
++ * Video processor register header file for Samsung Mixer driver
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef SAMSUNG_REGS_VP_H
++#define SAMSUNG_REGS_VP_H
++
++/*
++ * Register part
++ */
++
++#define VP_ENABLE			0x0000
++#define VP_SRESET			0x0004
++#define VP_SHADOW_UPDATE		0x0008
++#define VP_FIELD_ID			0x000C
++#define VP_MODE				0x0010
++#define VP_IMG_SIZE_Y			0x0014
++#define VP_IMG_SIZE_C			0x0018
++#define VP_PER_RATE_CTRL		0x001C
++#define VP_TOP_Y_PTR			0x0028
++#define VP_BOT_Y_PTR			0x002C
++#define VP_TOP_C_PTR			0x0030
++#define VP_BOT_C_PTR			0x0034
++#define VP_ENDIAN_MODE			0x03CC
++#define VP_SRC_H_POSITION		0x0044
++#define VP_SRC_V_POSITION		0x0048
++#define VP_SRC_WIDTH			0x004C
++#define VP_SRC_HEIGHT			0x0050
++#define VP_DST_H_POSITION		0x0054
++#define VP_DST_V_POSITION		0x0058
++#define VP_DST_WIDTH			0x005C
++#define VP_DST_HEIGHT			0x0060
++#define VP_H_RATIO			0x0064
++#define VP_V_RATIO			0x0068
++#define VP_POLY8_Y0_LL			0x006C
++#define VP_POLY4_Y0_LL			0x00EC
++#define VP_POLY4_C0_LL			0x012C
++
++/*
++ * Bit definition part
++ */
++
++/* generates mask for range of bits */
++
++#define VP_MASK(high_bit, low_bit) \
++	(((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
++
++#define VP_MASK_VAL(val, high_bit, low_bit) \
++	(((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
++
++ /* VP_ENABLE */
++#define VP_ENABLE_ON			(1 << 0)
++
++/* VP_SRESET */
++#define VP_SRESET_PROCESSING		(1 << 0)
++
++/* VP_SHADOW_UPDATE */
++#define VP_SHADOW_UPDATE_ENABLE		(1 << 0)
++
++/* VP_MODE */
++#define VP_MODE_NV12			(0 << 6)
++#define VP_MODE_NV21			(1 << 6)
++#define VP_MODE_LINE_SKIP		(1 << 5)
++#define VP_MODE_MEM_LINEAR		(0 << 4)
++#define VP_MODE_MEM_TILED		(1 << 4)
++#define VP_MODE_FMT_MASK		(5 << 4)
++#define VP_MODE_FIELD_ID_AUTO_TOGGLING	(1 << 2)
++#define VP_MODE_2D_IPC			(1 << 1)
++
++/* VP_IMG_SIZE_Y */
++/* VP_IMG_SIZE_C */
++#define VP_IMG_HSIZE(x)			VP_MASK_VAL(x, 29, 16)
++#define VP_IMG_VSIZE(x)			VP_MASK_VAL(x, 13, 0)
++
++/* VP_SRC_H_POSITION */
++#define VP_SRC_H_POSITION_VAL(x)	VP_MASK_VAL(x, 14, 4)
++
++/* VP_ENDIAN_MODE */
++#define VP_ENDIAN_MODE_LITTLE		(1 << 0)
++
++#endif /* SAMSUNG_REGS_VP_H */
+diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
+new file mode 100644
+index 0000000..42e665c
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/Kconfig
+@@ -0,0 +1,33 @@
++config DRM_GMA500
++	tristate "Intel GMA5/600 KMS Framebuffer"
++	depends on DRM && PCI && X86 && EXPERIMENTAL
++	select FB_CFB_COPYAREA
++        select FB_CFB_FILLRECT
++        select FB_CFB_IMAGEBLIT
++        select DRM_KMS_HELPER
++        select DRM_TTM
++	help
++	  Say yes for an experimental 2D KMS framebuffer driver for the
++	  Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
++	  devices.
++
++config DRM_GMA600
++	bool "Intel GMA600 support (Experimental)"
++	depends on DRM_GMA500
++	help
++	  Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
++	  platforms with LVDS ports. MIPI is not currently supported.
++
++config DRM_GMA3600
++	bool "Intel GMA3600/3650 support (Experimental)"
++	depends on DRM_GMA500
++	help
++	  Say yes to include basic support for Intel GMA3600/3650 (Intel
++	  Cedar Trail) platforms.
++
++config DRM_MEDFIELD
++	bool "Intel Medfield support (Experimental)"
++	depends on DRM_GMA500 && X86_INTEL_MID
++	help
++	  Say yes to include support for the Intel Medfield platform.
++
+diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
+new file mode 100644
+index 0000000..1583982
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/Makefile
+@@ -0,0 +1,50 @@
++#
++#	KMS driver for the GMA500
++#
++ccflags-y += -Iinclude/drm
++
++gma500_gfx-y += gem_glue.o \
++	  accel_2d.o \
++	  backlight.o \
++	  framebuffer.o \
++	  gem.o \
++	  gtt.o \
++	  intel_bios.o \
++	  intel_i2c.o \
++	  intel_gmbus.o \
++	  intel_opregion.o \
++	  mmu.o \
++	  power.o \
++	  psb_drv.o \
++	  psb_intel_display.o \
++	  psb_intel_lvds.o \
++	  psb_intel_modes.o \
++	  psb_intel_sdvo.o \
++	  psb_lid.o \
++	  psb_irq.o \
++	  psb_device.o \
++	  mid_bios.o
++
++gma500_gfx-$(CONFIG_DRM_GMA3600) +=  cdv_device.o \
++	  cdv_intel_crt.o \
++	  cdv_intel_display.o \
++	  cdv_intel_hdmi.o \
++	  cdv_intel_lvds.o
++
++gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
++	  oaktrail_crtc.o \
++	  oaktrail_lvds.o \
++	  oaktrail_hdmi.o \
++	  oaktrail_hdmi_i2c.o
++
++gma500_gfx-$(CONFIG_DRM_MEDFIELD) += mdfld_device.o \
++	  mdfld_output.o \
++	  mdfld_intel_display.o \
++	  mdfld_dsi_output.o \
++	  mdfld_dsi_dpi.o \
++	  mdfld_dsi_pkg_sender.o \
++	  mdfld_tpo_vid.o \
++	  mdfld_tmd_vid.o \
++	  tc35876x-dsi-lvds.o
++
++obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
+diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
+new file mode 100644
+index 0000000..d5ef1a5
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/accel_2d.c
+@@ -0,0 +1,364 @@
++/**************************************************************************
++ * Copyright (c) 2007-2011, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/tty.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/fb.h>
++#include <linux/init.h>
++#include <linux/console.h>
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "framebuffer.h"
++
++/**
++ *	psb_spank		-	reset the 2D engine
++ *	@dev_priv: our PSB DRM device
++ *
++ *	Soft reset the graphics engine and then reload the necessary registers.
++ *	We use this at initialisation time but it will become relevant for
++ *	accelerated X later
++ */
++void psb_spank(struct drm_psb_private *dev_priv)
++{
++	PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
++		_PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
++		_PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
++		_PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
++	PSB_RSGX32(PSB_CR_SOFT_RESET);
++
++	msleep(1);
++
++	PSB_WSGX32(0, PSB_CR_SOFT_RESET);
++	wmb();
++	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
++		   PSB_CR_BIF_CTRL);
++	wmb();
++	(void) PSB_RSGX32(PSB_CR_BIF_CTRL);
++
++	msleep(1);
++	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
++		   PSB_CR_BIF_CTRL);
++	(void) PSB_RSGX32(PSB_CR_BIF_CTRL);
++	PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
++}
++
++/**
++ *	psb_2d_wait_available	-	wait for FIFO room
++ *	@dev_priv: our DRM device
++ *	@size: size (in dwords) of the command we want to issue
++ *
++ *	Wait until there is room to load the FIFO with our data. If the
++ *	device is not responding then reset it
++ */
++static int psb_2d_wait_available(struct drm_psb_private *dev_priv,
++			  unsigned size)
++{
++	uint32_t avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
++	unsigned long t = jiffies + HZ;
++
++	while (avail < size) {
++		avail = PSB_RSGX32(PSB_CR_2D_SOCIF);
++		if (time_after(jiffies, t)) {
++			psb_spank(dev_priv);
++			return -EIO;
++		}
++	}
++	return 0;
++}
++
++/**
++ *	psbfb_2d_submit		-	submit a 2D command
++ *	@dev_priv: our DRM device
++ *	@cmdbuf: command to issue
++ *	@size: length (in dwords)
++ *
++ *	Issue one or more 2D commands to the accelerator. This needs to be
++ *	serialized later when we add the GEM interfaces for acceleration
++ */
++static int psbfb_2d_submit(struct drm_psb_private *dev_priv, uint32_t *cmdbuf,
++								unsigned size)
++{
++	int ret = 0;
++	int i;
++	unsigned submit_size;
++	unsigned long flags;
++
++	spin_lock_irqsave(&dev_priv->lock_2d, flags);
++	while (size > 0) {
++		submit_size = (size < 0x60) ? size : 0x60;
++		size -= submit_size;
++		ret = psb_2d_wait_available(dev_priv, submit_size);
++		if (ret)
++			break;
++
++		submit_size <<= 2;
++
++		for (i = 0; i < submit_size; i += 4)
++			PSB_WSGX32(*cmdbuf++, PSB_SGX_2D_SLAVE_PORT + i);
++
++		(void)PSB_RSGX32(PSB_SGX_2D_SLAVE_PORT + i - 4);
++	}
++	spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
++	return ret;
++}
++
++
++/**
++ *	psb_accel_2d_copy_direction	-	compute blit order
++ *	@xdir: X direction of move
++ *	@ydir: Y direction of move
++ *
++ *	Compute the correct order settings to ensure that an overlapping blit
++ *	correctly copies all the pixels.
++ */
++static u32 psb_accel_2d_copy_direction(int xdir, int ydir)
++{
++	if (xdir < 0)
++		return (ydir < 0) ? PSB_2D_COPYORDER_BR2TL :
++						PSB_2D_COPYORDER_TR2BL;
++	else
++		return (ydir < 0) ? PSB_2D_COPYORDER_BL2TR :
++						PSB_2D_COPYORDER_TL2BR;
++}
++
++/**
++ *	psb_accel_2d_copy		-	accelerated 2D copy
++ *	@dev_priv: our DRM device
++ *	@src_offset: source offset in bytes
++ *	@src_stride: source stride in bytes
++ *	@src_format: source psb 2D format defines
++ *	@dst_offset: destination offset in bytes
++ *	@dst_stride: destination stride in bytes
++ *	@dst_format: destination psb 2D format defines
++ *	@src_x: source X offset in pixels
++ *	@src_y: source Y offset in pixels
++ *	@dst_x: destination X offset in pixels
++ *	@dst_y: destination Y offset in pixels
++ *	@size_x: width of the copied area
++ *	@size_y: height of the copied area
++ *
++ *	Format and issue a 2D accelerated copy command.
++ */
++static int psb_accel_2d_copy(struct drm_psb_private *dev_priv,
++			     uint32_t src_offset, uint32_t src_stride,
++			     uint32_t src_format, uint32_t dst_offset,
++			     uint32_t dst_stride, uint32_t dst_format,
++			     uint16_t src_x, uint16_t src_y,
++			     uint16_t dst_x, uint16_t dst_y,
++			     uint16_t size_x, uint16_t size_y)
++{
++	uint32_t blit_cmd;
++	uint32_t buffer[10];
++	uint32_t *buf;
++	uint32_t direction;
++
++	buf = buffer;
++
++	direction =
++	    psb_accel_2d_copy_direction(src_x - dst_x, src_y - dst_y);
++
++	if (direction == PSB_2D_COPYORDER_BR2TL ||
++	    direction == PSB_2D_COPYORDER_TR2BL) {
++		src_x += size_x - 1;
++		dst_x += size_x - 1;
++	}
++	if (direction == PSB_2D_COPYORDER_BR2TL ||
++	    direction == PSB_2D_COPYORDER_BL2TR) {
++		src_y += size_y - 1;
++		dst_y += size_y - 1;
++	}
++
++	blit_cmd =
++	    PSB_2D_BLIT_BH |
++	    PSB_2D_ROT_NONE |
++	    PSB_2D_DSTCK_DISABLE |
++	    PSB_2D_SRCCK_DISABLE |
++	    PSB_2D_USE_PAT | PSB_2D_ROP3_SRCCOPY | direction;
++
++	*buf++ = PSB_2D_FENCE_BH;
++	*buf++ =
++	    PSB_2D_DST_SURF_BH | dst_format | (dst_stride <<
++					       PSB_2D_DST_STRIDE_SHIFT);
++	*buf++ = dst_offset;
++	*buf++ =
++	    PSB_2D_SRC_SURF_BH | src_format | (src_stride <<
++					       PSB_2D_SRC_STRIDE_SHIFT);
++	*buf++ = src_offset;
++	*buf++ =
++	    PSB_2D_SRC_OFF_BH | (src_x << PSB_2D_SRCOFF_XSTART_SHIFT) |
++	    (src_y << PSB_2D_SRCOFF_YSTART_SHIFT);
++	*buf++ = blit_cmd;
++	*buf++ =
++	    (dst_x << PSB_2D_DST_XSTART_SHIFT) | (dst_y <<
++						  PSB_2D_DST_YSTART_SHIFT);
++	*buf++ =
++	    (size_x << PSB_2D_DST_XSIZE_SHIFT) | (size_y <<
++						  PSB_2D_DST_YSIZE_SHIFT);
++	*buf++ = PSB_2D_FLUSH_BH;
++
++	return psbfb_2d_submit(dev_priv, buffer, buf - buffer);
++}
++
++/**
++ *	psbfb_copyarea_accel	-	copyarea acceleration for /dev/fb
++ *	@info: our framebuffer
++ *	@a: copyarea parameters from the framebuffer core
++ *
++ *	Perform a 2D copy via the accelerator
++ */
++static void psbfb_copyarea_accel(struct fb_info *info,
++				 const struct fb_copyarea *a)
++{
++	struct psb_fbdev *fbdev = info->par;
++	struct psb_framebuffer *psbfb = &fbdev->pfb;
++	struct drm_device *dev = psbfb->base.dev;
++	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	uint32_t offset;
++	uint32_t stride;
++	uint32_t src_format;
++	uint32_t dst_format;
++
++	if (!fb)
++		return;
++
++	offset = psbfb->gtt->offset;
++	stride = fb->pitches[0];
++
++	switch (fb->depth) {
++	case 8:
++		src_format = PSB_2D_SRC_332RGB;
++		dst_format = PSB_2D_DST_332RGB;
++		break;
++	case 15:
++		src_format = PSB_2D_SRC_555RGB;
++		dst_format = PSB_2D_DST_555RGB;
++		break;
++	case 16:
++		src_format = PSB_2D_SRC_565RGB;
++		dst_format = PSB_2D_DST_565RGB;
++		break;
++	case 24:
++	case 32:
++		/* this is wrong but since we don't do blending it's okay */
++		src_format = PSB_2D_SRC_8888ARGB;
++		dst_format = PSB_2D_DST_8888ARGB;
++		break;
++	default:
++		/* software fallback */
++		cfb_copyarea(info, a);
++		return;
++	}
++
++	if (!gma_power_begin(dev, false)) {
++		cfb_copyarea(info, a);
++		return;
++	}
++	psb_accel_2d_copy(dev_priv,
++			  offset, stride, src_format,
++			  offset, stride, dst_format,
++			  a->sx, a->sy, a->dx, a->dy, a->width, a->height);
++	gma_power_end(dev);
++}
++
++/**
++ *	psbfb_copyarea	-	2D copy interface
++ *	@info: our framebuffer
++ *	@region: region to copy
++ *
++ *	Copy an area of the framebuffer console either by the accelerator
++ *	or directly using the cfb helpers according to the request
++ */
++void psbfb_copyarea(struct fb_info *info,
++			   const struct fb_copyarea *region)
++{
++	if (unlikely(info->state != FBINFO_STATE_RUNNING))
++		return;
++
++	/* Avoid the 8 pixel erratum */
++	if (region->width == 8 || region->height == 8 ||
++		(info->flags & FBINFO_HWACCEL_DISABLED))
++		return cfb_copyarea(info, region);
++
++	psbfb_copyarea_accel(info, region);
++}
++
++/**
++ *	psbfb_sync	-	synchronize 2D
++ *	@info: our framebuffer
++ *
++ *	Wait for the 2D engine to quiesce so that we can do CPU
++ *	access to the framebuffer again
++ */
++int psbfb_sync(struct fb_info *info)
++{
++	struct psb_fbdev *fbdev = info->par;
++	struct psb_framebuffer *psbfb = &fbdev->pfb;
++	struct drm_device *dev = psbfb->base.dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	unsigned long _end = jiffies + DRM_HZ;
++	int busy = 0;
++	unsigned long flags;
++
++	spin_lock_irqsave(&dev_priv->lock_2d, flags);
++	/*
++	 * First idle the 2D engine.
++	 */
++
++	if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
++	    ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
++		goto out;
++
++	do {
++		busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
++		cpu_relax();
++	} while (busy && !time_after_eq(jiffies, _end));
++
++	if (busy)
++		busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
++	if (busy)
++		goto out;
++
++	do {
++		busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
++						_PSB_C2B_STATUS_BUSY) != 0);
++		cpu_relax();
++	} while (busy && !time_after_eq(jiffies, _end));
++	if (busy)
++		busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
++					_PSB_C2B_STATUS_BUSY) != 0);
++
++out:
++	spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
++	return (busy) ? -EBUSY : 0;
++}
+diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
+new file mode 100644
+index 0000000..2079395
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/backlight.c
+@@ -0,0 +1,49 @@
++/*
++ * GMA500 Backlight Interface
++ *
++ * Copyright (c) 2009-2011, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Eric Knopp
++ *
++ */
++
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_drv.h"
++#include "intel_bios.h"
++#include "power.h"
++
++int gma_backlight_init(struct drm_device *dev)
++{
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	return dev_priv->ops->backlight_init(dev);
++#else
++	return 0;
++#endif
++}
++
++void gma_backlight_exit(struct drm_device *dev)
++{
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	if (dev_priv->backlight_device) {
++		dev_priv->backlight_device->props.brightness = 0;
++		backlight_update_status(dev_priv->backlight_device);
++		backlight_device_unregister(dev_priv->backlight_device);
++	}
++#endif
++}
+diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
+new file mode 100644
+index 0000000..a54cc73
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/cdv_device.c
+@@ -0,0 +1,484 @@
++/**************************************************************************
++ * Copyright (c) 2011, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <linux/backlight.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "gma_drm.h"
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "psb_intel_reg.h"
++#include "intel_bios.h"
++#include "cdv_device.h"
++
++#define VGA_SR_INDEX		0x3c4
++#define VGA_SR_DATA		0x3c5
++
++static void cdv_disable_vga(struct drm_device *dev)
++{
++	u8 sr1;
++	u32 vga_reg;
++
++	vga_reg = VGACNTRL;
++
++	outb(1, VGA_SR_INDEX);
++	sr1 = inb(VGA_SR_DATA);
++	outb(sr1 | 1<<5, VGA_SR_DATA);
++	udelay(300);
++
++	REG_WRITE(vga_reg, VGA_DISP_DISABLE);
++	REG_READ(vga_reg);
++}
++
++static int cdv_output_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	cdv_disable_vga(dev);
++
++	cdv_intel_crt_init(dev, &dev_priv->mode_dev);
++	cdv_intel_lvds_init(dev, &dev_priv->mode_dev);
++
++	/* These bits indicate HDMI not SDVO on CDV, but we don't yet support
++	   the HDMI interface */
++	if (REG_READ(SDVOB) & SDVO_DETECTED)
++		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOB);
++	if (REG_READ(SDVOC) & SDVO_DETECTED)
++		cdv_hdmi_init(dev, &dev_priv->mode_dev, SDVOC);
++	return 0;
++}
++
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++
++/*
++ *	Poulsbo Backlight Interfaces
++ */
++
++#define BLC_PWM_PRECISION_FACTOR 100	/* 10000000 */
++#define BLC_PWM_FREQ_CALC_CONSTANT 32
++#define MHz 1000000
++
++#define PSB_BLC_PWM_PRECISION_FACTOR    10
++#define PSB_BLC_MAX_PWM_REG_FREQ        0xFFFE
++#define PSB_BLC_MIN_PWM_REG_FREQ        0x2
++
++#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
++#define PSB_BACKLIGHT_PWM_CTL_SHIFT	(16)
++
++static int cdv_brightness;
++static struct backlight_device *cdv_backlight_device;
++
++static int cdv_get_brightness(struct backlight_device *bd)
++{
++	/* return locally cached var instead of HW read (due to DPST etc.) */
++	/* FIXME: ideally return actual value in case firmware fiddled with
++	   it */
++	return cdv_brightness;
++}
++
++
++static int cdv_backlight_setup(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	unsigned long core_clock;
++	/* u32 bl_max_freq; */
++	/* unsigned long value; */
++	u16 bl_max_freq;
++	uint32_t value;
++	uint32_t blc_pwm_precision_factor;
++
++	/* get bl_max_freq and pol from dev_priv*/
++	if (!dev_priv->lvds_bl) {
++		dev_err(dev->dev, "Has no valid LVDS backlight info\n");
++		return -ENOENT;
++	}
++	bl_max_freq = dev_priv->lvds_bl->freq;
++	blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
++
++	core_clock = dev_priv->core_freq;
++
++	value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
++	value *= blc_pwm_precision_factor;
++	value /= bl_max_freq;
++	value /= blc_pwm_precision_factor;
++
++	if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
++		 value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
++				return -ERANGE;
++	else {
++		/* FIXME */
++	}
++	return 0;
++}
++
++static int cdv_set_brightness(struct backlight_device *bd)
++{
++	int level = bd->props.brightness;
++
++	/* Percentage 1-100% being valid */
++	if (level < 1)
++		level = 1;
++
++	/*cdv_intel_lvds_set_brightness(dev, level); FIXME */
++	cdv_brightness = level;
++	return 0;
++}
++
++static const struct backlight_ops cdv_ops = {
++	.get_brightness = cdv_get_brightness,
++	.update_status  = cdv_set_brightness,
++};
++
++static int cdv_backlight_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	int ret;
++	struct backlight_properties props;
++
++	memset(&props, 0, sizeof(struct backlight_properties));
++	props.max_brightness = 100;
++	props.type = BACKLIGHT_PLATFORM;
++
++	cdv_backlight_device = backlight_device_register("psb-bl",
++					NULL, (void *)dev, &cdv_ops, &props);
++	if (IS_ERR(cdv_backlight_device))
++		return PTR_ERR(cdv_backlight_device);
++
++	ret = cdv_backlight_setup(dev);
++	if (ret < 0) {
++		backlight_device_unregister(cdv_backlight_device);
++		cdv_backlight_device = NULL;
++		return ret;
++	}
++	cdv_backlight_device->props.brightness = 100;
++	cdv_backlight_device->props.max_brightness = 100;
++	backlight_update_status(cdv_backlight_device);
++	dev_priv->backlight_device = cdv_backlight_device;
++	return 0;
++}
++
++#endif
++
++/*
++ *	Provide the Cedarview specific chip logic and low level methods
++ *	for power management
++ *
++ *	FIXME: we need to implement the apm/ospm base management bits
++ *	for this and the MID devices.
++ */
++
++static inline u32 CDV_MSG_READ32(uint port, uint offset)
++{
++	int mcr = (0x10<<24) | (port << 16) | (offset << 8);
++	uint32_t ret_val = 0;
++	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++	pci_write_config_dword(pci_root, 0xD0, mcr);
++	pci_read_config_dword(pci_root, 0xD4, &ret_val);
++	pci_dev_put(pci_root);
++	return ret_val;
++}
++
++static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
++{
++	int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
++	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++	pci_write_config_dword(pci_root, 0xD4, value);
++	pci_write_config_dword(pci_root, 0xD0, mcr);
++	pci_dev_put(pci_root);
++}
++
++#define PSB_PM_SSC			0x20
++#define PSB_PM_SSS			0x30
++#define PSB_PWRGT_GFX_ON		0x02
++#define PSB_PWRGT_GFX_OFF		0x01
++#define PSB_PWRGT_GFX_D0		0x00
++#define PSB_PWRGT_GFX_D3		0x03
++
++static void cdv_init_pm(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 pwr_cnt;
++	int i;
++
++	dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
++							PSB_APMBA) & 0xFFFF;
++	dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
++							PSB_OSPMBA) & 0xFFFF;
++
++	/* Power status */
++	pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
++
++	/* Enable the GPU */
++	pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
++	pwr_cnt |= PSB_PWRGT_GFX_ON;
++	outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
++
++	/* Wait for the GPU power */
++	for (i = 0; i < 5; i++) {
++		u32 pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
++		if ((pwr_sts & PSB_PWRGT_GFX_MASK) == 0)
++			return;
++		udelay(10);
++	}
++	dev_err(dev->dev, "GPU: power management timed out.\n");
++}
++
++/**
++ *	cdv_save_display_registers	-	save registers lost on suspend
++ *	@dev: our DRM device
++ *
++ *	Save the state we need in order to be able to restore the interface
++ *	upon resume from suspend
++ */
++static int cdv_save_display_registers(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_save_area *regs = &dev_priv->regs;
++	struct drm_connector *connector;
++
++	dev_info(dev->dev, "Saving GPU registers.\n");
++
++	pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB);
++
++	regs->cdv.saveDSPCLK_GATE_D = REG_READ(DSPCLK_GATE_D);
++	regs->cdv.saveRAMCLK_GATE_D = REG_READ(RAMCLK_GATE_D);
++
++	regs->cdv.saveDSPARB = REG_READ(DSPARB);
++	regs->cdv.saveDSPFW[0] = REG_READ(DSPFW1);
++	regs->cdv.saveDSPFW[1] = REG_READ(DSPFW2);
++	regs->cdv.saveDSPFW[2] = REG_READ(DSPFW3);
++	regs->cdv.saveDSPFW[3] = REG_READ(DSPFW4);
++	regs->cdv.saveDSPFW[4] = REG_READ(DSPFW5);
++	regs->cdv.saveDSPFW[5] = REG_READ(DSPFW6);
++
++	regs->cdv.saveADPA = REG_READ(ADPA);
++
++	regs->cdv.savePP_CONTROL = REG_READ(PP_CONTROL);
++	regs->cdv.savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
++	regs->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++	regs->saveBLC_PWM_CTL2 = REG_READ(BLC_PWM_CTL2);
++	regs->cdv.saveLVDS = REG_READ(LVDS);
++
++	regs->cdv.savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
++
++	regs->cdv.savePP_ON_DELAYS = REG_READ(PP_ON_DELAYS);
++	regs->cdv.savePP_OFF_DELAYS = REG_READ(PP_OFF_DELAYS);
++	regs->cdv.savePP_CYCLE = REG_READ(PP_CYCLE);
++
++	regs->cdv.saveVGACNTRL = REG_READ(VGACNTRL);
++
++	regs->cdv.saveIER = REG_READ(PSB_INT_ENABLE_R);
++	regs->cdv.saveIMR = REG_READ(PSB_INT_MASK_R);
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
++		connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
++
++	return 0;
++}
++
++/**
++ *	cdv_restore_display_registers	-	restore lost register state
++ *	@dev: our DRM device
++ *
++ *	Restore register state that was lost during suspend and resume.
++ *
++ *	FIXME: review
++ */
++static int cdv_restore_display_registers(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_save_area *regs = &dev_priv->regs;
++	struct drm_connector *connector;
++	u32 temp;
++
++	pci_write_config_byte(dev->pdev, 0xF4, regs->cdv.saveLBB);
++
++	REG_WRITE(DSPCLK_GATE_D, regs->cdv.saveDSPCLK_GATE_D);
++	REG_WRITE(RAMCLK_GATE_D, regs->cdv.saveRAMCLK_GATE_D);
++
++	/* BIOS does below anyway */
++	REG_WRITE(DPIO_CFG, 0);
++	REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
++
++	temp = REG_READ(DPLL_A);
++	if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) {
++		REG_WRITE(DPLL_A, temp | DPLL_SYNCLOCK_ENABLE);
++		REG_READ(DPLL_A);
++	}
++
++	temp = REG_READ(DPLL_B);
++	if ((temp & DPLL_SYNCLOCK_ENABLE) == 0) {
++		REG_WRITE(DPLL_B, temp | DPLL_SYNCLOCK_ENABLE);
++		REG_READ(DPLL_B);
++	}
++
++	udelay(500);
++
++	REG_WRITE(DSPFW1, regs->cdv.saveDSPFW[0]);
++	REG_WRITE(DSPFW2, regs->cdv.saveDSPFW[1]);
++	REG_WRITE(DSPFW3, regs->cdv.saveDSPFW[2]);
++	REG_WRITE(DSPFW4, regs->cdv.saveDSPFW[3]);
++	REG_WRITE(DSPFW5, regs->cdv.saveDSPFW[4]);
++	REG_WRITE(DSPFW6, regs->cdv.saveDSPFW[5]);
++
++	REG_WRITE(DSPARB, regs->cdv.saveDSPARB);
++	REG_WRITE(ADPA, regs->cdv.saveADPA);
++
++	REG_WRITE(BLC_PWM_CTL2, regs->saveBLC_PWM_CTL2);
++	REG_WRITE(LVDS, regs->cdv.saveLVDS);
++	REG_WRITE(PFIT_CONTROL, regs->cdv.savePFIT_CONTROL);
++	REG_WRITE(PFIT_PGM_RATIOS, regs->cdv.savePFIT_PGM_RATIOS);
++	REG_WRITE(BLC_PWM_CTL, regs->saveBLC_PWM_CTL);
++	REG_WRITE(PP_ON_DELAYS, regs->cdv.savePP_ON_DELAYS);
++	REG_WRITE(PP_OFF_DELAYS, regs->cdv.savePP_OFF_DELAYS);
++	REG_WRITE(PP_CYCLE, regs->cdv.savePP_CYCLE);
++	REG_WRITE(PP_CONTROL, regs->cdv.savePP_CONTROL);
++
++	REG_WRITE(VGACNTRL, regs->cdv.saveVGACNTRL);
++
++	REG_WRITE(PSB_INT_ENABLE_R, regs->cdv.saveIER);
++	REG_WRITE(PSB_INT_MASK_R, regs->cdv.saveIMR);
++
++	/* Fix arbitration bug */
++	CDV_MSG_WRITE32(3, 0x30, 0x08027108);
++
++	drm_mode_config_reset(dev);
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
++		connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
++
++	/* Resume the modeset for every activated CRTC */
++	drm_helper_resume_force_mode(dev);
++	return 0;
++}
++
++static int cdv_power_down(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 pwr_cnt, pwr_mask, pwr_sts;
++	int tries = 5;
++
++	pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
++	pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
++	pwr_cnt |= PSB_PWRGT_GFX_OFF;
++	pwr_mask = PSB_PWRGT_GFX_MASK;
++
++	outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
++
++	while (tries--) {
++		pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
++		if ((pwr_sts & pwr_mask) == PSB_PWRGT_GFX_D3)
++			return 0;
++		udelay(10);
++	}
++	return 0;
++}
++
++static int cdv_power_up(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 pwr_cnt, pwr_mask, pwr_sts;
++	int tries = 5;
++
++	pwr_cnt = inl(dev_priv->apm_base + PSB_APM_CMD);
++	pwr_cnt &= ~PSB_PWRGT_GFX_MASK;
++	pwr_cnt |= PSB_PWRGT_GFX_ON;
++	pwr_mask = PSB_PWRGT_GFX_MASK;
++
++	outl(pwr_cnt, dev_priv->apm_base + PSB_APM_CMD);
++
++	while (tries--) {
++		pwr_sts = inl(dev_priv->apm_base + PSB_APM_STS);
++		if ((pwr_sts & pwr_mask) == PSB_PWRGT_GFX_D0)
++			return 0;
++		udelay(10);
++	}
++	return 0;
++}
++
++/* FIXME ? - shared with Poulsbo */
++static void cdv_get_core_freq(struct drm_device *dev)
++{
++	uint32_t clock;
++	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
++	pci_read_config_dword(pci_root, 0xD4, &clock);
++	pci_dev_put(pci_root);
++
++	switch (clock & 0x07) {
++	case 0:
++		dev_priv->core_freq = 100;
++		break;
++	case 1:
++		dev_priv->core_freq = 133;
++		break;
++	case 2:
++		dev_priv->core_freq = 150;
++		break;
++	case 3:
++		dev_priv->core_freq = 178;
++		break;
++	case 4:
++		dev_priv->core_freq = 200;
++		break;
++	case 5:
++	case 6:
++	case 7:
++		dev_priv->core_freq = 266;
++	default:
++		dev_priv->core_freq = 0;
++	}
++}
++
++static int cdv_chip_setup(struct drm_device *dev)
++{
++	cdv_get_core_freq(dev);
++	gma_intel_opregion_init(dev);
++	psb_intel_init_bios(dev);
++	REG_WRITE(PORT_HOTPLUG_EN, 0);
++	REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
++	return 0;
++}
++
++/* CDV is much like Poulsbo but has MID like SGX offsets and PM */
++
++const struct psb_ops cdv_chip_ops = {
++	.name = "GMA3600/3650",
++	.accel_2d = 0,
++	.pipes = 2,
++	.crtcs = 2,
++	.sgx_offset = MRST_SGX_OFFSET,
++	.chip_setup = cdv_chip_setup,
++
++	.crtc_helper = &cdv_intel_helper_funcs,
++	.crtc_funcs = &cdv_intel_crtc_funcs,
++
++	.output_init = cdv_output_init,
++
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++	.backlight_init = cdv_backlight_init,
++#endif
++
++	.init_pm = cdv_init_pm,
++	.save_regs = cdv_save_display_registers,
++	.restore_regs = cdv_restore_display_registers,
++	.power_down = cdv_power_down,
++	.power_up = cdv_power_up,
++};
+diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
+new file mode 100644
+index 0000000..9561e17
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/cdv_device.h
+@@ -0,0 +1,36 @@
++/*
++ * Copyright © 2011 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc., 
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
++extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
++extern void cdv_intel_crt_init(struct drm_device *dev,
++			struct psb_intel_mode_device *mode_dev);
++extern void cdv_intel_lvds_init(struct drm_device *dev,
++			struct psb_intel_mode_device *mode_dev);
++extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev,
++			int reg);
++extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
++					     struct drm_crtc *crtc);
++
++static inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
++{
++	/* Wait for 20ms, i.e. one cycle at 50hz. */
++        /* FIXME: msleep ?? */
++	mdelay(20);
++}
++
++
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
+new file mode 100644
+index 0000000..a71a6cd
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
+@@ -0,0 +1,340 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *	Eric Anholt <eric at anholt.net>
++ */
++
++#include <linux/i2c.h>
++#include <drm/drmP.h>
++
++#include "intel_bios.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "power.h"
++#include "cdv_device.h"
++#include <linux/pm_runtime.h>
++
++
++static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++	u32 temp, reg;
++	reg = ADPA;
++
++	temp = REG_READ(reg);
++	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
++	temp &= ~ADPA_DAC_ENABLE;
++
++	switch (mode) {
++	case DRM_MODE_DPMS_ON:
++		temp |= ADPA_DAC_ENABLE;
++		break;
++	case DRM_MODE_DPMS_STANDBY:
++		temp |= ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE;
++		break;
++	case DRM_MODE_DPMS_SUSPEND:
++		temp |= ADPA_DAC_ENABLE | ADPA_VSYNC_CNTL_DISABLE;
++		break;
++	case DRM_MODE_DPMS_OFF:
++		temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
++		break;
++	}
++
++	REG_WRITE(reg, temp);
++}
++
++static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
++				struct drm_display_mode *mode)
++{
++	struct drm_psb_private *dev_priv = connector->dev->dev_private;
++	int max_clock = 0;
++	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++		return MODE_NO_DBLESCAN;
++
++	/* The lowest clock for CDV is 20000KHz */
++	if (mode->clock < 20000)
++		return MODE_CLOCK_LOW;
++
++	/* The max clock for CDV is 355 instead of 400 */
++	max_clock = 355000;
++	if (mode->clock > max_clock)
++		return MODE_CLOCK_HIGH;
++
++	if (mode->hdisplay > 1680 || mode->vdisplay > 1050)
++		return MODE_PANEL;
++
++	/* We assume worst case scenario of 32 bpp here, since we don't know */
++	if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
++	    dev_priv->vram_stolen_size)
++		return MODE_MEM;
++
++	return MODE_OK;
++}
++
++static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
++				 struct drm_display_mode *mode,
++				 struct drm_display_mode *adjusted_mode)
++{
++	return true;
++}
++
++static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
++			       struct drm_display_mode *mode,
++			       struct drm_display_mode *adjusted_mode)
++{
++
++	struct drm_device *dev = encoder->dev;
++	struct drm_crtc *crtc = encoder->crtc;
++	struct psb_intel_crtc *psb_intel_crtc =
++					to_psb_intel_crtc(crtc);
++	int dpll_md_reg;
++	u32 adpa, dpll_md;
++	u32 adpa_reg;
++
++	if (psb_intel_crtc->pipe == 0)
++		dpll_md_reg = DPLL_A_MD;
++	else
++		dpll_md_reg = DPLL_B_MD;
++
++	adpa_reg = ADPA;
++
++	/*
++	 * Disable separate mode multiplier used when cloning SDVO to CRT
++	 * XXX this needs to be adjusted when we really are cloning
++	 */
++	{
++		dpll_md = REG_READ(dpll_md_reg);
++		REG_WRITE(dpll_md_reg,
++			   dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
++	}
++
++	adpa = 0;
++	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
++		adpa |= ADPA_HSYNC_ACTIVE_HIGH;
++	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
++		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
++
++	if (psb_intel_crtc->pipe == 0)
++		adpa |= ADPA_PIPE_A_SELECT;
++	else
++		adpa |= ADPA_PIPE_B_SELECT;
++
++	REG_WRITE(adpa_reg, adpa);
++}
++
++
++/**
++ * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
++ *
++ * \return true if CRT is connected.
++ * \return false if CRT is disconnected.
++ */
++static bool cdv_intel_crt_detect_hotplug(struct drm_connector *connector,
++								bool force)
++{
++	struct drm_device *dev = connector->dev;
++	u32 hotplug_en;
++	int i, tries = 0, ret = false;
++	u32 adpa_orig;
++
++	/* disable the DAC when doing the hotplug detection */
++
++	adpa_orig = REG_READ(ADPA);
++
++	REG_WRITE(ADPA, adpa_orig & ~(ADPA_DAC_ENABLE));
++
++	/*
++	 * On a CDV, the CRT detect sequence needs to be done twice
++	 * to get a reliable result.
++	 */
++	tries = 2;
++
++	hotplug_en = REG_READ(PORT_HOTPLUG_EN);
++	hotplug_en &= ~(CRT_HOTPLUG_DETECT_MASK);
++	hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
++
++	hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
++	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
++
++	for (i = 0; i < tries ; i++) {
++		unsigned long timeout;
++		/* turn on the FORCE_DETECT */
++		REG_WRITE(PORT_HOTPLUG_EN, hotplug_en);
++		timeout = jiffies + msecs_to_jiffies(1000);
++		/* wait for FORCE_DETECT to go off */
++		do {
++			if (!(REG_READ(PORT_HOTPLUG_EN) &
++					CRT_HOTPLUG_FORCE_DETECT))
++				break;
++			msleep(1);
++		} while (time_after(timeout, jiffies));
++	}
++
++	if ((REG_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
++	    CRT_HOTPLUG_MONITOR_NONE)
++		ret = true;
++
++	/* Restore the saved ADPA */
++	REG_WRITE(ADPA, adpa_orig);
++	return ret;
++}
++
++static enum drm_connector_status cdv_intel_crt_detect(
++				struct drm_connector *connector, bool force)
++{
++	if (cdv_intel_crt_detect_hotplug(connector, force))
++		return connector_status_connected;
++	else
++		return connector_status_disconnected;
++}
++
++static void cdv_intel_crt_destroy(struct drm_connector *connector)
++{
++	struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++
++	psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
++	drm_sysfs_connector_remove(connector);
++	drm_connector_cleanup(connector);
++	kfree(connector);
++}
++
++static int cdv_intel_crt_get_modes(struct drm_connector *connector)
++{
++	struct psb_intel_encoder *psb_intel_encoder =
++				psb_intel_attached_encoder(connector);
++	return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter);
++}
++
++static int cdv_intel_crt_set_property(struct drm_connector *connector,
++				  struct drm_property *property,
++				  uint64_t value)
++{
++	return 0;
++}
++
++/*
++ * Routines for controlling stuff on the analog port
++ */
++
++static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
++	.dpms = cdv_intel_crt_dpms,
++	.mode_fixup = cdv_intel_crt_mode_fixup,
++	.prepare = psb_intel_encoder_prepare,
++	.commit = psb_intel_encoder_commit,
++	.mode_set = cdv_intel_crt_mode_set,
++};
++
++static const struct drm_connector_funcs cdv_intel_crt_connector_funcs = {
++	.dpms = drm_helper_connector_dpms,
++	.detect = cdv_intel_crt_detect,
++	.fill_modes = drm_helper_probe_single_connector_modes,
++	.destroy = cdv_intel_crt_destroy,
++	.set_property = cdv_intel_crt_set_property,
++};
++
++static const struct drm_connector_helper_funcs
++				cdv_intel_crt_connector_helper_funcs = {
++	.mode_valid = cdv_intel_crt_mode_valid,
++	.get_modes = cdv_intel_crt_get_modes,
++	.best_encoder = psb_intel_best_encoder,
++};
++
++static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
++{
++	drm_encoder_cleanup(encoder);
++}
++
++static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
++	.destroy = cdv_intel_crt_enc_destroy,
++};
++
++void cdv_intel_crt_init(struct drm_device *dev,
++			struct psb_intel_mode_device *mode_dev)
++{
++
++	struct psb_intel_connector *psb_intel_connector;
++	struct psb_intel_encoder *psb_intel_encoder;
++	struct drm_connector *connector;
++	struct drm_encoder *encoder;
++
++	u32 i2c_reg;
++
++	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
++	if (!psb_intel_encoder)
++		return;
++
++	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
++	if (!psb_intel_connector)
++		goto failed_connector;
++
++	connector = &psb_intel_connector->base;
++	drm_connector_init(dev, connector,
++		&cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
++
++	encoder = &psb_intel_encoder->base;
++	drm_encoder_init(dev, encoder,
++		&cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
++
++	psb_intel_connector_attach_encoder(psb_intel_connector,
++					   psb_intel_encoder);
++
++	/* Set up the DDC bus. */
++	i2c_reg = GPIOA;
++	/* Remove the following code for CDV */
++	/*
++	if (dev_priv->crt_ddc_bus != 0)
++		i2c_reg = dev_priv->crt_ddc_bus;
++	}*/
++	psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
++							  i2c_reg, "CRTDDC_A");
++	if (!psb_intel_encoder->ddc_bus) {
++		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
++			   "failed.\n");
++		goto failed_ddc;
++	}
++
++	psb_intel_encoder->type = INTEL_OUTPUT_ANALOG;
++	/*
++	psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
++	psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
++	*/
++	connector->interlace_allowed = 0;
++	connector->doublescan_allowed = 0;
++
++	drm_encoder_helper_add(encoder, &cdv_intel_crt_helper_funcs);
++	drm_connector_helper_add(connector,
++					&cdv_intel_crt_connector_helper_funcs);
++
++	drm_sysfs_connector_add(connector);
++
++	return;
++failed_ddc:
++	drm_encoder_cleanup(&psb_intel_encoder->base);
++	drm_connector_cleanup(&psb_intel_connector->base);
++	kfree(psb_intel_connector);
++failed_connector:
++	kfree(psb_intel_encoder);
++	return;
++}
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
+new file mode 100644
+index 0000000..be84559
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
+@@ -0,0 +1,1459 @@
++/*
++ * Copyright © 2006-2011 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ *	Eric Anholt <eric at anholt.net>
++ */
++
++#include <linux/i2c.h>
++#include <linux/pm_runtime.h>
++
++#include <drm/drmP.h>
++#include "framebuffer.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_display.h"
++#include "power.h"
++#include "cdv_device.h"
++
++
++struct cdv_intel_range_t {
++	int min, max;
++};
++
++struct cdv_intel_p2_t {
++	int dot_limit;
++	int p2_slow, p2_fast;
++};
++
++struct cdv_intel_clock_t {
++	/* given values */
++	int n;
++	int m1, m2;
++	int p1, p2;
++	/* derived values */
++	int dot;
++	int vco;
++	int m;
++	int p;
++};
++
++#define INTEL_P2_NUM		      2
++
++struct cdv_intel_limit_t {
++	struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
++	struct cdv_intel_p2_t p2;
++};
++
++#define CDV_LIMIT_SINGLE_LVDS_96	0
++#define CDV_LIMIT_SINGLE_LVDS_100	1
++#define CDV_LIMIT_DAC_HDMI_27		2
++#define CDV_LIMIT_DAC_HDMI_96		3
++
++static const struct cdv_intel_limit_t cdv_intel_limits[] = {
++	{			/* CDV_SINGLE_LVDS_96MHz */
++	 .dot = {.min = 20000, .max = 115500},
++	 .vco = {.min = 1800000, .max = 3600000},
++	 .n = {.min = 2, .max = 6},
++	 .m = {.min = 60, .max = 160},
++	 .m1 = {.min = 0, .max = 0},
++	 .m2 = {.min = 58, .max = 158},
++	 .p = {.min = 28, .max = 140},
++	 .p1 = {.min = 2, .max = 10},
++	 .p2 = {.dot_limit = 200000,
++		.p2_slow = 14, .p2_fast = 14},
++	 },
++	{			/* CDV_SINGLE_LVDS_100MHz */
++	 .dot = {.min = 20000, .max = 115500},
++	 .vco = {.min = 1800000, .max = 3600000},
++	 .n = {.min = 2, .max = 6},
++	 .m = {.min = 60, .max = 160},
++	 .m1 = {.min = 0, .max = 0},
++	 .m2 = {.min = 58, .max = 158},
++	 .p = {.min = 28, .max = 140},
++	 .p1 = {.min = 2, .max = 10},
++	 /* The single-channel range is 25-112Mhz, and dual-channel
++	  * is 80-224Mhz.  Prefer single channel as much as possible.
++	  */
++	 .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
++	 },
++	{			/* CDV_DAC_HDMI_27MHz */
++	 .dot = {.min = 20000, .max = 400000},
++	 .vco = {.min = 1809000, .max = 3564000},
++	 .n = {.min = 1, .max = 1},
++	 .m = {.min = 67, .max = 132},
++	 .m1 = {.min = 0, .max = 0},
++	 .m2 = {.min = 65, .max = 130},
++	 .p = {.min = 5, .max = 90},
++	 .p1 = {.min = 1, .max = 9},
++	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
++	 },
++	{			/* CDV_DAC_HDMI_96MHz */
++	 .dot = {.min = 20000, .max = 400000},
++	 .vco = {.min = 1800000, .max = 3600000},
++	 .n = {.min = 2, .max = 6},
++	 .m = {.min = 60, .max = 160},
++	 .m1 = {.min = 0, .max = 0},
++	 .m2 = {.min = 58, .max = 158},
++	 .p = {.min = 5, .max = 100},
++	 .p1 = {.min = 1, .max = 10},
++	 .p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
++	 },
++};
++
++#define _wait_for(COND, MS, W) ({ \
++	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
++	int ret__ = 0;							\
++	while (!(COND)) {						\
++		if (time_after(jiffies, timeout__)) {			\
++			ret__ = -ETIMEDOUT;				\
++			break;						\
++		}							\
++		if (W && !in_dbg_master())				\
++			msleep(W);					\
++	}								\
++	ret__;								\
++})
++
++#define wait_for(COND, MS) _wait_for(COND, MS, 1)
++
++
++static int cdv_sb_read(struct drm_device *dev, u32 reg, u32 *val)
++{
++	int ret;
++
++	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
++	if (ret) {
++		DRM_ERROR("timeout waiting for SB to idle before read\n");
++		return ret;
++	}
++
++	REG_WRITE(SB_ADDR, reg);
++	REG_WRITE(SB_PCKT,
++		   SET_FIELD(SB_OPCODE_READ, SB_OPCODE) |
++		   SET_FIELD(SB_DEST_DPLL, SB_DEST) |
++		   SET_FIELD(0xf, SB_BYTE_ENABLE));
++
++	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
++	if (ret) {
++		DRM_ERROR("timeout waiting for SB to idle after read\n");
++		return ret;
++	}
++
++	*val = REG_READ(SB_DATA);
++
++	return 0;
++}
++
++static int cdv_sb_write(struct drm_device *dev, u32 reg, u32 val)
++{
++	int ret;
++	static bool dpio_debug = true;
++	u32 temp;
++
++	if (dpio_debug) {
++		if (cdv_sb_read(dev, reg, &temp) == 0)
++			DRM_DEBUG_KMS("0x%08x: 0x%08x (before)\n", reg, temp);
++		DRM_DEBUG_KMS("0x%08x: 0x%08x\n", reg, val);
++	}
++
++	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
++	if (ret) {
++		DRM_ERROR("timeout waiting for SB to idle before write\n");
++		return ret;
++	}
++
++	REG_WRITE(SB_ADDR, reg);
++	REG_WRITE(SB_DATA, val);
++	REG_WRITE(SB_PCKT,
++		   SET_FIELD(SB_OPCODE_WRITE, SB_OPCODE) |
++		   SET_FIELD(SB_DEST_DPLL, SB_DEST) |
++		   SET_FIELD(0xf, SB_BYTE_ENABLE));
++
++	ret = wait_for((REG_READ(SB_PCKT) & SB_BUSY) == 0, 1000);
++	if (ret) {
++		DRM_ERROR("timeout waiting for SB to idle after write\n");
++		return ret;
++	}
++
++	if (dpio_debug) {
++		if (cdv_sb_read(dev, reg, &temp) == 0)
++			DRM_DEBUG_KMS("0x%08x: 0x%08x (after)\n", reg, temp);
++	}
++
++	return 0;
++}
++
++/* Reset the DPIO configuration register.  The BIOS does this at every
++ * mode set.
++ */
++static void cdv_sb_reset(struct drm_device *dev)
++{
++
++	REG_WRITE(DPIO_CFG, 0);
++	REG_READ(DPIO_CFG);
++	REG_WRITE(DPIO_CFG, DPIO_MODE_SELECT_0 | DPIO_CMN_RESET_N);
++}
++
++/* Unlike most Intel display engines, on Cedarview the DPLL registers
++ * are behind this sideband bus.  They must be programmed while the
++ * DPLL reference clock is on in the DPLL control register, but before
++ * the DPLL is enabled in the DPLL control register.
++ */
++static int
++cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
++			       struct cdv_intel_clock_t *clock)
++{
++	struct psb_intel_crtc *psb_crtc =
++				to_psb_intel_crtc(crtc);
++	int pipe = psb_crtc->pipe;
++	u32 m, n_vco, p;
++	int ret = 0;
++	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
++	u32 ref_value;
++
++	cdv_sb_reset(dev);
++
++	if ((REG_READ(dpll_reg) & DPLL_SYNCLOCK_ENABLE) == 0) {
++		DRM_ERROR("Attempting to set DPLL with refclk disabled\n");
++		return -EBUSY;
++	}
++
++	/* Follow the BIOS and write the REF/SFR Register. Hardcoded value */
++	ref_value = 0x68A701;
++
++	cdv_sb_write(dev, SB_REF_SFR(pipe), ref_value);
++
++	/* We don't know what the other fields of these regs are, so
++	 * leave them in place.
++	 */
++	ret = cdv_sb_read(dev, SB_M(pipe), &m);
++	if (ret)
++		return ret;
++	m &= ~SB_M_DIVIDER_MASK;
++	m |= ((clock->m2) << SB_M_DIVIDER_SHIFT);
++	ret = cdv_sb_write(dev, SB_M(pipe), m);
++	if (ret)
++		return ret;
++
++	ret = cdv_sb_read(dev, SB_N_VCO(pipe), &n_vco);
++	if (ret)
++		return ret;
++
++	/* Follow the BIOS to program the N_DIVIDER REG */
++	n_vco &= 0xFFFF;
++	n_vco |= 0x107;
++	n_vco &= ~(SB_N_VCO_SEL_MASK |
++		   SB_N_DIVIDER_MASK |
++		   SB_N_CB_TUNE_MASK);
++
++	n_vco |= ((clock->n) << SB_N_DIVIDER_SHIFT);
++
++	if (clock->vco < 2250000) {
++		n_vco |= (2 << SB_N_CB_TUNE_SHIFT);
++		n_vco |= (0 << SB_N_VCO_SEL_SHIFT);
++	} else if (clock->vco < 2750000) {
++		n_vco |= (1 << SB_N_CB_TUNE_SHIFT);
++		n_vco |= (1 << SB_N_VCO_SEL_SHIFT);
++	} else if (clock->vco < 3300000) {
++		n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
++		n_vco |= (2 << SB_N_VCO_SEL_SHIFT);
++	} else {
++		n_vco |= (0 << SB_N_CB_TUNE_SHIFT);
++		n_vco |= (3 << SB_N_VCO_SEL_SHIFT);
++	}
++
++	ret = cdv_sb_write(dev, SB_N_VCO(pipe), n_vco);
++	if (ret)
++		return ret;
++
++	ret = cdv_sb_read(dev, SB_P(pipe), &p);
++	if (ret)
++		return ret;
++	p &= ~(SB_P2_DIVIDER_MASK | SB_P1_DIVIDER_MASK);
++	p |= SET_FIELD(clock->p1, SB_P1_DIVIDER);
++	switch (clock->p2) {
++	case 5:
++		p |= SET_FIELD(SB_P2_5, SB_P2_DIVIDER);
++		break;
++	case 10:
++		p |= SET_FIELD(SB_P2_10, SB_P2_DIVIDER);
++		break;
++	case 14:
++		p |= SET_FIELD(SB_P2_14, SB_P2_DIVIDER);
++		break;
++	case 7:
++		p |= SET_FIELD(SB_P2_7, SB_P2_DIVIDER);
++		break;
++	default:
++		DRM_ERROR("Bad P2 clock: %d\n", clock->p2);
++		return -EINVAL;
++	}
++	ret = cdv_sb_write(dev, SB_P(pipe), p);
++	if (ret)
++		return ret;
++
++	/* always Program the Lane Register for the Pipe A*/
++	if (pipe == 0) {
++		/* Program the Lane0/1 for HDMI B */
++		u32 lane_reg, lane_value;
++
++		lane_reg = PSB_LANE0;
++		cdv_sb_read(dev, lane_reg, &lane_value);
++		lane_value &= ~(LANE_PLL_MASK);
++		lane_value |= LANE_PLL_ENABLE;
++		cdv_sb_write(dev, lane_reg, lane_value);
++
++		lane_reg = PSB_LANE1;
++		cdv_sb_read(dev, lane_reg, &lane_value);
++		lane_value &= ~(LANE_PLL_MASK);
++		lane_value |= LANE_PLL_ENABLE;
++		cdv_sb_write(dev, lane_reg, lane_value);
++
++		/* Program the Lane2/3 for HDMI C */
++		lane_reg = PSB_LANE2;
++		cdv_sb_read(dev, lane_reg, &lane_value);
++		lane_value &= ~(LANE_PLL_MASK);
++		lane_value |= LANE_PLL_ENABLE;
++		cdv_sb_write(dev, lane_reg, lane_value);
++
++		lane_reg = PSB_LANE3;
++		cdv_sb_read(dev, lane_reg, &lane_value);
++		lane_value &= ~(LANE_PLL_MASK);
++		lane_value |= LANE_PLL_ENABLE;
++		cdv_sb_write(dev, lane_reg, lane_value);
++	}
++
++	return 0;
++}
++
++/*
++ * Returns whether any encoder on the specified pipe is of the specified type
++ */
++static bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_mode_config *mode_config = &dev->mode_config;
++	struct drm_connector *l_entry;
++
++	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
++		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
++			struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(l_entry);
++			if (psb_intel_encoder->type == type)
++				return true;
++		}
++	}
++	return false;
++}
++
++static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
++							int refclk)
++{
++	const struct cdv_intel_limit_t *limit;
++	if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
++		/*
++		 * Now only single-channel LVDS is supported on CDV. If it is
++		 * incorrect, please add the dual-channel LVDS.
++		 */
++		if (refclk == 96000)
++			limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
++		else
++			limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
++	} else {
++		if (refclk == 27000)
++			limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_27];
++		else
++			limit = &cdv_intel_limits[CDV_LIMIT_DAC_HDMI_96];
++	}
++	return limit;
++}
++
++/* m1 is reserved as 0 in CDV, n is a ring counter */
++static void cdv_intel_clock(struct drm_device *dev,
++			int refclk, struct cdv_intel_clock_t *clock)
++{
++	clock->m = clock->m2 + 2;
++	clock->p = clock->p1 * clock->p2;
++	clock->vco = (refclk * clock->m) / clock->n;
++	clock->dot = clock->vco / clock->p;
++}
++
++
++#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
++static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
++				const struct cdv_intel_limit_t *limit,
++			       struct cdv_intel_clock_t *clock)
++{
++	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
++		INTELPllInvalid("p1 out of range\n");
++	if (clock->p < limit->p.min || limit->p.max < clock->p)
++		INTELPllInvalid("p out of range\n");
++	/* unnecessary to check the range of m(m1/M2)/n again */
++	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
++		INTELPllInvalid("vco out of range\n");
++	/* XXX: We may need to be checking "Dot clock"
++	 * depending on the multiplier, connector, etc.,
++	 * rather than just a single range.
++	 */
++	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
++		INTELPllInvalid("dot out of range\n");
++
++	return true;
++}
++
++static bool cdv_intel_find_best_PLL(struct drm_crtc *crtc, int target,
++				int refclk,
++				struct cdv_intel_clock_t *best_clock)
++{
++	struct drm_device *dev = crtc->dev;
++	struct cdv_intel_clock_t clock;
++	const struct cdv_intel_limit_t *limit = cdv_intel_limit(crtc, refclk);
++	int err = target;
++
++
++	if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
++	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
++		/*
++		 * For LVDS, if the panel is on, just rely on its current
++		 * settings for dual-channel.  We haven't figured out how to
++		 * reliably set up different single/dual channel state, if we
++		 * even can.
++		 */
++		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
++		    LVDS_CLKB_POWER_UP)
++			clock.p2 = limit->p2.p2_fast;
++		else
++			clock.p2 = limit->p2.p2_slow;
++	} else {
++		if (target < limit->p2.dot_limit)
++			clock.p2 = limit->p2.p2_slow;
++		else
++			clock.p2 = limit->p2.p2_fast;
++	}
++
++	memset(best_clock, 0, sizeof(*best_clock));
++	clock.m1 = 0;
++	/* m1 is reserved as 0 in CDV, n is a ring counter.
++	   So skip the m1 loop */
++	for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
++		for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
++					     clock.m2++) {
++			for (clock.p1 = limit->p1.min;
++					clock.p1 <= limit->p1.max;
++					clock.p1++) {
++				int this_err;
++
++				cdv_intel_clock(dev, refclk, &clock);
++
++				if (!cdv_intel_PLL_is_valid(crtc,
++								limit, &clock))
++						continue;
++
++				this_err = abs(clock.dot - target);
++				if (this_err < err) {
++					*best_clock = clock;
++					err = this_err;
++				}
++			}
++		}
++	}
++
++	return err != target;
++}
++
++static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
++			    int x, int y, struct drm_framebuffer *old_fb)
++{
++	struct drm_device *dev = crtc->dev;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
++	int pipe = psb_intel_crtc->pipe;
++	unsigned long start, offset;
++	int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
++	int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
++	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
++	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++	u32 dspcntr;
++	int ret = 0;
++
++	if (!gma_power_begin(dev, true))
++		return 0;
++
++	/* no fb bound */
++	if (!crtc->fb) {
++		dev_err(dev->dev, "No FB bound\n");
++		goto psb_intel_pipe_cleaner;
++	}
++
++
++	/* We are displaying this buffer, make sure it is actually loaded
++	   into the GTT */
++	ret = psb_gtt_pin(psbfb->gtt);
++	if (ret < 0)
++		goto psb_intel_pipe_set_base_exit;
++	start = psbfb->gtt->offset;
++	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
++
++	REG_WRITE(dspstride, crtc->fb->pitches[0]);
++
++	dspcntr = REG_READ(dspcntr_reg);
++	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
++
++	switch (crtc->fb->bits_per_pixel) {
++	case 8:
++		dspcntr |= DISPPLANE_8BPP;
++		break;
++	case 16:
++		if (crtc->fb->depth == 15)
++			dspcntr |= DISPPLANE_15_16BPP;
++		else
++			dspcntr |= DISPPLANE_16BPP;
++		break;
++	case 24:
++	case 32:
++		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
++		break;
++	default:
++		dev_err(dev->dev, "Unknown color depth\n");
++		ret = -EINVAL;
++		goto psb_intel_pipe_set_base_exit;
++	}
++	REG_WRITE(dspcntr_reg, dspcntr);
++
++	dev_dbg(dev->dev,
++		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);
++
++	REG_WRITE(dspbase, offset);
++	REG_READ(dspbase);
++	REG_WRITE(dspsurf, start);
++	REG_READ(dspsurf);
++
++psb_intel_pipe_cleaner:
++	/* If there was a previous display we can now unpin it */
++	if (old_fb)
++		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
++
++psb_intel_pipe_set_base_exit:
++	gma_power_end(dev);
++	return ret;
++}
++
++/**
++ * Sets the power management mode of the pipe and plane.
++ *
++ * This code should probably grow support for turning the cursor off and back
++ * on appropriately at the same time as we're turning the pipe off/on.
++ */
++static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++	struct drm_device *dev = crtc->dev;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int pipe = psb_intel_crtc->pipe;
++	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
++	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++	int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
++	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++	u32 temp;
++
++	/* XXX: When our outputs are all unaware of DPMS modes other than off
++	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++	 */
++	switch (mode) {
++	case DRM_MODE_DPMS_ON:
++	case DRM_MODE_DPMS_STANDBY:
++	case DRM_MODE_DPMS_SUSPEND:
++		/* Enable the DPLL */
++		temp = REG_READ(dpll_reg);
++		if ((temp & DPLL_VCO_ENABLE) == 0) {
++			REG_WRITE(dpll_reg, temp);
++			REG_READ(dpll_reg);
++			/* Wait for the clocks to stabilize. */
++			udelay(150);
++			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++			REG_READ(dpll_reg);
++			/* Wait for the clocks to stabilize. */
++			udelay(150);
++			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++			REG_READ(dpll_reg);
++			/* Wait for the clocks to stabilize. */
++			udelay(150);
++		}
++
++		/* Jim Bish - switch plan and pipe per scott */
++		/* Enable the plane */
++		temp = REG_READ(dspcntr_reg);
++		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++			REG_WRITE(dspcntr_reg,
++				  temp | DISPLAY_PLANE_ENABLE);
++			/* Flush the plane changes */
++			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++		}
++
++		udelay(150);
++
++		/* Enable the pipe */
++		temp = REG_READ(pipeconf_reg);
++		if ((temp & PIPEACONF_ENABLE) == 0)
++			REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
++
++		psb_intel_crtc_load_lut(crtc);
++
++		/* Give the overlay scaler a chance to enable
++		 * if it's on this pipe */
++		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
++		break;
++	case DRM_MODE_DPMS_OFF:
++		/* Give the overlay scaler a chance to disable
++		 * if it's on this pipe */
++		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
++
++		/* Disable the VGA plane that we never use */
++		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++		/* Jim Bish - changed pipe/plane here as well. */
++
++		/* Wait for vblank for the disable to take effect */
++		cdv_intel_wait_for_vblank(dev);
++
++		/* Next, disable display pipes */
++		temp = REG_READ(pipeconf_reg);
++		if ((temp & PIPEACONF_ENABLE) != 0) {
++			REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
++			REG_READ(pipeconf_reg);
++		}
++
++		/* Wait for vblank for the disable to take effect. */
++		cdv_intel_wait_for_vblank(dev);
++
++		udelay(150);
++
++		/* Disable display plane */
++		temp = REG_READ(dspcntr_reg);
++		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++			REG_WRITE(dspcntr_reg,
++				  temp & ~DISPLAY_PLANE_ENABLE);
++			/* Flush the plane changes */
++			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++			REG_READ(dspbase_reg);
++		}
++
++		temp = REG_READ(dpll_reg);
++		if ((temp & DPLL_VCO_ENABLE) != 0) {
++			REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
++			REG_READ(dpll_reg);
++		}
++
++		/* Wait for the clocks to turn off. */
++		udelay(150);
++		break;
++	}
++	/*Set FIFO Watermarks*/
++	REG_WRITE(DSPARB, 0x3F3E);
++}
++
++static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
++{
++	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++}
++
++static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
++{
++	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
++}
++
++static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
++				  struct drm_display_mode *mode,
++				  struct drm_display_mode *adjusted_mode)
++{
++	return true;
++}
++
++
++/**
++ * Return the pipe currently connected to the panel fitter,
++ * or -1 if the panel fitter is not present or not in use
++ */
++static int cdv_intel_panel_fitter_pipe(struct drm_device *dev)
++{
++	u32 pfit_control;
++
++	pfit_control = REG_READ(PFIT_CONTROL);
++
++	/* See if the panel fitter is in use */
++	if ((pfit_control & PFIT_ENABLE) == 0)
++		return -1;
++	return (pfit_control >> 29) & 0x3;
++}
++
++static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
++			       struct drm_display_mode *mode,
++			       struct drm_display_mode *adjusted_mode,
++			       int x, int y,
++			       struct drm_framebuffer *old_fb)
++{
++	struct drm_device *dev = crtc->dev;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int pipe = psb_intel_crtc->pipe;
++	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
++	int dpll_md_reg = (psb_intel_crtc->pipe == 0) ? DPLL_A_MD : DPLL_B_MD;
++	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
++	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
++	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
++	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
++	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
++	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
++	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
++	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
++	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
++	int refclk;
++	struct cdv_intel_clock_t clock;
++	u32 dpll = 0, dspcntr, pipeconf;
++	bool ok;
++	bool is_crt = false, is_lvds = false, is_tv = false;
++	bool is_hdmi = false;
++	struct drm_mode_config *mode_config = &dev->mode_config;
++	struct drm_connector *connector;
++
++	list_for_each_entry(connector, &mode_config->connector_list, head) {
++		struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++
++		if (!connector->encoder
++		    || connector->encoder->crtc != crtc)
++			continue;
++
++		switch (psb_intel_encoder->type) {
++		case INTEL_OUTPUT_LVDS:
++			is_lvds = true;
++			break;
++		case INTEL_OUTPUT_TVOUT:
++			is_tv = true;
++			break;
++		case INTEL_OUTPUT_ANALOG:
++			is_crt = true;
++			break;
++		case INTEL_OUTPUT_HDMI:
++			is_hdmi = true;
++			break;
++		}
++	}
++
++	refclk = 96000;
++
++	/* Hack selection about ref clk for CRT */
++	/* Select 27MHz as the reference clk for HDMI */
++	if (is_crt || is_hdmi)
++		refclk = 27000;
++
++	drm_mode_debug_printmodeline(adjusted_mode);
++
++	ok = cdv_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
++				 &clock);
++	if (!ok) {
++		dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
++		return 0;
++	}
++
++	dpll = DPLL_VGA_MODE_DIS;
++	if (is_tv) {
++		/* XXX: just matching BIOS for now */
++/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
++		dpll |= 3;
++	}
++		dpll |= PLL_REF_INPUT_DREFCLK;
++
++	dpll |= DPLL_SYNCLOCK_ENABLE;
++	dpll |= DPLL_VGA_MODE_DIS;
++	if (is_lvds)
++		dpll |= DPLLB_MODE_LVDS;
++	else
++		dpll |= DPLLB_MODE_DAC_SERIAL;
++	/* dpll |= (2 << 11); */
++
++	/* setup pipeconf */
++	pipeconf = REG_READ(pipeconf_reg);
++
++	/* Set up the display plane register */
++	dspcntr = DISPPLANE_GAMMA_ENABLE;
++
++	if (pipe == 0)
++		dspcntr |= DISPPLANE_SEL_PIPE_A;
++	else
++		dspcntr |= DISPPLANE_SEL_PIPE_B;
++
++	dspcntr |= DISPLAY_PLANE_ENABLE;
++	pipeconf |= PIPEACONF_ENABLE;
++
++	REG_WRITE(dpll_reg, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE);
++	REG_READ(dpll_reg);
++
++	cdv_dpll_set_clock_cdv(dev, crtc, &clock);
++
++	udelay(150);
++
++
++	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
++	 * This is an exception to the general rule that mode_set doesn't turn
++	 * things on.
++	 */
++	if (is_lvds) {
++		u32 lvds = REG_READ(LVDS);
++
++		lvds |=
++		    LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP |
++		    LVDS_PIPEB_SELECT;
++		/* Set the B0-B3 data pairs corresponding to
++		 * whether we're going to
++		 * set the DPLLs for dual-channel mode or not.
++		 */
++		if (clock.p2 == 7)
++			lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
++		else
++			lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
++
++		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
++		 * appropriately here, but we need to look more
++		 * thoroughly into how panels behave in the two modes.
++		 */
++
++		REG_WRITE(LVDS, lvds);
++		REG_READ(LVDS);
++	}
++
++	dpll |= DPLL_VCO_ENABLE;
++
++	/* Disable the panel fitter if it was on our pipe */
++	if (cdv_intel_panel_fitter_pipe(dev) == pipe)
++		REG_WRITE(PFIT_CONTROL, 0);
++
++	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
++	drm_mode_debug_printmodeline(mode);
++
++	REG_WRITE(dpll_reg,
++		(REG_READ(dpll_reg) & ~DPLL_LOCK) | DPLL_VCO_ENABLE);
++	REG_READ(dpll_reg);
++	/* Wait for the clocks to stabilize. */
++	udelay(150); /* 42 usec w/o calibration, 110 with.  rounded up. */
++
++	if (!(REG_READ(dpll_reg) & DPLL_LOCK)) {
++		dev_err(dev->dev, "Failed to get DPLL lock\n");
++		return -EBUSY;
++	}
++
++	{
++		int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
++		REG_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
++	}
++
++	REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++		  ((adjusted_mode->crtc_htotal - 1) << 16));
++	REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++		  ((adjusted_mode->crtc_hblank_end - 1) << 16));
++	REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++		  ((adjusted_mode->crtc_hsync_end - 1) << 16));
++	REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++		  ((adjusted_mode->crtc_vtotal - 1) << 16));
++	REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++		  ((adjusted_mode->crtc_vblank_end - 1) << 16));
++	REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++		  ((adjusted_mode->crtc_vsync_end - 1) << 16));
++	/* pipesrc and dspsize control the size that is scaled from,
++	 * which should always be the user's requested size.
++	 */
++	REG_WRITE(dspsize_reg,
++		  ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
++	REG_WRITE(dsppos_reg, 0);
++	REG_WRITE(pipesrc_reg,
++		  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
++	REG_WRITE(pipeconf_reg, pipeconf);
++	REG_READ(pipeconf_reg);
++
++	cdv_intel_wait_for_vblank(dev);
++
++	REG_WRITE(dspcntr_reg, dspcntr);
++
++	/* Flush the plane changes */
++	{
++		struct drm_crtc_helper_funcs *crtc_funcs =
++		    crtc->helper_private;
++		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
++	}
++
++	cdv_intel_wait_for_vblank(dev);
++
++	return 0;
++}
++
++/** Loads the palette/gamma unit for the CRTC with the prepared values */
++static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_psb_private *dev_priv =
++				(struct drm_psb_private *)dev->dev_private;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int palreg = PALETTE_A;
++	int i;
++
++	/* The clocks have to be on to load the palette. */
++	if (!crtc->enabled)
++		return;
++
++	switch (psb_intel_crtc->pipe) {
++	case 0:
++		break;
++	case 1:
++		palreg = PALETTE_B;
++		break;
++	case 2:
++		palreg = PALETTE_C;
++		break;
++	default:
++		dev_err(dev->dev, "Illegal Pipe Number.\n");
++		return;
++	}
++
++	if (gma_power_begin(dev, false)) {
++		for (i = 0; i < 256; i++) {
++			REG_WRITE(palreg + 4 * i,
++				  ((psb_intel_crtc->lut_r[i] +
++				  psb_intel_crtc->lut_adj[i]) << 16) |
++				  ((psb_intel_crtc->lut_g[i] +
++				  psb_intel_crtc->lut_adj[i]) << 8) |
++				  (psb_intel_crtc->lut_b[i] +
++				  psb_intel_crtc->lut_adj[i]));
++		}
++		gma_power_end(dev);
++	} else {
++		for (i = 0; i < 256; i++) {
++			dev_priv->regs.psb.save_palette_a[i] =
++				  ((psb_intel_crtc->lut_r[i] +
++				  psb_intel_crtc->lut_adj[i]) << 16) |
++				  ((psb_intel_crtc->lut_g[i] +
++				  psb_intel_crtc->lut_adj[i]) << 8) |
++				  (psb_intel_crtc->lut_b[i] +
++				  psb_intel_crtc->lut_adj[i]);
++		}
++
++	}
++}
++
++/**
++ * Save HW states of giving crtc
++ */
++static void cdv_intel_crtc_save(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	/* struct drm_psb_private *dev_priv =
++			(struct drm_psb_private *)dev->dev_private; */
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
++	int pipeA = (psb_intel_crtc->pipe == 0);
++	uint32_t paletteReg;
++	int i;
++
++	if (!crtc_state) {
++		dev_dbg(dev->dev, "No CRTC state found\n");
++		return;
++	}
++
++	crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
++	crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
++	crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
++	crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
++	crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
++	crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
++	crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
++	crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
++	crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
++	crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
++	crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
++	crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
++	crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
++
++	/*NOTE: DSPSIZE DSPPOS only for psb*/
++	crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
++	crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
++
++	crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
++
++	DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
++			crtc_state->saveDSPCNTR,
++			crtc_state->savePIPECONF,
++			crtc_state->savePIPESRC,
++			crtc_state->saveFP0,
++			crtc_state->saveFP1,
++			crtc_state->saveDPLL,
++			crtc_state->saveHTOTAL,
++			crtc_state->saveHBLANK,
++			crtc_state->saveHSYNC,
++			crtc_state->saveVTOTAL,
++			crtc_state->saveVBLANK,
++			crtc_state->saveVSYNC,
++			crtc_state->saveDSPSTRIDE,
++			crtc_state->saveDSPSIZE,
++			crtc_state->saveDSPPOS,
++			crtc_state->saveDSPBASE
++		);
++
++	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
++	for (i = 0; i < 256; ++i)
++		crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
++}
++
++/**
++ * Restore HW states of giving crtc
++ */
++static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	/* struct drm_psb_private * dev_priv =
++				(struct drm_psb_private *)dev->dev_private; */
++	struct psb_intel_crtc *psb_intel_crtc =  to_psb_intel_crtc(crtc);
++	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
++	/* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
++	int pipeA = (psb_intel_crtc->pipe == 0);
++	uint32_t paletteReg;
++	int i;
++
++	if (!crtc_state) {
++		dev_dbg(dev->dev, "No crtc state\n");
++		return;
++	}
++
++	DRM_DEBUG(
++		"current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
++		REG_READ(pipeA ? DSPACNTR : DSPBCNTR),
++		REG_READ(pipeA ? PIPEACONF : PIPEBCONF),
++		REG_READ(pipeA ? PIPEASRC : PIPEBSRC),
++		REG_READ(pipeA ? FPA0 : FPB0),
++		REG_READ(pipeA ? FPA1 : FPB1),
++		REG_READ(pipeA ? DPLL_A : DPLL_B),
++		REG_READ(pipeA ? HTOTAL_A : HTOTAL_B),
++		REG_READ(pipeA ? HBLANK_A : HBLANK_B),
++		REG_READ(pipeA ? HSYNC_A : HSYNC_B),
++		REG_READ(pipeA ? VTOTAL_A : VTOTAL_B),
++		REG_READ(pipeA ? VBLANK_A : VBLANK_B),
++		REG_READ(pipeA ? VSYNC_A : VSYNC_B),
++		REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE),
++		REG_READ(pipeA ? DSPASIZE : DSPBSIZE),
++		REG_READ(pipeA ? DSPAPOS : DSPBPOS),
++		REG_READ(pipeA ? DSPABASE : DSPBBASE)
++		);
++
++	DRM_DEBUG(
++		"saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
++		crtc_state->saveDSPCNTR,
++		crtc_state->savePIPECONF,
++		crtc_state->savePIPESRC,
++		crtc_state->saveFP0,
++		crtc_state->saveFP1,
++		crtc_state->saveDPLL,
++		crtc_state->saveHTOTAL,
++		crtc_state->saveHBLANK,
++		crtc_state->saveHSYNC,
++		crtc_state->saveVTOTAL,
++		crtc_state->saveVBLANK,
++		crtc_state->saveVSYNC,
++		crtc_state->saveDSPSTRIDE,
++		crtc_state->saveDSPSIZE,
++		crtc_state->saveDSPPOS,
++		crtc_state->saveDSPBASE
++		);
++
++
++	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
++		REG_WRITE(pipeA ? DPLL_A : DPLL_B,
++			crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
++		REG_READ(pipeA ? DPLL_A : DPLL_B);
++		DRM_DEBUG("write dpll: %x\n",
++				REG_READ(pipeA ? DPLL_A : DPLL_B));
++		udelay(150);
++	}
++
++	REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
++	REG_READ(pipeA ? FPA0 : FPB0);
++
++	REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
++	REG_READ(pipeA ? FPA1 : FPB1);
++
++	REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
++	REG_READ(pipeA ? DPLL_A : DPLL_B);
++	udelay(150);
++
++	REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
++	REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
++	REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
++	REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
++	REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
++	REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
++	REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
++
++	REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
++	REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
++
++	REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
++	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
++	REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
++
++	cdv_intel_wait_for_vblank(dev);
++
++	REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
++	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
++
++	cdv_intel_wait_for_vblank(dev);
++
++	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
++	for (i = 0; i < 256; ++i)
++		REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
++}
++
++static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
++				 struct drm_file *file_priv,
++				 uint32_t handle,
++				 uint32_t width, uint32_t height)
++{
++	struct drm_device *dev = crtc->dev;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int pipe = psb_intel_crtc->pipe;
++	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
++	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
++	uint32_t temp;
++	size_t addr = 0;
++	struct gtt_range *gt;
++	struct drm_gem_object *obj;
++	int ret;
++
++	/* if we want to turn of the cursor ignore width and height */
++	if (!handle) {
++		/* turn off the cursor */
++		temp = CURSOR_MODE_DISABLE;
++
++		if (gma_power_begin(dev, false)) {
++			REG_WRITE(control, temp);
++			REG_WRITE(base, 0);
++			gma_power_end(dev);
++		}
++
++		/* unpin the old GEM object */
++		if (psb_intel_crtc->cursor_obj) {
++			gt = container_of(psb_intel_crtc->cursor_obj,
++							struct gtt_range, gem);
++			psb_gtt_unpin(gt);
++			drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
++			psb_intel_crtc->cursor_obj = NULL;
++		}
++
++		return 0;
++	}
++
++	/* Currently we only support 64x64 cursors */
++	if (width != 64 || height != 64) {
++		dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
++		return -EINVAL;
++	}
++
++	obj = drm_gem_object_lookup(dev, file_priv, handle);
++	if (!obj)
++		return -ENOENT;
++
++	if (obj->size < width * height * 4) {
++		dev_dbg(dev->dev, "buffer is to small\n");
++		return -ENOMEM;
++	}
++
++	gt = container_of(obj, struct gtt_range, gem);
++
++	/* Pin the memory into the GTT */
++	ret = psb_gtt_pin(gt);
++	if (ret) {
++		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
++		return ret;
++	}
++
++	addr = gt->offset;	/* Or resource.start ??? */
++
++	psb_intel_crtc->cursor_addr = addr;
++
++	temp = 0;
++	/* set the pipe for the cursor */
++	temp |= (pipe << 28);
++	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
++
++	if (gma_power_begin(dev, false)) {
++		REG_WRITE(control, temp);
++		REG_WRITE(base, addr);
++		gma_power_end(dev);
++	}
++
++	/* unpin the old GEM object */
++	if (psb_intel_crtc->cursor_obj) {
++		gt = container_of(psb_intel_crtc->cursor_obj,
++							struct gtt_range, gem);
++		psb_gtt_unpin(gt);
++		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
++		psb_intel_crtc->cursor_obj = obj;
++	}
++	return 0;
++}
++
++static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
++{
++	struct drm_device *dev = crtc->dev;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int pipe = psb_intel_crtc->pipe;
++	uint32_t temp = 0;
++	uint32_t adder;
++
++
++	if (x < 0) {
++		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
++		x = -x;
++	}
++	if (y < 0) {
++		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
++		y = -y;
++	}
++
++	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
++	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
++
++	adder = psb_intel_crtc->cursor_addr;
++
++	if (gma_power_begin(dev, false)) {
++		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
++		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
++		gma_power_end(dev);
++	}
++	return 0;
++}
++
++static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
++			 u16 *green, u16 *blue, uint32_t start, uint32_t size)
++{
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int i;
++	int end = (start + size > 256) ? 256 : start + size;
++
++	for (i = start; i < end; i++) {
++		psb_intel_crtc->lut_r[i] = red[i] >> 8;
++		psb_intel_crtc->lut_g[i] = green[i] >> 8;
++		psb_intel_crtc->lut_b[i] = blue[i] >> 8;
++	}
++
++	cdv_intel_crtc_load_lut(crtc);
++}
++
++static int cdv_crtc_set_config(struct drm_mode_set *set)
++{
++	int ret = 0;
++	struct drm_device *dev = set->crtc->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (!dev_priv->rpm_enabled)
++		return drm_crtc_helper_set_config(set);
++
++	pm_runtime_forbid(&dev->pdev->dev);
++
++	ret = drm_crtc_helper_set_config(set);
++
++	pm_runtime_allow(&dev->pdev->dev);
++
++	return ret;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
++
++/* FIXME: why are we using this, should it be cdv_ in this tree ? */
++
++static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
++{
++	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
++	clock->p = clock->p1 * clock->p2;
++	clock->vco = refclk * clock->m / (clock->n + 2);
++	clock->dot = clock->vco / clock->p;
++}
++
++/* Returns the clock of the currently programmed mode of the given pipe. */
++static int cdv_intel_crtc_clock_get(struct drm_device *dev,
++				struct drm_crtc *crtc)
++{
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int pipe = psb_intel_crtc->pipe;
++	u32 dpll;
++	u32 fp;
++	struct cdv_intel_clock_t clock;
++	bool is_lvds;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (gma_power_begin(dev, false)) {
++		dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
++		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
++			fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
++		else
++			fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
++		is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
++		gma_power_end(dev);
++	} else {
++		dpll = (pipe == 0) ?
++			dev_priv->regs.psb.saveDPLL_A :
++			dev_priv->regs.psb.saveDPLL_B;
++
++		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
++			fp = (pipe == 0) ?
++				dev_priv->regs.psb.saveFPA0 :
++				dev_priv->regs.psb.saveFPB0;
++		else
++			fp = (pipe == 0) ?
++				dev_priv->regs.psb.saveFPA1 :
++				dev_priv->regs.psb.saveFPB1;
++
++		is_lvds = (pipe == 1) &&
++				(dev_priv->regs.psb.saveLVDS & LVDS_PORT_EN);
++	}
++
++	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
++	clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
++	clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
++
++	if (is_lvds) {
++		clock.p1 =
++		    ffs((dpll &
++			 DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
++			DPLL_FPA01_P1_POST_DIV_SHIFT);
++		if (clock.p1 == 0) {
++			clock.p1 = 4;
++			dev_err(dev->dev, "PLL %d\n", dpll);
++		}
++		clock.p2 = 14;
++
++		if ((dpll & PLL_REF_INPUT_MASK) ==
++		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
++			/* XXX: might not be 66MHz */
++			i8xx_clock(66000, &clock);
++		} else
++			i8xx_clock(48000, &clock);
++	} else {
++		if (dpll & PLL_P1_DIVIDE_BY_TWO)
++			clock.p1 = 2;
++		else {
++			clock.p1 =
++			    ((dpll &
++			      DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
++			     DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
++		}
++		if (dpll & PLL_P2_DIVIDE_BY_4)
++			clock.p2 = 4;
++		else
++			clock.p2 = 2;
++
++		i8xx_clock(48000, &clock);
++	}
++
++	/* XXX: It would be nice to validate the clocks, but we can't reuse
++	 * i830PllIsValid() because it relies on the xf86_config connector
++	 * configuration being accurate, which it isn't necessarily.
++	 */
++
++	return clock.dot;
++}
++
++/** Returns the currently programmed mode of the given pipe. */
++struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
++					     struct drm_crtc *crtc)
++{
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int pipe = psb_intel_crtc->pipe;
++	struct drm_display_mode *mode;
++	int htot;
++	int hsync;
++	int vtot;
++	int vsync;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (gma_power_begin(dev, false)) {
++		htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
++		hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
++		vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
++		vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
++		gma_power_end(dev);
++	} else {
++		htot = (pipe == 0) ?
++			dev_priv->regs.psb.saveHTOTAL_A :
++			dev_priv->regs.psb.saveHTOTAL_B;
++		hsync = (pipe == 0) ?
++			dev_priv->regs.psb.saveHSYNC_A :
++			dev_priv->regs.psb.saveHSYNC_B;
++		vtot = (pipe == 0) ?
++			dev_priv->regs.psb.saveVTOTAL_A :
++			dev_priv->regs.psb.saveVTOTAL_B;
++		vsync = (pipe == 0) ?
++			dev_priv->regs.psb.saveVSYNC_A :
++			dev_priv->regs.psb.saveVSYNC_B;
++	}
++
++	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++	if (!mode)
++		return NULL;
++
++	mode->clock = cdv_intel_crtc_clock_get(dev, crtc);
++	mode->hdisplay = (htot & 0xffff) + 1;
++	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
++	mode->hsync_start = (hsync & 0xffff) + 1;
++	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
++	mode->vdisplay = (vtot & 0xffff) + 1;
++	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
++	mode->vsync_start = (vsync & 0xffff) + 1;
++	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
++
++	drm_mode_set_name(mode);
++	drm_mode_set_crtcinfo(mode, 0);
++
++	return mode;
++}
++
++static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
++{
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++
++	kfree(psb_intel_crtc->crtc_state);
++	drm_crtc_cleanup(crtc);
++	kfree(psb_intel_crtc);
++}
++
++const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
++	.dpms = cdv_intel_crtc_dpms,
++	.mode_fixup = cdv_intel_crtc_mode_fixup,
++	.mode_set = cdv_intel_crtc_mode_set,
++	.mode_set_base = cdv_intel_pipe_set_base,
++	.prepare = cdv_intel_crtc_prepare,
++	.commit = cdv_intel_crtc_commit,
++};
++
++const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
++	.save = cdv_intel_crtc_save,
++	.restore = cdv_intel_crtc_restore,
++	.cursor_set = cdv_intel_crtc_cursor_set,
++	.cursor_move = cdv_intel_crtc_cursor_move,
++	.gamma_set = cdv_intel_crtc_gamma_set,
++	.set_config = cdv_crtc_set_config,
++	.destroy = cdv_intel_crtc_destroy,
++};
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+new file mode 100644
+index 0000000..8d52695
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+@@ -0,0 +1,393 @@
++/*
++ * Copyright © 2006-2011 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *	jim liu <jim.liu at intel.com>
++ *
++ * FIXME:
++ *	We should probably make this generic and share it with Medfield
++ */
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++#include "psb_intel_drv.h"
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++#include "cdv_device.h"
++#include <linux/pm_runtime.h>
++
++/* hdmi control bits */
++#define HDMI_NULL_PACKETS_DURING_VSYNC	(1 << 9)
++#define HDMI_BORDER_ENABLE		(1 << 7)
++#define HDMI_AUDIO_ENABLE		(1 << 6)
++#define HDMI_VSYNC_ACTIVE_HIGH		(1 << 4)
++#define HDMI_HSYNC_ACTIVE_HIGH		(1 << 3)
++/* hdmi-b control bits */
++#define	HDMIB_PIPE_B_SELECT		(1 << 30)
++
++
++struct mid_intel_hdmi_priv {
++	u32 hdmi_reg;
++	u32 save_HDMIB;
++	bool has_hdmi_sink;
++	bool has_hdmi_audio;
++	/* Should set this when detect hotplug */
++	bool hdmi_device_connected;
++	struct mdfld_hdmi_i2c *i2c_bus;
++	struct i2c_adapter *hdmi_i2c_adapter;	/* for control functions */
++	struct drm_device *dev;
++};
++
++static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
++			struct drm_display_mode *mode,
++			struct drm_display_mode *adjusted_mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder);
++	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
++	u32 hdmib;
++	struct drm_crtc *crtc = encoder->crtc;
++	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
++
++	hdmib = (2 << 10);
++
++	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
++		hdmib |= HDMI_VSYNC_ACTIVE_HIGH;
++	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
++		hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
++
++	if (intel_crtc->pipe == 1)
++		hdmib |= HDMIB_PIPE_B_SELECT;
++
++	if (hdmi_priv->has_hdmi_audio) {
++		hdmib |= HDMI_AUDIO_ENABLE;
++		hdmib |= HDMI_NULL_PACKETS_DURING_VSYNC;
++	}
++
++	REG_WRITE(hdmi_priv->hdmi_reg, hdmib);
++	REG_READ(hdmi_priv->hdmi_reg);
++}
++
++static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
++				  struct drm_display_mode *mode,
++				  struct drm_display_mode *adjusted_mode)
++{
++	return true;
++}
++
++static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct psb_intel_encoder *psb_intel_encoder =
++						to_psb_intel_encoder(encoder);
++	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
++	u32 hdmib;
++
++	hdmib = REG_READ(hdmi_priv->hdmi_reg);
++
++	if (mode != DRM_MODE_DPMS_ON)
++		REG_WRITE(hdmi_priv->hdmi_reg, hdmib & ~HDMIB_PORT_EN);
++	else
++		REG_WRITE(hdmi_priv->hdmi_reg, hdmib | HDMIB_PORT_EN);
++	REG_READ(hdmi_priv->hdmi_reg);
++}
++
++static void cdv_hdmi_save(struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++	struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
++
++	hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
++}
++
++static void cdv_hdmi_restore(struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++	struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
++
++	REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
++	REG_READ(hdmi_priv->hdmi_reg);
++}
++
++static enum drm_connector_status cdv_hdmi_detect(
++				struct drm_connector *connector, bool force)
++{
++	struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++	struct psb_intel_connector *psb_intel_connector =
++					to_psb_intel_connector(connector);
++	struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
++	struct edid *edid = NULL;
++	enum drm_connector_status status = connector_status_disconnected;
++
++	edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
++
++	hdmi_priv->has_hdmi_sink = false;
++	hdmi_priv->has_hdmi_audio = false;
++	if (edid) {
++		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
++			status = connector_status_connected;
++			hdmi_priv->has_hdmi_sink =
++						drm_detect_hdmi_monitor(edid);
++			hdmi_priv->has_hdmi_audio =
++						drm_detect_monitor_audio(edid);
++		}
++
++		psb_intel_connector->base.display_info.raw_edid = NULL;
++		kfree(edid);
++	}
++	return status;
++}
++
++static int cdv_hdmi_set_property(struct drm_connector *connector,
++				       struct drm_property *property,
++				       uint64_t value)
++{
++	struct drm_encoder *encoder = connector->encoder;
++
++	if (!strcmp(property->name, "scaling mode") && encoder) {
++		struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc);
++		bool centre;
++		uint64_t curValue;
++
++		if (!crtc)
++			return -1;
++
++		switch (value) {
++		case DRM_MODE_SCALE_FULLSCREEN:
++			break;
++		case DRM_MODE_SCALE_NO_SCALE:
++			break;
++		case DRM_MODE_SCALE_ASPECT:
++			break;
++		default:
++			return -1;
++		}
++
++		if (drm_connector_property_get_value(connector,
++							property, &curValue))
++			return -1;
++
++		if (curValue == value)
++			return 0;
++
++		if (drm_connector_property_set_value(connector,
++							property, value))
++			return -1;
++
++		centre = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
++			(value == DRM_MODE_SCALE_NO_SCALE);
++
++		if (crtc->saved_mode.hdisplay != 0 &&
++		    crtc->saved_mode.vdisplay != 0) {
++			if (centre) {
++				if (!drm_crtc_helper_set_mode(encoder->crtc, &crtc->saved_mode,
++					    encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
++					return -1;
++			} else {
++				struct drm_encoder_helper_funcs *helpers
++						    = encoder->helper_private;
++				helpers->mode_set(encoder, &crtc->saved_mode,
++					     &crtc->saved_adjusted_mode);
++			}
++		}
++	}
++	return 0;
++}
++
++/*
++ * Return the list of HDMI DDC modes if available.
++ */
++static int cdv_hdmi_get_modes(struct drm_connector *connector)
++{
++	struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++	struct edid *edid = NULL;
++	int ret = 0;
++
++	edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
++	if (edid) {
++		drm_mode_connector_update_edid_property(connector, edid);
++		ret = drm_add_edid_modes(connector, edid);
++		kfree(edid);
++	}
++	return ret;
++}
++
++static int cdv_hdmi_mode_valid(struct drm_connector *connector,
++				 struct drm_display_mode *mode)
++{
++	struct drm_psb_private *dev_priv = connector->dev->dev_private;
++
++	if (mode->clock > 165000)
++		return MODE_CLOCK_HIGH;
++	if (mode->clock < 20000)
++		return MODE_CLOCK_HIGH;
++
++	/* just in case */
++	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++		return MODE_NO_DBLESCAN;
++
++	/* just in case */
++	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++		return MODE_NO_INTERLACE;
++
++	/* We assume worst case scenario of 32 bpp here, since we don't know */
++	if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
++	    dev_priv->vram_stolen_size)
++		return MODE_MEM;
++
++	return MODE_OK;
++}
++
++static void cdv_hdmi_destroy(struct drm_connector *connector)
++{
++	struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++
++	if (psb_intel_encoder->i2c_bus)
++		psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
++	drm_sysfs_connector_remove(connector);
++	drm_connector_cleanup(connector);
++	kfree(connector);
++}
++
++static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
++	.dpms = cdv_hdmi_dpms,
++	.mode_fixup = cdv_hdmi_mode_fixup,
++	.prepare = psb_intel_encoder_prepare,
++	.mode_set = cdv_hdmi_mode_set,
++	.commit = psb_intel_encoder_commit,
++};
++
++static const struct drm_connector_helper_funcs
++					cdv_hdmi_connector_helper_funcs = {
++	.get_modes = cdv_hdmi_get_modes,
++	.mode_valid = cdv_hdmi_mode_valid,
++	.best_encoder = psb_intel_best_encoder,
++};
++
++static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
++	.dpms = drm_helper_connector_dpms,
++	.save = cdv_hdmi_save,
++	.restore = cdv_hdmi_restore,
++	.detect = cdv_hdmi_detect,
++	.fill_modes = drm_helper_probe_single_connector_modes,
++	.set_property = cdv_hdmi_set_property,
++	.destroy = cdv_hdmi_destroy,
++};
++
++void cdv_hdmi_init(struct drm_device *dev,
++			struct psb_intel_mode_device *mode_dev, int reg)
++{
++	struct psb_intel_encoder *psb_intel_encoder;
++	struct psb_intel_connector *psb_intel_connector;
++	struct drm_connector *connector;
++	struct drm_encoder *encoder;
++	struct mid_intel_hdmi_priv *hdmi_priv;
++	int ddc_bus;
++
++	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
++				    GFP_KERNEL);
++
++	if (!psb_intel_encoder)
++		return;
++
++	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
++				      GFP_KERNEL);
++
++	if (!psb_intel_connector)
++		goto err_connector;
++
++	hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
++
++	if (!hdmi_priv)
++		goto err_priv;
++
++	connector = &psb_intel_connector->base;
++	encoder = &psb_intel_encoder->base;
++	drm_connector_init(dev, connector,
++			   &cdv_hdmi_connector_funcs,
++			   DRM_MODE_CONNECTOR_DVID);
++
++	drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
++			 DRM_MODE_ENCODER_TMDS);
++
++	psb_intel_connector_attach_encoder(psb_intel_connector,
++					   psb_intel_encoder);
++	psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
++	hdmi_priv->hdmi_reg = reg;
++	hdmi_priv->has_hdmi_sink = false;
++	psb_intel_encoder->dev_priv = hdmi_priv;
++
++	drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
++	drm_connector_helper_add(connector,
++				 &cdv_hdmi_connector_helper_funcs);
++	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++	connector->interlace_allowed = false;
++	connector->doublescan_allowed = false;
++
++	drm_connector_attach_property(connector,
++				      dev->mode_config.scaling_mode_property,
++				      DRM_MODE_SCALE_FULLSCREEN);
++
++	switch (reg) {
++	case SDVOB:
++		ddc_bus = GPIOE;
++		break;
++	case SDVOC:
++		ddc_bus = GPIOD;
++		break;
++	default:
++		DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
++		goto failed_ddc;
++		break;
++	}
++
++	psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
++				ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
++
++	if (!psb_intel_encoder->i2c_bus) {
++		dev_err(dev->dev, "No ddc adapter available!\n");
++		goto failed_ddc;
++	}
++
++	hdmi_priv->hdmi_i2c_adapter =
++				&(psb_intel_encoder->i2c_bus->adapter);
++	hdmi_priv->dev = dev;
++	drm_sysfs_connector_add(connector);
++	return;
++
++failed_ddc:
++	drm_encoder_cleanup(encoder);
++	drm_connector_cleanup(connector);
++err_priv:
++	kfree(psb_intel_connector);
++err_connector:
++	kfree(psb_intel_encoder);
++}
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+new file mode 100644
+index 0000000..8359c1a
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+@@ -0,0 +1,734 @@
++/*
++ * Copyright © 2006-2011 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ *	Eric Anholt <eric at anholt.net>
++ *	Dave Airlie <airlied at linux.ie>
++ *	Jesse Barnes <jesse.barnes at intel.com>
++ */
++
++#include <linux/i2c.h>
++#include <linux/dmi.h>
++#include <drm/drmP.h>
++
++#include "intel_bios.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "power.h"
++#include <linux/pm_runtime.h>
++#include "cdv_device.h"
++
++/**
++ * LVDS I2C backlight control macros
++ */
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BRIGHTNESS_MASK 0xFF
++#define BLC_I2C_TYPE	0x01
++#define BLC_PWM_TYPT	0x02
++
++#define BLC_POLARITY_NORMAL 0
++#define BLC_POLARITY_INVERSE 1
++
++#define PSB_BLC_MAX_PWM_REG_FREQ       (0xFFFE)
++#define PSB_BLC_MIN_PWM_REG_FREQ	(0x2)
++#define PSB_BLC_PWM_PRECISION_FACTOR	(10)
++#define PSB_BACKLIGHT_PWM_CTL_SHIFT	(16)
++#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
++
++struct cdv_intel_lvds_priv {
++	/**
++	 * Saved LVDS output states
++	 */
++	uint32_t savePP_ON;
++	uint32_t savePP_OFF;
++	uint32_t saveLVDS;
++	uint32_t savePP_CONTROL;
++	uint32_t savePP_CYCLE;
++	uint32_t savePFIT_CONTROL;
++	uint32_t savePFIT_PGM_RATIOS;
++	uint32_t saveBLC_PWM_CTL;
++};
++
++/*
++ * Returns the maximum level of the backlight duty cycle field.
++ */
++static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 retval;
++
++	if (gma_power_begin(dev, false)) {
++		retval = ((REG_READ(BLC_PWM_CTL) &
++			  BACKLIGHT_MODULATION_FREQ_MASK) >>
++			  BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
++
++		gma_power_end(dev);
++	} else
++		retval = ((dev_priv->regs.saveBLC_PWM_CTL &
++			  BACKLIGHT_MODULATION_FREQ_MASK) >>
++			  BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
++
++	return retval;
++}
++
++#if 0
++/*
++ * Set LVDS backlight level by I2C command
++ */
++static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
++					unsigned int level)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
++	u8 out_buf[2];
++	unsigned int blc_i2c_brightness;
++
++	struct i2c_msg msgs[] = {
++		{
++			.addr = lvds_i2c_bus->slave_addr,
++			.flags = 0,
++			.len = 2,
++			.buf = out_buf,
++		}
++	};
++
++	blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
++			     BRIGHTNESS_MASK /
++			     BRIGHTNESS_MAX_LEVEL);
++
++	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
++		blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
++
++	out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
++	out_buf[1] = (u8)blc_i2c_brightness;
++
++	if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
++		return 0;
++
++	DRM_ERROR("I2C transfer error\n");
++	return -1;
++}
++
++
++static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	u32 max_pwm_blc;
++	u32 blc_pwm_duty_cycle;
++
++	max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
++
++	/*BLC_PWM_CTL Should be initiated while backlight device init*/
++	BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
++
++	blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
++
++	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
++		blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
++
++	blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
++	REG_WRITE(BLC_PWM_CTL,
++		  (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
++		  (blc_pwm_duty_cycle));
++
++	return 0;
++}
++
++/*
++ * Set LVDS backlight level either by I2C or PWM
++ */
++void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (!dev_priv->lvds_bl) {
++		DRM_ERROR("NO LVDS Backlight Info\n");
++		return;
++	}
++
++	if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
++		cdv_lvds_i2c_set_brightness(dev, level);
++	else
++		cdv_lvds_pwm_set_brightness(dev, level);
++}
++#endif
++
++/**
++ * Sets the backlight level.
++ *
++ * level backlight level, from 0 to cdv_intel_lvds_get_max_backlight().
++ */
++static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 blc_pwm_ctl;
++
++	if (gma_power_begin(dev, false)) {
++		blc_pwm_ctl =
++			REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
++		REG_WRITE(BLC_PWM_CTL,
++				(blc_pwm_ctl |
++				(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
++		gma_power_end(dev);
++	} else {
++		blc_pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL &
++				~BACKLIGHT_DUTY_CYCLE_MASK;
++		dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
++					(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
++	}
++}
++
++/**
++ * Sets the power state for the panel.
++ */
++static void cdv_intel_lvds_set_power(struct drm_device *dev,
++				     struct drm_encoder *encoder, bool on)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 pp_status;
++
++	if (!gma_power_begin(dev, true))
++		return;
++
++	if (on) {
++		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++			  POWER_TARGET_ON);
++		do {
++			pp_status = REG_READ(PP_STATUS);
++		} while ((pp_status & PP_ON) == 0);
++
++		cdv_intel_lvds_set_backlight(dev,
++				dev_priv->mode_dev.backlight_duty_cycle);
++	} else {
++		cdv_intel_lvds_set_backlight(dev, 0);
++
++		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++			  ~POWER_TARGET_ON);
++		do {
++			pp_status = REG_READ(PP_STATUS);
++		} while (pp_status & PP_ON);
++	}
++	gma_power_end(dev);
++}
++
++static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++	if (mode == DRM_MODE_DPMS_ON)
++		cdv_intel_lvds_set_power(dev, encoder, true);
++	else
++		cdv_intel_lvds_set_power(dev, encoder, false);
++	/* XXX: We never power down the LVDS pairs. */
++}
++
++static void cdv_intel_lvds_save(struct drm_connector *connector)
++{
++}
++
++static void cdv_intel_lvds_restore(struct drm_connector *connector)
++{
++}
++
++static int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
++			      struct drm_display_mode *mode)
++{
++	struct drm_device *dev = connector->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct drm_display_mode *fixed_mode =
++					dev_priv->mode_dev.panel_fixed_mode;
++
++	/* just in case */
++	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++		return MODE_NO_DBLESCAN;
++
++	/* just in case */
++	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++		return MODE_NO_INTERLACE;
++
++	if (fixed_mode) {
++		if (mode->hdisplay > fixed_mode->hdisplay)
++			return MODE_PANEL;
++		if (mode->vdisplay > fixed_mode->vdisplay)
++			return MODE_PANEL;
++	}
++	return MODE_OK;
++}
++
++static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
++				  struct drm_display_mode *mode,
++				  struct drm_display_mode *adjusted_mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++	struct drm_encoder *tmp_encoder;
++	struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
++
++	/* Should never happen!! */
++	list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
++			    head) {
++		if (tmp_encoder != encoder
++		    && tmp_encoder->crtc == encoder->crtc) {
++			printk(KERN_ERR "Can't enable LVDS and another "
++			       "encoder on the same pipe\n");
++			return false;
++		}
++	}
++
++	/*
++	 * If we have timings from the BIOS for the panel, put them in
++	 * to the adjusted mode.  The CRTC will be set up for this mode,
++	 * with the panel scaling set up to source from the H/VDisplay
++	 * of the original mode.
++	 */
++	if (panel_fixed_mode != NULL) {
++		adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
++		adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
++		adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
++		adjusted_mode->htotal = panel_fixed_mode->htotal;
++		adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
++		adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
++		adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
++		adjusted_mode->vtotal = panel_fixed_mode->vtotal;
++		adjusted_mode->clock = panel_fixed_mode->clock;
++		drm_mode_set_crtcinfo(adjusted_mode,
++				      CRTC_INTERLACE_HALVE_V);
++	}
++
++	/*
++	 * XXX: It would be nice to support lower refresh rates on the
++	 * panels to reduce power consumption, and perhaps match the
++	 * user's requested refresh rate.
++	 */
++
++	return true;
++}
++
++static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++
++	if (!gma_power_begin(dev, true))
++		return;
++
++	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
++					  BACKLIGHT_DUTY_CYCLE_MASK);
++
++	cdv_intel_lvds_set_power(dev, encoder, false);
++
++	gma_power_end(dev);
++}
++
++static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++
++	if (mode_dev->backlight_duty_cycle == 0)
++		mode_dev->backlight_duty_cycle =
++		    cdv_intel_lvds_get_max_backlight(dev);
++
++	cdv_intel_lvds_set_power(dev, encoder, true);
++}
++
++static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
++				struct drm_display_mode *mode,
++				struct drm_display_mode *adjusted_mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 pfit_control;
++
++	/*
++	 * The LVDS pin pair will already have been turned on in the
++	 * cdv_intel_crtc_mode_set since it has a large impact on the DPLL
++	 * settings.
++	 */
++
++	/*
++	 * Enable automatic panel scaling so that non-native modes fill the
++	 * screen.  Should be enabled before the pipe is enabled, according to
++	 * register description and PRM.
++	 */
++	if (mode->hdisplay != adjusted_mode->hdisplay ||
++	    mode->vdisplay != adjusted_mode->vdisplay)
++		pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
++				HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
++				HORIZ_INTERP_BILINEAR);
++	else
++		pfit_control = 0;
++
++	if (dev_priv->lvds_dither)
++		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
++
++	REG_WRITE(PFIT_CONTROL, pfit_control);
++}
++
++/**
++ * Detect the LVDS connection.
++ *
++ * This always returns CONNECTOR_STATUS_CONNECTED.
++ * This connector should only have
++ * been set up if the LVDS was actually connected anyway.
++ */
++static enum drm_connector_status cdv_intel_lvds_detect(
++				struct drm_connector *connector, bool force)
++{
++	return connector_status_connected;
++}
++
++/**
++ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
++ */
++static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++	int ret;
++
++	ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter);
++
++	if (ret)
++		return ret;
++
++	/* Didn't get an EDID, so
++	 * Set wide sync ranges so we get all modes
++	 * handed to valid_mode for checking
++	 */
++	connector->display_info.min_vfreq = 0;
++	connector->display_info.max_vfreq = 200;
++	connector->display_info.min_hfreq = 0;
++	connector->display_info.max_hfreq = 200;
++	if (mode_dev->panel_fixed_mode != NULL) {
++		struct drm_display_mode *mode =
++		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++		drm_mode_probed_add(connector, mode);
++		return 1;
++	}
++
++	return 0;
++}
++
++/**
++ * cdv_intel_lvds_destroy - unregister and free LVDS structures
++ * @connector: connector to free
++ *
++ * Unregister the DDC bus for this connector then free the driver private
++ * structure.
++ */
++static void cdv_intel_lvds_destroy(struct drm_connector *connector)
++{
++	struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++
++	if (psb_intel_encoder->i2c_bus)
++		psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
++	drm_sysfs_connector_remove(connector);
++	drm_connector_cleanup(connector);
++	kfree(connector);
++}
++
++static int cdv_intel_lvds_set_property(struct drm_connector *connector,
++				       struct drm_property *property,
++				       uint64_t value)
++{
++	struct drm_encoder *encoder = connector->encoder;
++
++	if (!strcmp(property->name, "scaling mode") && encoder) {
++		struct psb_intel_crtc *crtc =
++					to_psb_intel_crtc(encoder->crtc);
++		uint64_t curValue;
++
++		if (!crtc)
++			return -1;
++
++		switch (value) {
++		case DRM_MODE_SCALE_FULLSCREEN:
++			break;
++		case DRM_MODE_SCALE_NO_SCALE:
++			break;
++		case DRM_MODE_SCALE_ASPECT:
++			break;
++		default:
++			return -1;
++		}
++
++		if (drm_connector_property_get_value(connector,
++						     property,
++						     &curValue))
++			return -1;
++
++		if (curValue == value)
++			return 0;
++
++		if (drm_connector_property_set_value(connector,
++							property,
++							value))
++			return -1;
++
++		if (crtc->saved_mode.hdisplay != 0 &&
++		    crtc->saved_mode.vdisplay != 0) {
++			if (!drm_crtc_helper_set_mode(encoder->crtc,
++						      &crtc->saved_mode,
++						      encoder->crtc->x,
++						      encoder->crtc->y,
++						      encoder->crtc->fb))
++				return -1;
++		}
++	} else if (!strcmp(property->name, "backlight") && encoder) {
++		if (drm_connector_property_set_value(connector,
++							property,
++							value))
++			return -1;
++		else {
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++			struct drm_psb_private *dev_priv =
++						encoder->dev->dev_private;
++			struct backlight_device *bd =
++						dev_priv->backlight_device;
++			bd->props.brightness = value;
++			backlight_update_status(bd);
++#endif
++		}
++	} else if (!strcmp(property->name, "DPMS") && encoder) {
++		struct drm_encoder_helper_funcs *helpers =
++					encoder->helper_private;
++		helpers->dpms(encoder, value);
++	}
++	return 0;
++}
++
++static const struct drm_encoder_helper_funcs
++					cdv_intel_lvds_helper_funcs = {
++	.dpms = cdv_intel_lvds_encoder_dpms,
++	.mode_fixup = cdv_intel_lvds_mode_fixup,
++	.prepare = cdv_intel_lvds_prepare,
++	.mode_set = cdv_intel_lvds_mode_set,
++	.commit = cdv_intel_lvds_commit,
++};
++
++static const struct drm_connector_helper_funcs
++				cdv_intel_lvds_connector_helper_funcs = {
++	.get_modes = cdv_intel_lvds_get_modes,
++	.mode_valid = cdv_intel_lvds_mode_valid,
++	.best_encoder = psb_intel_best_encoder,
++};
++
++static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
++	.dpms = drm_helper_connector_dpms,
++	.save = cdv_intel_lvds_save,
++	.restore = cdv_intel_lvds_restore,
++	.detect = cdv_intel_lvds_detect,
++	.fill_modes = drm_helper_probe_single_connector_modes,
++	.set_property = cdv_intel_lvds_set_property,
++	.destroy = cdv_intel_lvds_destroy,
++};
++
++
++static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
++{
++	drm_encoder_cleanup(encoder);
++}
++
++const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
++	.destroy = cdv_intel_lvds_enc_destroy,
++};
++
++/**
++ * cdv_intel_lvds_init - setup LVDS connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, register the LVDS DDC bus, and try to figure out what
++ * modes we can display on the LVDS panel (if present).
++ */
++void cdv_intel_lvds_init(struct drm_device *dev,
++		     struct psb_intel_mode_device *mode_dev)
++{
++	struct psb_intel_encoder *psb_intel_encoder;
++	struct psb_intel_connector *psb_intel_connector;
++	struct cdv_intel_lvds_priv *lvds_priv;
++	struct drm_connector *connector;
++	struct drm_encoder *encoder;
++	struct drm_display_mode *scan;
++	struct drm_crtc *crtc;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 lvds;
++	int pipe;
++
++	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
++				    GFP_KERNEL);
++	if (!psb_intel_encoder)
++		return;
++
++	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
++				      GFP_KERNEL);
++	if (!psb_intel_connector)
++		goto failed_connector;
++
++	lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
++	if (!lvds_priv)
++		goto failed_lvds_priv;
++
++	psb_intel_encoder->dev_priv = lvds_priv;
++
++	connector = &psb_intel_connector->base;
++	encoder = &psb_intel_encoder->base;
++
++
++	drm_connector_init(dev, connector,
++			   &cdv_intel_lvds_connector_funcs,
++			   DRM_MODE_CONNECTOR_LVDS);
++
++	drm_encoder_init(dev, encoder,
++			 &cdv_intel_lvds_enc_funcs,
++			 DRM_MODE_ENCODER_LVDS);
++
++
++	psb_intel_connector_attach_encoder(psb_intel_connector,
++					   psb_intel_encoder);
++	psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
++
++	drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
++	drm_connector_helper_add(connector,
++				 &cdv_intel_lvds_connector_helper_funcs);
++	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++	connector->interlace_allowed = false;
++	connector->doublescan_allowed = false;
++
++	/*Attach connector properties*/
++	drm_connector_attach_property(connector,
++				      dev->mode_config.scaling_mode_property,
++				      DRM_MODE_SCALE_FULLSCREEN);
++	drm_connector_attach_property(connector,
++				      dev_priv->backlight_property,
++				      BRIGHTNESS_MAX_LEVEL);
++
++	/**
++	 * Set up I2C bus
++	 * FIXME: destroy i2c_bus on exit
++	 */
++	psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
++							 GPIOB,
++							 "LVDSBLC_B");
++	if (!psb_intel_encoder->i2c_bus) {
++		dev_printk(KERN_ERR,
++			&dev->pdev->dev, "I2C bus registration failed.\n");
++		goto failed_blc_i2c;
++	}
++	psb_intel_encoder->i2c_bus->slave_addr = 0x2C;
++	dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus;
++
++	/*
++	 * LVDS discovery:
++	 * 1) check for EDID on DDC
++	 * 2) check for VBT data
++	 * 3) check to see if LVDS is already on
++	 *    if none of the above, no panel
++	 * 4) make sure lid is open
++	 *    if closed, act like it's not there for now
++	 */
++
++	/* Set up the DDC bus. */
++	psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
++							 GPIOC,
++							 "LVDSDDC_C");
++	if (!psb_intel_encoder->ddc_bus) {
++		dev_printk(KERN_ERR, &dev->pdev->dev,
++			   "DDC bus registration " "failed.\n");
++		goto failed_ddc;
++	}
++
++	/*
++	 * Attempt to get the fixed panel mode from DDC.  Assume that the
++	 * preferred mode is the right one.
++	 */
++	psb_intel_ddc_get_modes(connector,
++				&psb_intel_encoder->ddc_bus->adapter);
++	list_for_each_entry(scan, &connector->probed_modes, head) {
++		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
++			mode_dev->panel_fixed_mode =
++			    drm_mode_duplicate(dev, scan);
++			goto out;	/* FIXME: check for quirks */
++		}
++	}
++
++	/* Failed to get EDID, what about VBT? do we need this?*/
++	if (dev_priv->lfp_lvds_vbt_mode) {
++		mode_dev->panel_fixed_mode =
++			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
++		if (mode_dev->panel_fixed_mode) {
++			mode_dev->panel_fixed_mode->type |=
++				DRM_MODE_TYPE_PREFERRED;
++			goto out;	/* FIXME: check for quirks */
++		}
++	}
++	/*
++	 * If we didn't get EDID, try checking if the panel is already turned
++	 * on.	If so, assume that whatever is currently programmed is the
++	 * correct mode.
++	 */
++	lvds = REG_READ(LVDS);
++	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
++	crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
++
++	if (crtc && (lvds & LVDS_PORT_EN)) {
++		mode_dev->panel_fixed_mode =
++		    cdv_intel_crtc_mode_get(dev, crtc);
++		if (mode_dev->panel_fixed_mode) {
++			mode_dev->panel_fixed_mode->type |=
++			    DRM_MODE_TYPE_PREFERRED;
++			goto out;	/* FIXME: check for quirks */
++		}
++	}
++
++	/* If we still don't have a mode after all that, give up. */
++	if (!mode_dev->panel_fixed_mode) {
++		DRM_DEBUG
++			("Found no modes on the lvds, ignoring the LVDS\n");
++		goto failed_find;
++	}
++
++out:
++	drm_sysfs_connector_add(connector);
++	return;
++
++failed_find:
++	printk(KERN_ERR "Failed find\n");
++	if (psb_intel_encoder->ddc_bus)
++		psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
++failed_ddc:
++	printk(KERN_ERR "Failed DDC\n");
++	if (psb_intel_encoder->i2c_bus)
++		psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
++failed_blc_i2c:
++	printk(KERN_ERR "Failed BLC\n");
++	drm_encoder_cleanup(encoder);
++	drm_connector_cleanup(connector);
++	kfree(lvds_priv);
++failed_lvds_priv:
++	kfree(psb_intel_connector);
++failed_connector:
++	kfree(psb_intel_encoder);
++}
+diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
+new file mode 100644
+index 0000000..8ea202f
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/framebuffer.c
+@@ -0,0 +1,800 @@
++/**************************************************************************
++ * Copyright (c) 2007-2011, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/tty.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/fb.h>
++#include <linux/init.h>
++#include <linux/console.h>
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_fb_helper.h>
++
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_drv.h"
++#include "framebuffer.h"
++#include "gtt.h"
++
++static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
++static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++					      struct drm_file *file_priv,
++					      unsigned int *handle);
++
++static const struct drm_framebuffer_funcs psb_fb_funcs = {
++	.destroy = psb_user_framebuffer_destroy,
++	.create_handle = psb_user_framebuffer_create_handle,
++};
++
++#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
++
++static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
++			   unsigned blue, unsigned transp,
++			   struct fb_info *info)
++{
++	struct psb_fbdev *fbdev = info->par;
++	struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
++	uint32_t v;
++
++	if (!fb)
++		return -ENOMEM;
++
++	if (regno > 255)
++		return 1;
++
++	red = CMAP_TOHW(red, info->var.red.length);
++	blue = CMAP_TOHW(blue, info->var.blue.length);
++	green = CMAP_TOHW(green, info->var.green.length);
++	transp = CMAP_TOHW(transp, info->var.transp.length);
++
++	v = (red << info->var.red.offset) |
++	    (green << info->var.green.offset) |
++	    (blue << info->var.blue.offset) |
++	    (transp << info->var.transp.offset);
++
++	if (regno < 16) {
++		switch (fb->bits_per_pixel) {
++		case 16:
++			((uint32_t *) info->pseudo_palette)[regno] = v;
++			break;
++		case 24:
++		case 32:
++			((uint32_t *) info->pseudo_palette)[regno] = v;
++			break;
++		}
++	}
++
++	return 0;
++}
++
++static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++	struct psb_fbdev *fbdev = info->par;
++	struct psb_framebuffer *psbfb = &fbdev->pfb;
++	struct drm_device *dev = psbfb->base.dev;
++
++	/*
++	 *	We have to poke our nose in here. The core fb code assumes
++	 *	panning is part of the hardware that can be invoked before
++	 *	the actual fb is mapped. In our case that isn't quite true.
++	 */
++	if (psbfb->gtt->npage) {
++		/* GTT roll shifts in 4K pages, we need to shift the right
++		   number of pages */
++		int pages = info->fix.line_length >> 12;
++		psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
++	}
++        return 0;
++}
++
++static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++	struct psb_framebuffer *psbfb = vma->vm_private_data;
++	struct drm_device *dev = psbfb->base.dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	int page_num;
++	int i;
++	unsigned long address;
++	int ret;
++	unsigned long pfn;
++	/* FIXME: assumes fb at stolen base which may not be true */
++	unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
++
++	page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++	address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
++
++	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++	for (i = 0; i < page_num; i++) {
++		pfn = (phys_addr >> PAGE_SHIFT);
++
++		ret = vm_insert_mixed(vma, address, pfn);
++		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
++			break;
++		else if (unlikely(ret != 0)) {
++			ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
++			return ret;
++		}
++		address += PAGE_SIZE;
++		phys_addr += PAGE_SIZE;
++	}
++	return VM_FAULT_NOPAGE;
++}
++
++static void psbfb_vm_open(struct vm_area_struct *vma)
++{
++}
++
++static void psbfb_vm_close(struct vm_area_struct *vma)
++{
++}
++
++static struct vm_operations_struct psbfb_vm_ops = {
++	.fault	= psbfb_vm_fault,
++	.open	= psbfb_vm_open,
++	.close	= psbfb_vm_close
++};
++
++static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
++{
++	struct psb_fbdev *fbdev = info->par;
++	struct psb_framebuffer *psbfb = &fbdev->pfb;
++
++	if (vma->vm_pgoff != 0)
++		return -EINVAL;
++	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
++		return -EINVAL;
++
++	if (!psbfb->addr_space)
++		psbfb->addr_space = vma->vm_file->f_mapping;
++	/*
++	 * If this is a GEM object then info->screen_base is the virtual
++	 * kernel remapping of the object. FIXME: Review if this is
++	 * suitable for our mmap work
++	 */
++	vma->vm_ops = &psbfb_vm_ops;
++	vma->vm_private_data = (void *)psbfb;
++	vma->vm_flags |= VM_RESERVED | VM_IO |
++					VM_MIXEDMAP | VM_DONTEXPAND;
++	return 0;
++}
++
++static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
++						unsigned long arg)
++{
++	return -ENOTTY;
++}
++
++static struct fb_ops psbfb_ops = {
++	.owner = THIS_MODULE,
++	.fb_check_var = drm_fb_helper_check_var,
++	.fb_set_par = drm_fb_helper_set_par,
++	.fb_blank = drm_fb_helper_blank,
++	.fb_setcolreg = psbfb_setcolreg,
++	.fb_fillrect = cfb_fillrect,
++	.fb_copyarea = psbfb_copyarea,
++	.fb_imageblit = cfb_imageblit,
++	.fb_mmap = psbfb_mmap,
++	.fb_sync = psbfb_sync,
++	.fb_ioctl = psbfb_ioctl,
++};
++
++static struct fb_ops psbfb_roll_ops = {
++	.owner = THIS_MODULE,
++	.fb_check_var = drm_fb_helper_check_var,
++	.fb_set_par = drm_fb_helper_set_par,
++	.fb_blank = drm_fb_helper_blank,
++	.fb_setcolreg = psbfb_setcolreg,
++	.fb_fillrect = cfb_fillrect,
++	.fb_copyarea = cfb_copyarea,
++	.fb_imageblit = cfb_imageblit,
++	.fb_pan_display = psbfb_pan,
++	.fb_mmap = psbfb_mmap,
++	.fb_ioctl = psbfb_ioctl,
++};
++
++static struct fb_ops psbfb_unaccel_ops = {
++	.owner = THIS_MODULE,
++	.fb_check_var = drm_fb_helper_check_var,
++	.fb_set_par = drm_fb_helper_set_par,
++	.fb_blank = drm_fb_helper_blank,
++	.fb_setcolreg = psbfb_setcolreg,
++	.fb_fillrect = cfb_fillrect,
++	.fb_copyarea = cfb_copyarea,
++	.fb_imageblit = cfb_imageblit,
++	.fb_mmap = psbfb_mmap,
++	.fb_ioctl = psbfb_ioctl,
++};
++
++/**
++ *	psb_framebuffer_init	-	initialize a framebuffer
++ *	@dev: our DRM device
++ *	@fb: framebuffer to set up
++ *	@mode_cmd: mode description
++ *	@gt: backing object
++ *
++ *	Configure and fill in the boilerplate for our frame buffer. Return
++ *	0 on success or an error code if we fail.
++ */
++static int psb_framebuffer_init(struct drm_device *dev,
++					struct psb_framebuffer *fb,
++					struct drm_mode_fb_cmd2 *mode_cmd,
++					struct gtt_range *gt)
++{
++	u32 bpp, depth;
++	int ret;
++
++	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
++
++	if (mode_cmd->pitches[0] & 63)
++		return -EINVAL;
++	switch (bpp) {
++	case 8:
++	case 16:
++	case 24:
++	case 32:
++		break;
++	default:
++		return -EINVAL;
++	}
++	ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
++	if (ret) {
++		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
++		return ret;
++	}
++	drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
++	fb->gtt = gt;
++	return 0;
++}
++
++/**
++ *	psb_framebuffer_create	-	create a framebuffer backed by gt
++ *	@dev: our DRM device
++ *	@mode_cmd: the description of the requested mode
++ *	@gt: the backing object
++ *
++ *	Create a framebuffer object backed by the gt, and fill in the
++ *	boilerplate required
++ *
++ *	TODO: review object references
++ */
++
++static struct drm_framebuffer *psb_framebuffer_create
++			(struct drm_device *dev,
++			 struct drm_mode_fb_cmd2 *mode_cmd,
++			 struct gtt_range *gt)
++{
++	struct psb_framebuffer *fb;
++	int ret;
++
++	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
++	if (!fb)
++		return ERR_PTR(-ENOMEM);
++
++	ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
++	if (ret) {
++		kfree(fb);
++		return ERR_PTR(ret);
++	}
++	return &fb->base;
++}
++
++/**
++ *	psbfb_alloc		-	allocate frame buffer memory
++ *	@dev: the DRM device
++ *	@aligned_size: space needed
++ *	@force: fall back to GEM buffers if need be
++ *
++ *	Allocate the frame buffer. In the usual case we get a GTT range that
++ *	is stolen memory backed and life is simple. If there isn't sufficient
++ *	we fail as we don't have the virtual mapping space to really vmap it
++ *	and the kernel console code can't handle non linear framebuffers.
++ *
++ *	Re-address this as and if the framebuffer layer grows this ability.
++ */
++static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
++{
++	struct gtt_range *backing;
++	/* Begin by trying to use stolen memory backing */
++	backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
++	if (backing) {
++		if (drm_gem_private_object_init(dev,
++					&backing->gem, aligned_size) == 0)
++			return backing;
++		psb_gtt_free_range(dev, backing);
++	}
++	return NULL;
++}
++
++/**
++ *	psbfb_create		-	create a framebuffer
++ *	@fbdev: the framebuffer device
++ *	@sizes: specification of the layout
++ *
++ *	Create a framebuffer to the specifications provided
++ */
++static int psbfb_create(struct psb_fbdev *fbdev,
++				struct drm_fb_helper_surface_size *sizes)
++{
++	struct drm_device *dev = fbdev->psb_fb_helper.dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct fb_info *info;
++	struct drm_framebuffer *fb;
++	struct psb_framebuffer *psbfb = &fbdev->pfb;
++	struct drm_mode_fb_cmd2 mode_cmd;
++	struct device *device = &dev->pdev->dev;
++	int size;
++	int ret;
++	struct gtt_range *backing;
++	u32 bpp, depth;
++	int gtt_roll = 0;
++	int pitch_lines = 0;
++
++	mode_cmd.width = sizes->surface_width;
++	mode_cmd.height = sizes->surface_height;
++	bpp = sizes->surface_bpp;
++	depth = sizes->surface_depth;
++
++	/* No 24bit packed */
++	if (bpp == 24)
++		bpp = 32;
++
++	do {
++		/*
++		 * Acceleration via the GTT requires pitch to be
++		 * power of two aligned. Preferably page but less
++		 * is ok with some fonts
++		 */
++        	mode_cmd.pitches[0] =  ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
++
++        	size = mode_cmd.pitches[0] * mode_cmd.height;
++        	size = ALIGN(size, PAGE_SIZE);
++
++		/* Allocate the fb in the GTT with stolen page backing */
++		backing = psbfb_alloc(dev, size);
++
++		if (pitch_lines)
++			pitch_lines *= 2;
++		else
++			pitch_lines = 1;
++		gtt_roll++;
++	} while (backing == NULL && pitch_lines <= 16);
++
++	/* The final pitch we accepted if we succeeded */
++	pitch_lines /= 2;
++
++	if (backing == NULL) {
++		/*
++		 *	We couldn't get the space we wanted, fall back to the
++		 *	display engine requirement instead.  The HW requires
++		 *	the pitch to be 64 byte aligned
++		 */
++
++		gtt_roll = 0;	/* Don't use GTT accelerated scrolling */
++		pitch_lines = 64;
++
++		mode_cmd.pitches[0] =  ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
++
++		size = mode_cmd.pitches[0] * mode_cmd.height;
++		size = ALIGN(size, PAGE_SIZE);
++
++		/* Allocate the framebuffer in the GTT with stolen page backing */
++		backing = psbfb_alloc(dev, size);
++		if (backing == NULL)
++			return -ENOMEM;
++	}
++
++	mutex_lock(&dev->struct_mutex);
++
++	info = framebuffer_alloc(0, device);
++	if (!info) {
++		ret = -ENOMEM;
++		goto out_err1;
++	}
++	info->par = fbdev;
++
++	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
++
++	ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
++	if (ret)
++		goto out_unref;
++
++	fb = &psbfb->base;
++	psbfb->fbdev = info;
++
++	fbdev->psb_fb_helper.fb = fb;
++	fbdev->psb_fb_helper.fbdev = info;
++
++	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
++	strcpy(info->fix.id, "psbfb");
++
++	info->flags = FBINFO_DEFAULT;
++	if (dev_priv->ops->accel_2d && pitch_lines > 8)	/* 2D engine */
++		info->fbops = &psbfb_ops;
++	else if (gtt_roll) {	/* GTT rolling seems best */
++		info->fbops = &psbfb_roll_ops;
++		info->flags |= FBINFO_HWACCEL_YPAN;
++	} else	/* Software */
++		info->fbops = &psbfb_unaccel_ops;
++
++	ret = fb_alloc_cmap(&info->cmap, 256, 0);
++	if (ret) {
++		ret = -ENOMEM;
++		goto out_unref;
++	}
++
++	info->fix.smem_start = dev->mode_config.fb_base;
++	info->fix.smem_len = size;
++	info->fix.ywrapstep = gtt_roll;
++	info->fix.ypanstep = 0;
++
++	/* Accessed stolen memory directly */
++	info->screen_base = (char *)dev_priv->vram_addr +
++							backing->offset;
++	info->screen_size = size;
++
++	if (dev_priv->gtt.stolen_size) {
++		info->apertures = alloc_apertures(1);
++		if (!info->apertures) {
++			ret = -ENOMEM;
++			goto out_unref;
++		}
++		info->apertures->ranges[0].base = dev->mode_config.fb_base;
++		info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
++	}
++
++	drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
++				sizes->fb_width, sizes->fb_height);
++
++	info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
++	info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
++
++	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
++
++	dev_info(dev->dev, "allocated %dx%d fb\n",
++					psbfb->base.width, psbfb->base.height);
++
++	mutex_unlock(&dev->struct_mutex);
++	return 0;
++out_unref:
++	if (backing->stolen)
++		psb_gtt_free_range(dev, backing);
++	else
++		drm_gem_object_unreference(&backing->gem);
++out_err1:
++	mutex_unlock(&dev->struct_mutex);
++	psb_gtt_free_range(dev, backing);
++	return ret;
++}
++
++/**
++ *	psb_user_framebuffer_create	-	create framebuffer
++ *	@dev: our DRM device
++ *	@filp: client file
++ *	@cmd: mode request
++ *
++ *	Create a new framebuffer backed by a userspace GEM object
++ */
++static struct drm_framebuffer *psb_user_framebuffer_create
++			(struct drm_device *dev, struct drm_file *filp,
++			 struct drm_mode_fb_cmd2 *cmd)
++{
++	struct gtt_range *r;
++	struct drm_gem_object *obj;
++
++	/*
++	 *	Find the GEM object and thus the gtt range object that is
++	 *	to back this space
++	 */
++	obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
++	if (obj == NULL)
++		return ERR_PTR(-ENOENT);
++
++	/* Let the core code do all the work */
++	r = container_of(obj, struct gtt_range, gem);
++	return psb_framebuffer_create(dev, cmd, r);
++}
++
++static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
++							u16 blue, int regno)
++{
++	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
++
++	intel_crtc->lut_r[regno] = red >> 8;
++	intel_crtc->lut_g[regno] = green >> 8;
++	intel_crtc->lut_b[regno] = blue >> 8;
++}
++
++static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
++					u16 *green, u16 *blue, int regno)
++{
++	struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
++
++	*red = intel_crtc->lut_r[regno] << 8;
++	*green = intel_crtc->lut_g[regno] << 8;
++	*blue = intel_crtc->lut_b[regno] << 8;
++}
++
++static int psbfb_probe(struct drm_fb_helper *helper,
++				struct drm_fb_helper_surface_size *sizes)
++{
++	struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
++	int new_fb = 0;
++	int ret;
++
++	if (!helper->fb) {
++		ret = psbfb_create(psb_fbdev, sizes);
++		if (ret)
++			return ret;
++		new_fb = 1;
++	}
++	return new_fb;
++}
++
++struct drm_fb_helper_funcs psb_fb_helper_funcs = {
++	.gamma_set = psbfb_gamma_set,
++	.gamma_get = psbfb_gamma_get,
++	.fb_probe = psbfb_probe,
++};
++
++static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
++{
++	struct fb_info *info;
++	struct psb_framebuffer *psbfb = &fbdev->pfb;
++
++	if (fbdev->psb_fb_helper.fbdev) {
++		info = fbdev->psb_fb_helper.fbdev;
++		unregister_framebuffer(info);
++		if (info->cmap.len)
++			fb_dealloc_cmap(&info->cmap);
++		framebuffer_release(info);
++	}
++	drm_fb_helper_fini(&fbdev->psb_fb_helper);
++	drm_framebuffer_cleanup(&psbfb->base);
++
++	if (psbfb->gtt)
++		drm_gem_object_unreference(&psbfb->gtt->gem);
++	return 0;
++}
++
++int psb_fbdev_init(struct drm_device *dev)
++{
++	struct psb_fbdev *fbdev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
++	if (!fbdev) {
++		dev_err(dev->dev, "no memory\n");
++		return -ENOMEM;
++	}
++
++	dev_priv->fbdev = fbdev;
++	fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
++
++	drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
++							INTELFB_CONN_LIMIT);
++
++	drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
++	drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
++	return 0;
++}
++
++static void psb_fbdev_fini(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (!dev_priv->fbdev)
++		return;
++
++	psb_fbdev_destroy(dev, dev_priv->fbdev);
++	kfree(dev_priv->fbdev);
++	dev_priv->fbdev = NULL;
++}
++
++static void psbfb_output_poll_changed(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
++	drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
++}
++
++/**
++ *	psb_user_framebuffer_create_handle - add handle to a framebuffer
++ *	@fb: framebuffer
++ *	@file_priv: our DRM file
++ *	@handle: returned handle
++ *
++ *	Our framebuffer object is a GTT range which also contains a GEM
++ *	object. We need to turn it into a handle for userspace. GEM will do
++ *	the work for us
++ */
++static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
++					      struct drm_file *file_priv,
++					      unsigned int *handle)
++{
++	struct psb_framebuffer *psbfb = to_psb_fb(fb);
++	struct gtt_range *r = psbfb->gtt;
++	return drm_gem_handle_create(file_priv, &r->gem, handle);
++}
++
++/**
++ *	psb_user_framebuffer_destroy	-	destruct user created fb
++ *	@fb: framebuffer
++ *
++ *	User framebuffers are backed by GEM objects so all we have to do is
++ *	clean up a bit and drop the reference, GEM will handle the fallout
++ */
++static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
++{
++	struct psb_framebuffer *psbfb = to_psb_fb(fb);
++	struct gtt_range *r = psbfb->gtt;
++	struct drm_device *dev = fb->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_fbdev *fbdev = dev_priv->fbdev;
++	struct drm_crtc *crtc;
++	int reset = 0;
++
++	/* Should never get stolen memory for a user fb */
++	WARN_ON(r->stolen);
++
++	/* Check if we are erroneously live */
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
++		if (crtc->fb == fb)
++			reset = 1;
++
++	if (reset)
++		/*
++		 * Now force a sane response before we permit the DRM CRTC
++		 * layer to do stupid things like blank the display. Instead
++		 * we reset this framebuffer as if the user had forced a reset.
++		 * We must do this before the cleanup so that the DRM layer
++		 * doesn't get a chance to stick its oar in where it isn't
++		 * wanted.
++		 */
++		drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
++
++	/* Let DRM do its clean up */
++	drm_framebuffer_cleanup(fb);
++	/*  We are no longer using the resource in GEM */
++	drm_gem_object_unreference_unlocked(&r->gem);
++	kfree(fb);
++}
++
++static const struct drm_mode_config_funcs psb_mode_funcs = {
++	.fb_create = psb_user_framebuffer_create,
++	.output_poll_changed = psbfb_output_poll_changed,
++};
++
++static int psb_create_backlight_property(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct drm_property *backlight;
++
++	if (dev_priv->backlight_property)
++		return 0;
++
++	backlight = drm_property_create_range(dev, 0, "backlight", 0, 100);
++
++	dev_priv->backlight_property = backlight;
++
++	return 0;
++}
++
++static void psb_setup_outputs(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct drm_connector *connector;
++
++	drm_mode_create_scaling_mode_property(dev);
++	psb_create_backlight_property(dev);
++
++	dev_priv->ops->output_init(dev);
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list,
++			    head) {
++		struct psb_intel_encoder *psb_intel_encoder =
++			psb_intel_attached_encoder(connector);
++		struct drm_encoder *encoder = &psb_intel_encoder->base;
++		int crtc_mask = 0, clone_mask = 0;
++
++		/* valid crtcs */
++		switch (psb_intel_encoder->type) {
++		case INTEL_OUTPUT_ANALOG:
++			crtc_mask = (1 << 0);
++			clone_mask = (1 << INTEL_OUTPUT_ANALOG);
++			break;
++		case INTEL_OUTPUT_SDVO:
++			crtc_mask = ((1 << 0) | (1 << 1));
++			clone_mask = (1 << INTEL_OUTPUT_SDVO);
++			break;
++		case INTEL_OUTPUT_LVDS:
++			if (IS_MRST(dev))
++				crtc_mask = (1 << 0);
++			else
++				crtc_mask = (1 << 1);
++			clone_mask = (1 << INTEL_OUTPUT_LVDS);
++			break;
++		case INTEL_OUTPUT_MIPI:
++			crtc_mask = (1 << 0);
++			clone_mask = (1 << INTEL_OUTPUT_MIPI);
++			break;
++		case INTEL_OUTPUT_MIPI2:
++			crtc_mask = (1 << 2);
++			clone_mask = (1 << INTEL_OUTPUT_MIPI2);
++			break;
++		case INTEL_OUTPUT_HDMI:
++			if (IS_MFLD(dev))
++				crtc_mask = (1 << 1);
++			else	
++				crtc_mask = (1 << 0);
++			clone_mask = (1 << INTEL_OUTPUT_HDMI);
++			break;
++		}
++		encoder->possible_crtcs = crtc_mask;
++		encoder->possible_clones =
++		    psb_intel_connector_clones(dev, clone_mask);
++	}
++}
++
++void psb_modeset_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++	int i;
++
++	drm_mode_config_init(dev);
++
++	dev->mode_config.min_width = 0;
++	dev->mode_config.min_height = 0;
++
++	dev->mode_config.funcs = (void *) &psb_mode_funcs;
++
++	/* set memory base */
++	/* Oaktrail and Poulsbo should use BAR 2*/
++	pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
++					&(dev->mode_config.fb_base));
++
++	/* num pipes is 2 for PSB but 1 for Mrst */
++	for (i = 0; i < dev_priv->num_pipe; i++)
++		psb_intel_crtc_init(dev, i, mode_dev);
++
++	dev->mode_config.max_width = 2048;
++	dev->mode_config.max_height = 2048;
++
++	psb_setup_outputs(dev);
++}
++
++void psb_modeset_cleanup(struct drm_device *dev)
++{
++	mutex_lock(&dev->struct_mutex);
++
++	drm_kms_helper_poll_fini(dev);
++	psb_fbdev_fini(dev);
++	drm_mode_config_cleanup(dev);
++
++	mutex_unlock(&dev->struct_mutex);
++}
+diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
+new file mode 100644
+index 0000000..989558a
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/framebuffer.h
+@@ -0,0 +1,47 @@
++/*
++ * Copyright (c) 2008-2011, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ *      Eric Anholt <eric at anholt.net>
++ *
++ */
++
++#ifndef _FRAMEBUFFER_H_
++#define _FRAMEBUFFER_H_
++
++#include <drm/drmP.h>
++#include <drm/drm_fb_helper.h>
++
++#include "psb_drv.h"
++
++struct psb_framebuffer {
++	struct drm_framebuffer base;
++	struct address_space *addr_space;
++	struct fb_info *fbdev;
++	struct gtt_range *gtt;
++};
++
++struct psb_fbdev {
++	struct drm_fb_helper psb_fb_helper;
++	struct psb_framebuffer pfb;
++};
++
++#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
++
++extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
++
++#endif
++
+diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
+new file mode 100644
+index 0000000..9fbb868
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/gem.c
+@@ -0,0 +1,292 @@
++/*
++ *  psb GEM interface
++ *
++ * Copyright (c) 2011, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Alan Cox
++ *
++ * TODO:
++ *	-	we need to work out if the MMU is relevant (eg for
++ *		accelerated operations on a GEM object)
++ */
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "gma_drm.h"
++#include "psb_drv.h"
++
++int psb_gem_init_object(struct drm_gem_object *obj)
++{
++	return -EINVAL;
++}
++
++void psb_gem_free_object(struct drm_gem_object *obj)
++{
++	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
++	drm_gem_object_release_wrap(obj);
++	/* This must occur last as it frees up the memory of the GEM object */
++	psb_gtt_free_range(obj->dev, gtt);
++}
++
++int psb_gem_get_aperture(struct drm_device *dev, void *data,
++				struct drm_file *file)
++{
++	return -EINVAL;
++}
++
++/**
++ *	psb_gem_dumb_map_gtt	-	buffer mapping for dumb interface
++ *	@file: our drm client file
++ *	@dev: drm device
++ *	@handle: GEM handle to the object (from dumb_create)
++ *
++ *	Do the necessary setup to allow the mapping of the frame buffer
++ *	into user memory. We don't have to do much here at the moment.
++ */
++int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
++			 uint32_t handle, uint64_t *offset)
++{
++	int ret = 0;
++	struct drm_gem_object *obj;
++
++	if (!(dev->driver->driver_features & DRIVER_GEM))
++		return -ENODEV;
++
++	mutex_lock(&dev->struct_mutex);
++
++	/* GEM does all our handle to object mapping */
++	obj = drm_gem_object_lookup(dev, file, handle);
++	if (obj == NULL) {
++		ret = -ENOENT;
++		goto unlock;
++	}
++	/* What validation is needed here ? */
++
++	/* Make it mmapable */
++	if (!obj->map_list.map) {
++		ret = gem_create_mmap_offset(obj);
++		if (ret)
++			goto out;
++	}
++	/* GEM should really work out the hash offsets for us */
++	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
++out:
++	drm_gem_object_unreference(obj);
++unlock:
++	mutex_unlock(&dev->struct_mutex);
++	return ret;
++}
++
++/**
++ *	psb_gem_create		-	create a mappable object
++ *	@file: the DRM file of the client
++ *	@dev: our device
++ *	@size: the size requested
++ *	@handlep: returned handle (opaque number)
++ *
++ *	Create a GEM object, fill in the boilerplate and attach a handle to
++ *	it so that userspace can speak about it. This does the core work
++ *	for the various methods that do/will create GEM objects for things
++ */
++static int psb_gem_create(struct drm_file *file,
++	struct drm_device *dev, uint64_t size, uint32_t *handlep)
++{
++	struct gtt_range *r;
++	int ret;
++	u32 handle;
++
++	size = roundup(size, PAGE_SIZE);
++
++	/* Allocate our object - for now a direct gtt range which is not
++	   stolen memory backed */
++	r = psb_gtt_alloc_range(dev, size, "gem", 0);
++	if (r == NULL) {
++		dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
++		return -ENOSPC;
++	}
++	/* Initialize the extra goodies GEM needs to do all the hard work */
++	if (drm_gem_object_init(dev, &r->gem, size) != 0) {
++		psb_gtt_free_range(dev, r);
++		/* GEM doesn't give an error code so use -ENOMEM */
++		dev_err(dev->dev, "GEM init failed for %lld\n", size);
++		return -ENOMEM;
++	}
++	/* Give the object a handle so we can carry it more easily */
++	ret = drm_gem_handle_create(file, &r->gem, &handle);
++	if (ret) {
++		dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
++							&r->gem, size);
++		drm_gem_object_release(&r->gem);
++		psb_gtt_free_range(dev, r);
++		return ret;
++	}
++	/* We have the initial and handle reference but need only one now */
++	drm_gem_object_unreference(&r->gem);
++	*handlep = handle;
++	return 0;
++}
++
++/**
++ *	psb_gem_dumb_create	-	create a dumb buffer
++ *	@drm_file: our client file
++ *	@dev: our device
++ *	@args: the requested arguments copied from userspace
++ *
++ *	Allocate a buffer suitable for use for a frame buffer of the
++ *	form described by user space. Give userspace a handle by which
++ *	to reference it.
++ */
++int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
++			struct drm_mode_create_dumb *args)
++{
++	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
++	args->size = args->pitch * args->height;
++	return psb_gem_create(file, dev, args->size, &args->handle);
++}
++
++/**
++ *	psb_gem_dumb_destroy	-	destroy a dumb buffer
++ *	@file: client file
++ *	@dev: our DRM device
++ *	@handle: the object handle
++ *
++ *	Destroy a handle that was created via psb_gem_dumb_create, at least
++ *	we hope it was created that way. i915 seems to assume the caller
++ *	does the checking but that might be worth review ! FIXME
++ */
++int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
++			uint32_t handle)
++{
++	/* No special work needed, drop the reference and see what falls out */
++	return drm_gem_handle_delete(file, handle);
++}
++
++/**
++ *	psb_gem_fault		-	pagefault handler for GEM objects
++ *	@vma: the VMA of the GEM object
++ *	@vmf: fault detail
++ *
++ *	Invoked when a fault occurs on an mmap of a GEM managed area. GEM
++ *	does most of the work for us including the actual map/unmap calls
++ *	but we need to do the actual page work.
++ *
++ *	This code eventually needs to handle faulting objects in and out
++ *	of the GTT and repacking it when we run out of space. We can put
++ *	that off for now and for our simple uses
++ *
++ *	The VMA was set up by GEM. In doing so it also ensured that the
++ *	vma->vm_private_data points to the GEM object that is backing this
++ *	mapping.
++ */
++int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++	struct drm_gem_object *obj;
++	struct gtt_range *r;
++	int ret;
++	unsigned long pfn;
++	pgoff_t page_offset;
++	struct drm_device *dev;
++	struct drm_psb_private *dev_priv;
++
++	obj = vma->vm_private_data;	/* GEM object */
++	dev = obj->dev;
++	dev_priv = dev->dev_private;
++
++	r = container_of(obj, struct gtt_range, gem);	/* Get the gtt range */
++
++	/* Make sure we don't parallel update on a fault, nor move or remove
++	   something from beneath our feet */
++	mutex_lock(&dev->struct_mutex);
++
++	/* For now the mmap pins the object and it stays pinned. As things
++	   stand that will do us no harm */
++	if (r->mmapping == 0) {
++		ret = psb_gtt_pin(r);
++		if (ret < 0) {
++			dev_err(dev->dev, "gma500: pin failed: %d\n", ret);
++			goto fail;
++		}
++		r->mmapping = 1;
++	}
++
++	/* Page relative to the VMA start - we must calculate this ourselves
++	   because vmf->pgoff is the fake GEM offset */
++	page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
++				>> PAGE_SHIFT;
++
++	/* CPU view of the page, don't go via the GART for CPU writes */
++	if (r->stolen)
++		pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
++	else
++		pfn = page_to_pfn(r->pages[page_offset]);
++	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
++
++fail:
++	mutex_unlock(&dev->struct_mutex);
++	switch (ret) {
++	case 0:
++	case -ERESTARTSYS:
++	case -EINTR:
++		return VM_FAULT_NOPAGE;
++	case -ENOMEM:
++		return VM_FAULT_OOM;
++	default:
++		return VM_FAULT_SIGBUS;
++	}
++}
++
++static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
++						int size, u32 *handle)
++{
++	struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
++	if (gtt == NULL)
++		return -ENOMEM;
++	if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
++		goto free_gtt;
++	if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
++		return 0;
++free_gtt:
++	psb_gtt_free_range(dev, gtt);
++	return -ENOMEM;
++}
++
++/*
++ *	GEM interfaces for our specific client
++ */
++int psb_gem_create_ioctl(struct drm_device *dev, void *data,
++					struct drm_file *file)
++{
++	struct drm_psb_gem_create *args = data;
++	int ret;
++	if (args->flags & GMA_GEM_CREATE_STOLEN) {
++		ret = psb_gem_create_stolen(file, dev, args->size,
++							&args->handle);
++		if (ret == 0)
++			return 0;
++		/* Fall through */
++		args->flags &= ~GMA_GEM_CREATE_STOLEN;
++	}
++	return psb_gem_create(file, dev, args->size, &args->handle);
++}
++
++int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
++					struct drm_file *file)
++{
++	struct drm_psb_gem_mmap *args = data;
++	return dev->driver->dumb_map_offset(file, dev,
++						args->handle, &args->offset);
++}
++
+diff --git a/drivers/gpu/drm/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c
+new file mode 100644
+index 0000000..3c17634
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/gem_glue.c
+@@ -0,0 +1,90 @@
++/**************************************************************************
++ * Copyright (c) 2011, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "gem_glue.h"
++
++void drm_gem_object_release_wrap(struct drm_gem_object *obj)
++{
++	/* Remove the list map if one is present */
++	if (obj->map_list.map) {
++		struct drm_gem_mm *mm = obj->dev->mm_private;
++		struct drm_map_list *list = &obj->map_list;
++		drm_ht_remove_item(&mm->offset_hash, &list->hash);
++		drm_mm_put_block(list->file_offset_node);
++		kfree(list->map);
++		list->map = NULL;
++	}
++	drm_gem_object_release(obj);
++}
++
++/**
++ *	gem_create_mmap_offset		-	invent an mmap offset
++ *	@obj: our object
++ *
++ *	Standard implementation of offset generation for mmap as is
++ *	duplicated in several drivers. This belongs in GEM.
++ */
++int gem_create_mmap_offset(struct drm_gem_object *obj)
++{
++	struct drm_device *dev = obj->dev;
++	struct drm_gem_mm *mm = dev->mm_private;
++	struct drm_map_list *list;
++	struct drm_local_map *map;
++	int ret;
++
++	list = &obj->map_list;
++	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
++	if (list->map == NULL)
++		return -ENOMEM;
++	map = list->map;
++	map->type = _DRM_GEM;
++	map->size = obj->size;
++	map->handle = obj;
++
++	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
++					obj->size / PAGE_SIZE, 0, 0);
++	if (!list->file_offset_node) {
++		dev_err(dev->dev, "failed to allocate offset for bo %d\n",
++								obj->name);
++		ret = -ENOSPC;
++		goto free_it;
++	}
++	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
++					obj->size / PAGE_SIZE, 0);
++	if (!list->file_offset_node) {
++		ret = -ENOMEM;
++		goto free_it;
++	}
++	list->hash.key = list->file_offset_node->start;
++	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
++	if (ret) {
++		dev_err(dev->dev, "failed to add to map hash\n");
++		goto free_mm;
++	}
++	return 0;
++
++free_mm:
++	drm_mm_put_block(list->file_offset_node);
++free_it:
++	kfree(list->map);
++	list->map = NULL;
++	return ret;
++}
+diff --git a/drivers/gpu/drm/gma500/gem_glue.h b/drivers/gpu/drm/gma500/gem_glue.h
+new file mode 100644
+index 0000000..ce5ce30
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/gem_glue.h
+@@ -0,0 +1,2 @@
++extern void drm_gem_object_release_wrap(struct drm_gem_object *obj);
++extern int gem_create_mmap_offset(struct drm_gem_object *obj);
+diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
+new file mode 100644
+index 0000000..c6465b4
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/gtt.c
+@@ -0,0 +1,551 @@
++/*
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
++ *	    Alan Cox <alan at linux.intel.com>
++ */
++
++#include <drm/drmP.h>
++#include <linux/shmem_fs.h>
++#include "psb_drv.h"
++
++
++/*
++ *	GTT resource allocator - manage page mappings in GTT space
++ */
++
++/**
++ *	psb_gtt_mask_pte	-	generate GTT pte entry
++ *	@pfn: page number to encode
++ *	@type: type of memory in the GTT
++ *
++ *	Set the GTT entry for the appropriate memory type.
++ */
++static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
++{
++	uint32_t mask = PSB_PTE_VALID;
++
++	if (type & PSB_MMU_CACHED_MEMORY)
++		mask |= PSB_PTE_CACHED;
++	if (type & PSB_MMU_RO_MEMORY)
++		mask |= PSB_PTE_RO;
++	if (type & PSB_MMU_WO_MEMORY)
++		mask |= PSB_PTE_WO;
++
++	return (pfn << PAGE_SHIFT) | mask;
++}
++
++/**
++ *	psb_gtt_entry		-	find the GTT entries for a gtt_range
++ *	@dev: our DRM device
++ *	@r: our GTT range
++ *
++ *	Given a gtt_range object return the GTT offset of the page table
++ *	entries for this gtt_range
++ */
++static u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	unsigned long offset;
++
++	offset = r->resource.start - dev_priv->gtt_mem->start;
++
++	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
++}
++
++/**
++ *	psb_gtt_insert	-	put an object into the GTT
++ *	@dev: our DRM device
++ *	@r: our GTT range
++ *
++ *	Take our preallocated GTT range and insert the GEM object into
++ *	the GTT. This is protected via the gtt mutex which the caller
++ *	must hold.
++ */
++static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
++{
++	u32 *gtt_slot, pte;
++	struct page **pages;
++	int i;
++
++	if (r->pages == NULL) {
++		WARN_ON(1);
++		return -EINVAL;
++	}
++
++	WARN_ON(r->stolen);	/* refcount these maybe ? */
++
++	gtt_slot = psb_gtt_entry(dev, r);
++	pages = r->pages;
++
++	/* Make sure changes are visible to the GPU */
++	set_pages_array_uc(pages, r->npage);
++
++	/* Write our page entries into the GTT itself */
++	for (i = r->roll; i < r->npage; i++) {
++		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
++		iowrite32(pte, gtt_slot++);
++	}
++	for (i = 0; i < r->roll; i++) {
++		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
++		iowrite32(pte, gtt_slot++);
++	}
++	/* Make sure all the entries are set before we return */
++	ioread32(gtt_slot - 1);
++
++	return 0;
++}
++
++/**
++ *	psb_gtt_remove	-	remove an object from the GTT
++ *	@dev: our DRM device
++ *	@r: our GTT range
++ *
++ *	Remove a preallocated GTT range from the GTT. Overwrite all the
++ *	page table entries with the dummy page. This is protected via the gtt
++ *	mutex which the caller must hold.
++ */
++static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 *gtt_slot, pte;
++	int i;
++
++	WARN_ON(r->stolen);
++
++	gtt_slot = psb_gtt_entry(dev, r);
++	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
++
++	for (i = 0; i < r->npage; i++)
++		iowrite32(pte, gtt_slot++);
++	ioread32(gtt_slot - 1);
++	set_pages_array_wb(r->pages, r->npage);
++}
++
++/**
++ *	psb_gtt_roll	-	set scrolling position
++ *	@dev: our DRM device
++ *	@r: the gtt mapping we are using
++ *	@roll: roll offset
++ *
++ *	Roll an existing pinned mapping by moving the pages through the GTT.
++ *	This allows us to implement hardware scrolling on the consoles without
++ *	a 2D engine
++ */
++void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
++{
++	u32 *gtt_slot, pte;
++	int i;
++
++	if (roll >= r->npage) {
++		WARN_ON(1);
++		return;
++	}
++
++	r->roll = roll;
++
++	/* Not currently in the GTT - no worry we will write the mapping at
++	   the right position when it gets pinned */
++	if (!r->stolen && !r->in_gart)
++		return;
++
++	gtt_slot = psb_gtt_entry(dev, r);
++
++	for (i = r->roll; i < r->npage; i++) {
++		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
++		iowrite32(pte, gtt_slot++);
++	}
++	for (i = 0; i < r->roll; i++) {
++		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
++		iowrite32(pte, gtt_slot++);
++	}
++	ioread32(gtt_slot - 1);
++}
++
++/**
++ *	psb_gtt_attach_pages	-	attach and pin GEM pages
++ *	@gt: the gtt range
++ *
++ *	Pin and build an in kernel list of the pages that back our GEM object.
++ *	While we hold this the pages cannot be swapped out. This is protected
++ *	via the gtt mutex which the caller must hold.
++ */
++static int psb_gtt_attach_pages(struct gtt_range *gt)
++{
++	struct inode *inode;
++	struct address_space *mapping;
++	int i;
++	struct page *p;
++	int pages = gt->gem.size / PAGE_SIZE;
++
++	WARN_ON(gt->pages);
++
++	/* This is the shared memory object that backs the GEM resource */
++	inode = gt->gem.filp->f_path.dentry->d_inode;
++	mapping = inode->i_mapping;
++
++	gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
++	if (gt->pages == NULL)
++		return -ENOMEM;
++	gt->npage = pages;
++
++	for (i = 0; i < pages; i++) {
++		p = shmem_read_mapping_page(mapping, i);
++		if (IS_ERR(p))
++			goto err;
++		gt->pages[i] = p;
++	}
++	return 0;
++
++err:
++	while (i--)
++		page_cache_release(gt->pages[i]);
++	kfree(gt->pages);
++	gt->pages = NULL;
++	return PTR_ERR(p);
++}
++
++/**
++ *	psb_gtt_detach_pages	-	attach and pin GEM pages
++ *	@gt: the gtt range
++ *
++ *	Undo the effect of psb_gtt_attach_pages. At this point the pages
++ *	must have been removed from the GTT as they could now be paged out
++ *	and move bus address. This is protected via the gtt mutex which the
++ *	caller must hold.
++ */
++static void psb_gtt_detach_pages(struct gtt_range *gt)
++{
++	int i;
++	for (i = 0; i < gt->npage; i++) {
++		/* FIXME: do we need to force dirty */
++		set_page_dirty(gt->pages[i]);
++		page_cache_release(gt->pages[i]);
++	}
++	kfree(gt->pages);
++	gt->pages = NULL;
++}
++
++/**
++ *	psb_gtt_pin		-	pin pages into the GTT
++ *	@gt: range to pin
++ *
++ *	Pin a set of pages into the GTT. The pins are refcounted so that
++ *	multiple pins need multiple unpins to undo.
++ *
++ *	Non GEM backed objects treat this as a no-op as they are always GTT
++ *	backed objects.
++ */
++int psb_gtt_pin(struct gtt_range *gt)
++{
++	int ret = 0;
++	struct drm_device *dev = gt->gem.dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	mutex_lock(&dev_priv->gtt_mutex);
++
++	if (gt->in_gart == 0 && gt->stolen == 0) {
++		ret = psb_gtt_attach_pages(gt);
++		if (ret < 0)
++			goto out;
++		ret = psb_gtt_insert(dev, gt);
++		if (ret < 0) {
++			psb_gtt_detach_pages(gt);
++			goto out;
++		}
++	}
++	gt->in_gart++;
++out:
++	mutex_unlock(&dev_priv->gtt_mutex);
++	return ret;
++}
++
++/**
++ *	psb_gtt_unpin		-	Drop a GTT pin requirement
++ *	@gt: range to pin
++ *
++ *	Undoes the effect of psb_gtt_pin. On the last drop the GEM object
++ *	will be removed from the GTT which will also drop the page references
++ *	and allow the VM to clean up or page stuff.
++ *
++ *	Non GEM backed objects treat this as a no-op as they are always GTT
++ *	backed objects.
++ */
++void psb_gtt_unpin(struct gtt_range *gt)
++{
++	struct drm_device *dev = gt->gem.dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	mutex_lock(&dev_priv->gtt_mutex);
++
++	WARN_ON(!gt->in_gart);
++
++	gt->in_gart--;
++	if (gt->in_gart == 0 && gt->stolen == 0) {
++		psb_gtt_remove(dev, gt);
++		psb_gtt_detach_pages(gt);
++	}
++	mutex_unlock(&dev_priv->gtt_mutex);
++}
++
++/*
++ *	GTT resource allocator - allocate and manage GTT address space
++ */
++
++/**
++ *	psb_gtt_alloc_range	-	allocate GTT address space
++ *	@dev: Our DRM device
++ *	@len: length (bytes) of address space required
++ *	@name: resource name
++ *	@backed: resource should be backed by stolen pages
++ *
++ *	Ask the kernel core to find us a suitable range of addresses
++ *	to use for a GTT mapping.
++ *
++ *	Returns a gtt_range structure describing the object, or NULL on
++ *	error. On successful return the resource is both allocated and marked
++ *	as in use.
++ */
++struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
++						const char *name, int backed)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct gtt_range *gt;
++	struct resource *r = dev_priv->gtt_mem;
++	int ret;
++	unsigned long start, end;
++
++	if (backed) {
++		/* The start of the GTT is the stolen pages */
++		start = r->start;
++		end = r->start + dev_priv->gtt.stolen_size - 1;
++	} else {
++		/* The rest we will use for GEM backed objects */
++		start = r->start + dev_priv->gtt.stolen_size;
++		end = r->end;
++	}
++
++	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
++	if (gt == NULL)
++		return NULL;
++	gt->resource.name = name;
++	gt->stolen = backed;
++	gt->in_gart = backed;
++	gt->roll = 0;
++	/* Ensure this is set for non GEM objects */
++	gt->gem.dev = dev;
++	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
++				len, start, end, PAGE_SIZE, NULL, NULL);
++	if (ret == 0) {
++		gt->offset = gt->resource.start - r->start;
++		return gt;
++	}
++	kfree(gt);
++	return NULL;
++}
++
++/**
++ *	psb_gtt_free_range	-	release GTT address space
++ *	@dev: our DRM device
++ *	@gt: a mapping created with psb_gtt_alloc_range
++ *
++ *	Release a resource that was allocated with psb_gtt_alloc_range. If the
++ *	object has been pinned by mmap users we clean this up here currently.
++ */
++void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
++{
++	/* Undo the mmap pin if we are destroying the object */
++	if (gt->mmapping) {
++		psb_gtt_unpin(gt);
++		gt->mmapping = 0;
++	}
++	WARN_ON(gt->in_gart && !gt->stolen);
++	release_resource(&gt->resource);
++	kfree(gt);
++}
++
++static void psb_gtt_alloc(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	init_rwsem(&dev_priv->gtt.sem);
++}
++
++void psb_gtt_takedown(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (dev_priv->gtt_map) {
++		iounmap(dev_priv->gtt_map);
++		dev_priv->gtt_map = NULL;
++	}
++	if (dev_priv->gtt_initialized) {
++		pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
++				      dev_priv->gmch_ctrl);
++		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
++		(void) PSB_RVDC32(PSB_PGETBL_CTL);
++	}
++	if (dev_priv->vram_addr)
++		iounmap(dev_priv->gtt_map);
++}
++
++int psb_gtt_init(struct drm_device *dev, int resume)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	unsigned gtt_pages;
++	unsigned long stolen_size, vram_stolen_size;
++	unsigned i, num_pages;
++	unsigned pfn_base;
++	uint32_t vram_pages;
++	uint32_t dvmt_mode = 0;
++	struct psb_gtt *pg;
++
++	int ret = 0;
++	uint32_t pte;
++
++	mutex_init(&dev_priv->gtt_mutex);
++
++	psb_gtt_alloc(dev);
++	pg = &dev_priv->gtt;
++
++	/* Enable the GTT */
++	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
++	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
++			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
++
++	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
++	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
++	(void) PSB_RVDC32(PSB_PGETBL_CTL);
++
++	/* The root resource we allocate address space from */
++	dev_priv->gtt_initialized = 1;
++
++	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
++
++	/*
++	 *	The video mmu has a hw bug when accessing 0x0D0000000.
++	 *	Make gatt start at 0x0e000,0000. This doesn't actually
++	 *	matter for us but may do if the video acceleration ever
++	 *	gets opened up.
++	 */
++	pg->mmu_gatt_start = 0xE0000000;
++
++	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
++	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
++								>> PAGE_SHIFT;
++	/* CDV doesn't report this. In which case the system has 64 gtt pages */
++	if (pg->gtt_start == 0 || gtt_pages == 0) {
++		dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
++		gtt_pages = 64;
++		pg->gtt_start = dev_priv->pge_ctl;
++	}
++
++	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
++	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
++								>> PAGE_SHIFT;
++	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];
++
++	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
++		static struct resource fudge;	/* Preferably peppermint */
++		/* This can occur on CDV systems. Fudge it in this case.
++		   We really don't care what imaginary space is being allocated
++		   at this point */
++		dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
++		pg->gatt_start = 0x40000000;
++		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
++		/* This is a little confusing but in fact the GTT is providing
++		   a view from the GPU into memory and not vice versa. As such
++		   this is really allocating space that is not the same as the
++		   CPU address space on CDV */
++		fudge.start = 0x40000000;
++		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
++		fudge.name = "fudge";
++		fudge.flags = IORESOURCE_MEM;
++		dev_priv->gtt_mem = &fudge;
++	}
++
++	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
++	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
++								- PAGE_SIZE;
++
++	stolen_size = vram_stolen_size;
++
++	printk(KERN_INFO "Stolen memory information\n");
++	printk(KERN_INFO "       base in RAM: 0x%x\n", dev_priv->stolen_base);
++	printk(KERN_INFO "       size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
++		vram_stolen_size/1024);
++	dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
++	printk(KERN_INFO "      the correct size should be: %dM(dvmt mode=%d)\n",
++		(dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);
++
++	if (resume && (gtt_pages != pg->gtt_pages) &&
++	    (stolen_size != pg->stolen_size)) {
++		dev_err(dev->dev, "GTT resume error.\n");
++		ret = -EINVAL;
++		goto out_err;
++	}
++
++	pg->gtt_pages = gtt_pages;
++	pg->stolen_size = stolen_size;
++	dev_priv->vram_stolen_size = vram_stolen_size;
++
++	/*
++	 *	Map the GTT and the stolen memory area
++	 */
++	dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
++						gtt_pages << PAGE_SHIFT);
++	if (!dev_priv->gtt_map) {
++		dev_err(dev->dev, "Failure to map gtt.\n");
++		ret = -ENOMEM;
++		goto out_err;
++	}
++
++	dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
++	if (!dev_priv->vram_addr) {
++		dev_err(dev->dev, "Failure to map stolen base.\n");
++		ret = -ENOMEM;
++		goto out_err;
++	}
++
++	/*
++	 * Insert vram stolen pages into the GTT
++	 */
++
++	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
++	vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
++	printk(KERN_INFO"Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
++		num_pages, pfn_base << PAGE_SHIFT, 0);
++	for (i = 0; i < num_pages; ++i) {
++		pte = psb_gtt_mask_pte(pfn_base + i, 0);
++		iowrite32(pte, dev_priv->gtt_map + i);
++	}
++
++	/*
++	 * Init rest of GTT to the scratch page to avoid accidents or scribbles
++	 */
++
++	pfn_base = page_to_pfn(dev_priv->scratch_page);
++	pte = psb_gtt_mask_pte(pfn_base, 0);
++	for (; i < gtt_pages; ++i)
++		iowrite32(pte, dev_priv->gtt_map + i);
++
++	(void) ioread32(dev_priv->gtt_map + i - 1);
++	return 0;
++
++out_err:
++	psb_gtt_takedown(dev);
++	return ret;
++}
+diff --git a/drivers/gpu/drm/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
+new file mode 100644
+index 0000000..aa17423
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/gtt.h
+@@ -0,0 +1,64 @@
++/**************************************************************************
++ * Copyright (c) 2007-2008, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_GTT_H_
++#define _PSB_GTT_H_
++
++#include <drm/drmP.h>
++
++/* This wants cleaning up with respect to the psb_dev and un-needed stuff */
++struct psb_gtt {
++	uint32_t gatt_start;
++	uint32_t mmu_gatt_start;
++	uint32_t gtt_start;
++	uint32_t gtt_phys_start;
++	unsigned gtt_pages;
++	unsigned gatt_pages;
++	unsigned long stolen_size;
++	unsigned long vram_stolen_size;
++	struct rw_semaphore sem;
++};
++
++/* Exported functions */
++extern int psb_gtt_init(struct drm_device *dev, int resume);
++extern void psb_gtt_takedown(struct drm_device *dev);
++
++/* Each gtt_range describes an allocation in the GTT area */
++struct gtt_range {
++	struct resource resource;	/* Resource for our allocation */
++	u32 offset;			/* GTT offset of our object */
++	struct drm_gem_object gem;	/* GEM high level stuff */
++	int in_gart;			/* Currently in the GART (ref ct) */
++	bool stolen;			/* Backed from stolen RAM */
++	bool mmapping;			/* Is mmappable */
++	struct page **pages;		/* Backing pages if present */
++	int npage;			/* Number of backing pages */
++	int roll;			/* Roll applied to the GTT entries */
++};
++
++extern struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
++						const char *name, int backed);
++extern void psb_gtt_kref_put(struct gtt_range *gt);
++extern void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt);
++extern int psb_gtt_pin(struct gtt_range *gt);
++extern void psb_gtt_unpin(struct gtt_range *gt);
++extern void psb_gtt_roll(struct drm_device *dev,
++					struct gtt_range *gt, int roll);
++
++#endif
+diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
+new file mode 100644
+index 0000000..d4d0c5b
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/intel_bios.c
+@@ -0,0 +1,303 @@
++/*
++ * Copyright (c) 2006 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ *    Eric Anholt <eric at anholt.net>
++ *
++ */
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "gma_drm.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "intel_bios.h"
++
++
++static void *find_section(struct bdb_header *bdb, int section_id)
++{
++	u8 *base = (u8 *)bdb;
++	int index = 0;
++	u16 total, current_size;
++	u8 current_id;
++
++	/* skip to first section */
++	index += bdb->header_size;
++	total = bdb->bdb_size;
++
++	/* walk the sections looking for section_id */
++	while (index < total) {
++		current_id = *(base + index);
++		index++;
++		current_size = *((u16 *)(base + index));
++		index += 2;
++		if (current_id == section_id)
++			return base + index;
++		index += current_size;
++	}
++
++	return NULL;
++}
++
++static void fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
++			struct lvds_dvo_timing *dvo_timing)
++{
++	panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
++		dvo_timing->hactive_lo;
++	panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
++		((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
++	panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
++		dvo_timing->hsync_pulse_width;
++	panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
++		((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
++
++	panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
++		dvo_timing->vactive_lo;
++	panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
++		dvo_timing->vsync_off;
++	panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
++		dvo_timing->vsync_pulse_width;
++	panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
++		((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
++	panel_fixed_mode->clock = dvo_timing->clock * 10;
++	panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
++
++	/* Some VBTs have bogus h/vtotal values */
++	if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
++		panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
++	if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
++		panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
++
++	drm_mode_set_name(panel_fixed_mode);
++}
++
++static void parse_backlight_data(struct drm_psb_private *dev_priv,
++				struct bdb_header *bdb)
++{
++	struct bdb_lvds_backlight *vbt_lvds_bl = NULL;
++	struct bdb_lvds_backlight *lvds_bl;
++	u8 p_type = 0;
++	void *bl_start = NULL;
++	struct bdb_lvds_options *lvds_opts
++				= find_section(bdb, BDB_LVDS_OPTIONS);
++
++	dev_priv->lvds_bl = NULL;
++
++	if (lvds_opts)
++		p_type = lvds_opts->panel_type;
++	else
++		return;
++
++	bl_start = find_section(bdb, BDB_LVDS_BACKLIGHT);
++	vbt_lvds_bl = (struct bdb_lvds_backlight *)(bl_start + 1) + p_type;
++
++	lvds_bl = kzalloc(sizeof(*vbt_lvds_bl), GFP_KERNEL);
++	if (!lvds_bl) {
++		dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
++		return;
++	}
++	memcpy(lvds_bl, vbt_lvds_bl, sizeof(*vbt_lvds_bl));
++	dev_priv->lvds_bl = lvds_bl;
++}
++
++/* Try to find integrated panel data */
++static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
++			    struct bdb_header *bdb)
++{
++	struct bdb_lvds_options *lvds_options;
++	struct bdb_lvds_lfp_data *lvds_lfp_data;
++	struct bdb_lvds_lfp_data_entry *entry;
++	struct lvds_dvo_timing *dvo_timing;
++	struct drm_display_mode *panel_fixed_mode;
++
++	/* Defaults if we can't find VBT info */
++	dev_priv->lvds_dither = 0;
++	dev_priv->lvds_vbt = 0;
++
++	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
++	if (!lvds_options)
++		return;
++
++	dev_priv->lvds_dither = lvds_options->pixel_dither;
++	if (lvds_options->panel_type == 0xff)
++		return;
++
++	lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
++	if (!lvds_lfp_data)
++		return;
++
++
++	entry = &lvds_lfp_data->data[lvds_options->panel_type];
++	dvo_timing = &entry->dvo_timing;
++
++	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
++				      GFP_KERNEL);
++	if (panel_fixed_mode == NULL) {
++		dev_err(dev_priv->dev->dev, "out of memory for fixed panel mode\n");
++		return;
++	}
++
++	dev_priv->lvds_vbt = 1;
++	fill_detail_timing_data(panel_fixed_mode, dvo_timing);
++
++	if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
++		dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
++		drm_mode_debug_printmodeline(panel_fixed_mode);
++	} else {
++		dev_dbg(dev_priv->dev->dev, "ignoring invalid LVDS VBT\n");
++		dev_priv->lvds_vbt = 0;
++		kfree(panel_fixed_mode);
++	}
++	return;
++}
++
++/* Try to find sdvo panel data */
++static void parse_sdvo_panel_data(struct drm_psb_private *dev_priv,
++		      struct bdb_header *bdb)
++{
++	struct bdb_sdvo_lvds_options *sdvo_lvds_options;
++	struct lvds_dvo_timing *dvo_timing;
++	struct drm_display_mode *panel_fixed_mode;
++
++	dev_priv->sdvo_lvds_vbt_mode = NULL;
++
++	sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
++	if (!sdvo_lvds_options)
++		return;
++
++	dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
++	if (!dvo_timing)
++		return;
++
++	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
++
++	if (!panel_fixed_mode)
++		return;
++
++	fill_detail_timing_data(panel_fixed_mode,
++			dvo_timing + sdvo_lvds_options->panel_type);
++
++	dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
++
++	return;
++}
++
++static void parse_general_features(struct drm_psb_private *dev_priv,
++		       struct bdb_header *bdb)
++{
++	struct bdb_general_features *general;
++
++	/* Set sensible defaults in case we can't find the general block */
++	dev_priv->int_tv_support = 1;
++	dev_priv->int_crt_support = 1;
++
++	general = find_section(bdb, BDB_GENERAL_FEATURES);
++	if (general) {
++		dev_priv->int_tv_support = general->int_tv_support;
++		dev_priv->int_crt_support = general->int_crt_support;
++		dev_priv->lvds_use_ssc = general->enable_ssc;
++
++		if (dev_priv->lvds_use_ssc) {
++			dev_priv->lvds_ssc_freq
++				= general->ssc_freq ? 100 : 96;
++		}
++	}
++}
++
++/**
++ * psb_intel_init_bios - initialize VBIOS settings & find VBT
++ * @dev: DRM device
++ *
++ * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
++ * to appropriate values.
++ *
++ * VBT existence is a sanity check that is relied on by other i830_bios.c code.
++ * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
++ * feed an updated VBT back through that, compared to what we'll fetch using
++ * this method of groping around in the BIOS data.
++ *
++ * Returns 0 on success, nonzero on failure.
++ */
++bool psb_intel_init_bios(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct pci_dev *pdev = dev->pdev;
++	struct vbt_header *vbt = NULL;
++	struct bdb_header *bdb;
++	u8 __iomem *bios;
++	size_t size;
++	int i;
++
++	bios = pci_map_rom(pdev, &size);
++	if (!bios)
++		return -1;
++
++	/* Scour memory looking for the VBT signature */
++	for (i = 0; i + 4 < size; i++) {
++		if (!memcmp(bios + i, "$VBT", 4)) {
++			vbt = (struct vbt_header *)(bios + i);
++			break;
++		}
++	}
++
++	if (!vbt) {
++		dev_err(dev->dev, "VBT signature missing\n");
++		pci_unmap_rom(pdev, bios);
++		return -1;
++	}
++
++	bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
++
++	/* Grab useful general definitions */
++	parse_general_features(dev_priv, bdb);
++	parse_lfp_panel_data(dev_priv, bdb);
++	parse_sdvo_panel_data(dev_priv, bdb);
++	parse_backlight_data(dev_priv, bdb);
++
++	pci_unmap_rom(pdev, bios);
++
++	return 0;
++}
++
++/**
++ * Destroy and free VBT data
++ */
++void psb_intel_destroy_bios(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct drm_display_mode *sdvo_lvds_vbt_mode =
++				dev_priv->sdvo_lvds_vbt_mode;
++	struct drm_display_mode *lfp_lvds_vbt_mode =
++				dev_priv->lfp_lvds_vbt_mode;
++	struct bdb_lvds_backlight *lvds_bl =
++				dev_priv->lvds_bl;
++
++	/*free sdvo panel mode*/
++	if (sdvo_lvds_vbt_mode) {
++		dev_priv->sdvo_lvds_vbt_mode = NULL;
++		kfree(sdvo_lvds_vbt_mode);
++	}
++
++	if (lfp_lvds_vbt_mode) {
++		dev_priv->lfp_lvds_vbt_mode = NULL;
++		kfree(lfp_lvds_vbt_mode);
++	}
++
++	if (lvds_bl) {
++		dev_priv->lvds_bl = NULL;
++		kfree(lvds_bl);
++	}
++}
+diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
+new file mode 100644
+index 0000000..70f1bf0
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/intel_bios.h
+@@ -0,0 +1,430 @@
++/*
++ * Copyright (c) 2006 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ *    Eric Anholt <eric at anholt.net>
++ *
++ */
++
++#ifndef _I830_BIOS_H_
++#define _I830_BIOS_H_
++
++#include <drm/drmP.h>
++
++struct vbt_header {
++	u8 signature[20];		/**< Always starts with 'VBT$' */
++	u16 version;			/**< decimal */
++	u16 header_size;		/**< in bytes */
++	u16 vbt_size;			/**< in bytes */
++	u8 vbt_checksum;
++	u8 reserved0;
++	u32 bdb_offset;			/**< from beginning of VBT */
++	u32 aim_offset[4];		/**< from beginning of VBT */
++} __attribute__((packed));
++
++
++struct bdb_header {
++	u8 signature[16];		/**< Always 'BIOS_DATA_BLOCK' */
++	u16 version;			/**< decimal */
++	u16 header_size;		/**< in bytes */
++	u16 bdb_size;			/**< in bytes */
++};
++
++/* strictly speaking, this is a "skip" block, but it has interesting info */
++struct vbios_data {
++	u8 type; /* 0 == desktop, 1 == mobile */
++	u8 relstage;
++	u8 chipset;
++	u8 lvds_present:1;
++	u8 tv_present:1;
++	u8 rsvd2:6; /* finish byte */
++	u8 rsvd3[4];
++	u8 signon[155];
++	u8 copyright[61];
++	u16 code_segment;
++	u8 dos_boot_mode;
++	u8 bandwidth_percent;
++	u8 rsvd4; /* popup memory size */
++	u8 resize_pci_bios;
++	u8 rsvd5; /* is crt already on ddc2 */
++} __attribute__((packed));
++
++/*
++ * There are several types of BIOS data blocks (BDBs), each block has
++ * an ID and size in the first 3 bytes (ID in first, size in next 2).
++ * Known types are listed below.
++ */
++#define BDB_GENERAL_FEATURES	  1
++#define BDB_GENERAL_DEFINITIONS	  2
++#define BDB_OLD_TOGGLE_LIST	  3
++#define BDB_MODE_SUPPORT_LIST	  4
++#define BDB_GENERIC_MODE_TABLE	  5
++#define BDB_EXT_MMIO_REGS	  6
++#define BDB_SWF_IO		  7
++#define BDB_SWF_MMIO		  8
++#define BDB_DOT_CLOCK_TABLE	  9
++#define BDB_MODE_REMOVAL_TABLE	 10
++#define BDB_CHILD_DEVICE_TABLE	 11
++#define BDB_DRIVER_FEATURES	 12
++#define BDB_DRIVER_PERSISTENCE	 13
++#define BDB_EXT_TABLE_PTRS	 14
++#define BDB_DOT_CLOCK_OVERRIDE	 15
++#define BDB_DISPLAY_SELECT	 16
++/* 17 rsvd */
++#define BDB_DRIVER_ROTATION	 18
++#define BDB_DISPLAY_REMOVE	 19
++#define BDB_OEM_CUSTOM		 20
++#define BDB_EFP_LIST		 21 /* workarounds for VGA hsync/vsync */
++#define BDB_SDVO_LVDS_OPTIONS	 22
++#define BDB_SDVO_PANEL_DTDS	 23
++#define BDB_SDVO_LVDS_PNP_IDS	 24
++#define BDB_SDVO_LVDS_POWER_SEQ	 25
++#define BDB_TV_OPTIONS		 26
++#define BDB_LVDS_OPTIONS	 40
++#define BDB_LVDS_LFP_DATA_PTRS	 41
++#define BDB_LVDS_LFP_DATA	 42
++#define BDB_LVDS_BACKLIGHT	 43
++#define BDB_LVDS_POWER		 44
++#define BDB_SKIP		254 /* VBIOS private block, ignore */
++
++struct bdb_general_features {
++	/* bits 1 */
++	u8 panel_fitting:2;
++	u8 flexaim:1;
++	u8 msg_enable:1;
++	u8 clear_screen:3;
++	u8 color_flip:1;
++
++	/* bits 2 */
++	u8 download_ext_vbt:1;
++	u8 enable_ssc:1;
++	u8 ssc_freq:1;
++	u8 enable_lfp_on_override:1;
++	u8 disable_ssc_ddt:1;
++	u8 rsvd8:3; /* finish byte */
++
++	/* bits 3 */
++	u8 disable_smooth_vision:1;
++	u8 single_dvi:1;
++	u8 rsvd9:6; /* finish byte */
++
++	/* bits 4 */
++	u8 legacy_monitor_detect;
++
++	/* bits 5 */
++	u8 int_crt_support:1;
++	u8 int_tv_support:1;
++	u8 rsvd11:6; /* finish byte */
++} __attribute__((packed));
++
++struct bdb_general_definitions {
++	/* DDC GPIO */
++	u8 crt_ddc_gmbus_pin;
++
++	/* DPMS bits */
++	u8 dpms_acpi:1;
++	u8 skip_boot_crt_detect:1;
++	u8 dpms_aim:1;
++	u8 rsvd1:5; /* finish byte */
++
++	/* boot device bits */
++	u8 boot_display[2];
++	u8 child_dev_size;
++
++	/* device info */
++	u8 tv_or_lvds_info[33];
++	u8 dev1[33];
++	u8 dev2[33];
++	u8 dev3[33];
++	u8 dev4[33];
++	/* may be another device block here on some platforms */
++};
++
++struct bdb_lvds_options {
++	u8 panel_type;
++	u8 rsvd1;
++	/* LVDS capabilities, stored in a dword */
++	u8 pfit_mode:2;
++	u8 pfit_text_mode_enhanced:1;
++	u8 pfit_gfx_mode_enhanced:1;
++	u8 pfit_ratio_auto:1;
++	u8 pixel_dither:1;
++	u8 lvds_edid:1;
++	u8 rsvd2:1;
++	u8 rsvd4;
++} __attribute__((packed));
++
++struct bdb_lvds_backlight {
++	u8 type:2;
++	u8 pol:1;
++	u8 gpio:3;
++	u8 gmbus:2;
++	u16 freq;
++	u8 minbrightness;
++	u8 i2caddr;
++	u8 brightnesscmd;
++	/*FIXME: more...*/
++} __attribute__((packed));
++
++/* LFP pointer table contains entries to the struct below */
++struct bdb_lvds_lfp_data_ptr {
++	u16 fp_timing_offset; /* offsets are from start of bdb */
++	u8 fp_table_size;
++	u16 dvo_timing_offset;
++	u8 dvo_table_size;
++	u16 panel_pnp_id_offset;
++	u8 pnp_table_size;
++} __attribute__((packed));
++
++struct bdb_lvds_lfp_data_ptrs {
++	u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
++	struct bdb_lvds_lfp_data_ptr ptr[16];
++} __attribute__((packed));
++
++/* LFP data has 3 blocks per entry */
++struct lvds_fp_timing {
++	u16 x_res;
++	u16 y_res;
++	u32 lvds_reg;
++	u32 lvds_reg_val;
++	u32 pp_on_reg;
++	u32 pp_on_reg_val;
++	u32 pp_off_reg;
++	u32 pp_off_reg_val;
++	u32 pp_cycle_reg;
++	u32 pp_cycle_reg_val;
++	u32 pfit_reg;
++	u32 pfit_reg_val;
++	u16 terminator;
++} __attribute__((packed));
++
++struct lvds_dvo_timing {
++	u16 clock;		/**< In 10khz */
++	u8 hactive_lo;
++	u8 hblank_lo;
++	u8 hblank_hi:4;
++	u8 hactive_hi:4;
++	u8 vactive_lo;
++	u8 vblank_lo;
++	u8 vblank_hi:4;
++	u8 vactive_hi:4;
++	u8 hsync_off_lo;
++	u8 hsync_pulse_width;
++	u8 vsync_pulse_width:4;
++	u8 vsync_off:4;
++	u8 rsvd0:6;
++	u8 hsync_off_hi:2;
++	u8 h_image;
++	u8 v_image;
++	u8 max_hv;
++	u8 h_border;
++	u8 v_border;
++	u8 rsvd1:3;
++	u8 digital:2;
++	u8 vsync_positive:1;
++	u8 hsync_positive:1;
++	u8 rsvd2:1;
++} __attribute__((packed));
++
++struct lvds_pnp_id {
++	u16 mfg_name;
++	u16 product_code;
++	u32 serial;
++	u8 mfg_week;
++	u8 mfg_year;
++} __attribute__((packed));
++
++struct bdb_lvds_lfp_data_entry {
++	struct lvds_fp_timing fp_timing;
++	struct lvds_dvo_timing dvo_timing;
++	struct lvds_pnp_id pnp_id;
++} __attribute__((packed));
++
++struct bdb_lvds_lfp_data {
++	struct bdb_lvds_lfp_data_entry data[16];
++} __attribute__((packed));
++
++struct aimdb_header {
++	char signature[16];
++	char oem_device[20];
++	u16 aimdb_version;
++	u16 aimdb_header_size;
++	u16 aimdb_size;
++} __attribute__((packed));
++
++struct aimdb_block {
++	u8 aimdb_id;
++	u16 aimdb_size;
++} __attribute__((packed));
++
++struct vch_panel_data {
++	u16 fp_timing_offset;
++	u8 fp_timing_size;
++	u16 dvo_timing_offset;
++	u8 dvo_timing_size;
++	u16 text_fitting_offset;
++	u8 text_fitting_size;
++	u16 graphics_fitting_offset;
++	u8 graphics_fitting_size;
++} __attribute__((packed));
++
++struct vch_bdb_22 {
++	struct aimdb_block aimdb_block;
++	struct vch_panel_data panels[16];
++} __attribute__((packed));
++
++struct bdb_sdvo_lvds_options {
++	u8 panel_backlight;
++	u8 h40_set_panel_type;
++	u8 panel_type;
++	u8 ssc_clk_freq;
++	u16 als_low_trip;
++	u16 als_high_trip;
++	u8 sclalarcoeff_tab_row_num;
++	u8 sclalarcoeff_tab_row_size;
++	u8 coefficient[8];
++	u8 panel_misc_bits_1;
++	u8 panel_misc_bits_2;
++	u8 panel_misc_bits_3;
++	u8 panel_misc_bits_4;
++} __attribute__((packed));
++
++
++extern bool psb_intel_init_bios(struct drm_device *dev);
++extern void psb_intel_destroy_bios(struct drm_device *dev);
++
++/*
++ * Driver<->VBIOS interaction occurs through scratch bits in
++ * GR18 & SWF*.
++ */
++
++/* GR18 bits are set on display switch and hotkey events */
++#define GR18_DRIVER_SWITCH_EN	(1<<7) /* 0: VBIOS control, 1: driver control */
++#define GR18_HOTKEY_MASK	0x78 /* See also SWF4 15:0 */
++#define   GR18_HK_NONE		(0x0<<3)
++#define   GR18_HK_LFP_STRETCH	(0x1<<3)
++#define   GR18_HK_TOGGLE_DISP	(0x2<<3)
++#define   GR18_HK_DISP_SWITCH	(0x4<<3) /* see SWF14 15:0 for what to enable */
++#define   GR18_HK_POPUP_DISABLED (0x6<<3)
++#define   GR18_HK_POPUP_ENABLED	(0x7<<3)
++#define   GR18_HK_PFIT		(0x8<<3)
++#define   GR18_HK_APM_CHANGE	(0xa<<3)
++#define   GR18_HK_MULTIPLE	(0xc<<3)
++#define GR18_USER_INT_EN	(1<<2)
++#define GR18_A0000_FLUSH_EN	(1<<1)
++#define GR18_SMM_EN		(1<<0)
++
++/* Set by driver, cleared by VBIOS */
++#define SWF00_YRES_SHIFT	16
++#define SWF00_XRES_SHIFT	0
++#define SWF00_RES_MASK		0xffff
++
++/* Set by VBIOS at boot time and driver at runtime */
++#define SWF01_TV2_FORMAT_SHIFT	8
++#define SWF01_TV1_FORMAT_SHIFT	0
++#define SWF01_TV_FORMAT_MASK	0xffff
++
++#define SWF10_VBIOS_BLC_I2C_EN	(1<<29)
++#define SWF10_GTT_OVERRIDE_EN	(1<<28)
++#define SWF10_LFP_DPMS_OVR	(1<<27) /* override DPMS on display switch */
++#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
++#define   SWF10_OLD_TOGGLE	0x0
++#define   SWF10_TOGGLE_LIST_1	0x1
++#define   SWF10_TOGGLE_LIST_2	0x2
++#define   SWF10_TOGGLE_LIST_3	0x3
++#define   SWF10_TOGGLE_LIST_4	0x4
++#define SWF10_PANNING_EN	(1<<23)
++#define SWF10_DRIVER_LOADED	(1<<22)
++#define SWF10_EXTENDED_DESKTOP	(1<<21)
++#define SWF10_EXCLUSIVE_MODE	(1<<20)
++#define SWF10_OVERLAY_EN	(1<<19)
++#define SWF10_PLANEB_HOLDOFF	(1<<18)
++#define SWF10_PLANEA_HOLDOFF	(1<<17)
++#define SWF10_VGA_HOLDOFF	(1<<16)
++#define SWF10_ACTIVE_DISP_MASK	0xffff
++#define   SWF10_PIPEB_LFP2	(1<<15)
++#define   SWF10_PIPEB_EFP2	(1<<14)
++#define   SWF10_PIPEB_TV2	(1<<13)
++#define   SWF10_PIPEB_CRT2	(1<<12)
++#define   SWF10_PIPEB_LFP	(1<<11)
++#define   SWF10_PIPEB_EFP	(1<<10)
++#define   SWF10_PIPEB_TV	(1<<9)
++#define   SWF10_PIPEB_CRT	(1<<8)
++#define   SWF10_PIPEA_LFP2	(1<<7)
++#define   SWF10_PIPEA_EFP2	(1<<6)
++#define   SWF10_PIPEA_TV2	(1<<5)
++#define   SWF10_PIPEA_CRT2	(1<<4)
++#define   SWF10_PIPEA_LFP	(1<<3)
++#define   SWF10_PIPEA_EFP	(1<<2)
++#define   SWF10_PIPEA_TV	(1<<1)
++#define   SWF10_PIPEA_CRT	(1<<0)
++
++#define SWF11_MEMORY_SIZE_SHIFT	16
++#define SWF11_SV_TEST_EN	(1<<15)
++#define SWF11_IS_AGP		(1<<14)
++#define SWF11_DISPLAY_HOLDOFF	(1<<13)
++#define SWF11_DPMS_REDUCED	(1<<12)
++#define SWF11_IS_VBE_MODE	(1<<11)
++#define SWF11_PIPEB_ACCESS	(1<<10) /* 0 here means pipe a */
++#define SWF11_DPMS_MASK		0x07
++#define   SWF11_DPMS_OFF	(1<<2)
++#define   SWF11_DPMS_SUSPEND	(1<<1)
++#define   SWF11_DPMS_STANDBY	(1<<0)
++#define   SWF11_DPMS_ON		0
++
++#define SWF14_GFX_PFIT_EN	(1<<31)
++#define SWF14_TEXT_PFIT_EN	(1<<30)
++#define SWF14_LID_STATUS_CLOSED	(1<<29) /* 0 here means open */
++#define SWF14_POPUP_EN		(1<<28)
++#define SWF14_DISPLAY_HOLDOFF	(1<<27)
++#define SWF14_DISP_DETECT_EN	(1<<26)
++#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
++#define SWF14_DRIVER_STATUS	(1<<24)
++#define SWF14_OS_TYPE_WIN9X	(1<<23)
++#define SWF14_OS_TYPE_WINNT	(1<<22)
++/* 21:19 rsvd */
++#define SWF14_PM_TYPE_MASK	0x00070000
++#define   SWF14_PM_ACPI_VIDEO	(0x4 << 16)
++#define   SWF14_PM_ACPI		(0x3 << 16)
++#define   SWF14_PM_APM_12	(0x2 << 16)
++#define   SWF14_PM_APM_11	(0x1 << 16)
++#define SWF14_HK_REQUEST_MASK	0x0000ffff /* see GR18 6:3 for event type */
++	  /* if GR18 indicates a display switch */
++#define   SWF14_DS_PIPEB_LFP2_EN (1<<15)
++#define   SWF14_DS_PIPEB_EFP2_EN (1<<14)
++#define   SWF14_DS_PIPEB_TV2_EN  (1<<13)
++#define   SWF14_DS_PIPEB_CRT2_EN (1<<12)
++#define   SWF14_DS_PIPEB_LFP_EN  (1<<11)
++#define   SWF14_DS_PIPEB_EFP_EN  (1<<10)
++#define   SWF14_DS_PIPEB_TV_EN	 (1<<9)
++#define   SWF14_DS_PIPEB_CRT_EN  (1<<8)
++#define   SWF14_DS_PIPEA_LFP2_EN (1<<7)
++#define   SWF14_DS_PIPEA_EFP2_EN (1<<6)
++#define   SWF14_DS_PIPEA_TV2_EN  (1<<5)
++#define   SWF14_DS_PIPEA_CRT2_EN (1<<4)
++#define   SWF14_DS_PIPEA_LFP_EN  (1<<3)
++#define   SWF14_DS_PIPEA_EFP_EN  (1<<2)
++#define   SWF14_DS_PIPEA_TV_EN	 (1<<1)
++#define   SWF14_DS_PIPEA_CRT_EN  (1<<0)
++	  /* if GR18 indicates a panel fitting request */
++#define   SWF14_PFIT_EN		(1<<0) /* 0 means disable */
++	  /* if GR18 indicates an APM change request */
++#define   SWF14_APM_HIBERNATE	0x4
++#define   SWF14_APM_SUSPEND	0x3
++#define   SWF14_APM_STANDBY	0x1
++#define   SWF14_APM_RESTORE	0x0
++
++#endif /* _I830_BIOS_H_ */
+diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
+new file mode 100644
+index 0000000..9db9052
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/intel_gmbus.c
+@@ -0,0 +1,493 @@
++/*
++ * Copyright (c) 2006 Dave Airlie <airlied at linux.ie>
++ * Copyright © 2006-2008,2010 Intel Corporation
++ *   Jesse Barnes <jesse.barnes at intel.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *	Eric Anholt <eric at anholt.net>
++ *	Chris Wilson <chris at chris-wilson.co.uk>
++ */
++#include <linux/module.h>
++#include <linux/i2c.h>
++#include <linux/i2c-algo-bit.h>
++#include "drmP.h"
++#include "drm.h"
++#include "psb_intel_drv.h"
++#include "gma_drm.h"
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++
++#define _wait_for(COND, MS, W) ({ \
++	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
++	int ret__ = 0;							\
++	while (! (COND)) {						\
++		if (time_after(jiffies, timeout__)) {			\
++			ret__ = -ETIMEDOUT;				\
++			break;						\
++		}							\
++		if (W && !(in_atomic() || in_dbg_master())) msleep(W);	\
++	}								\
++	ret__;								\
++})
++
++#define wait_for(COND, MS) _wait_for(COND, MS, 1)
++#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
++
++/* Intel GPIO access functions */
++
++#define I2C_RISEFALL_TIME 20
++
++static inline struct intel_gmbus *
++to_intel_gmbus(struct i2c_adapter *i2c)
++{
++	return container_of(i2c, struct intel_gmbus, adapter);
++}
++
++struct intel_gpio {
++	struct i2c_adapter adapter;
++	struct i2c_algo_bit_data algo;
++	struct drm_psb_private *dev_priv;
++	u32 reg;
++};
++
++void
++gma_intel_i2c_reset(struct drm_device *dev)
++{
++	REG_WRITE(GMBUS0, 0);
++}
++
++static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
++{
++	/* When using bit bashing for I2C, this bit needs to be set to 1 */
++	/* FIXME: We are never Pineview, right?
++
++	u32 val;
++
++	if (!IS_PINEVIEW(dev_priv->dev))
++		return;
++
++	val = REG_READ(DSPCLK_GATE_D);
++	if (enable)
++		val |= DPCUNIT_CLOCK_GATE_DISABLE;
++	else
++		val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
++	REG_WRITE(DSPCLK_GATE_D, val);
++
++	return;
++	*/
++}
++
++static u32 get_reserved(struct intel_gpio *gpio)
++{
++	struct drm_psb_private *dev_priv = gpio->dev_priv;
++	struct drm_device *dev = dev_priv->dev;
++	u32 reserved = 0;
++
++	/* On most chips, these bits must be preserved in software. */
++	reserved = REG_READ(gpio->reg) &
++				     (GPIO_DATA_PULLUP_DISABLE |
++				      GPIO_CLOCK_PULLUP_DISABLE);
++
++	return reserved;
++}
++
++static int get_clock(void *data)
++{
++	struct intel_gpio *gpio = data;
++	struct drm_psb_private *dev_priv = gpio->dev_priv;
++	struct drm_device *dev = dev_priv->dev;
++	u32 reserved = get_reserved(gpio);
++	REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
++	REG_WRITE(gpio->reg, reserved);
++	return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
++}
++
++static int get_data(void *data)
++{
++	struct intel_gpio *gpio = data;
++	struct drm_psb_private *dev_priv = gpio->dev_priv;
++	struct drm_device *dev = dev_priv->dev;
++	u32 reserved = get_reserved(gpio);
++	REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
++	REG_WRITE(gpio->reg, reserved);
++	return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
++}
++
++static void set_clock(void *data, int state_high)
++{
++	struct intel_gpio *gpio = data;
++	struct drm_psb_private *dev_priv = gpio->dev_priv;
++	struct drm_device *dev = dev_priv->dev;
++	u32 reserved = get_reserved(gpio);
++	u32 clock_bits;
++
++	if (state_high)
++		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
++	else
++		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
++			GPIO_CLOCK_VAL_MASK;
++
++	REG_WRITE(gpio->reg, reserved | clock_bits);
++	REG_READ(gpio->reg); /* Posting */
++}
++
++static void set_data(void *data, int state_high)
++{
++	struct intel_gpio *gpio = data;
++	struct drm_psb_private *dev_priv = gpio->dev_priv;
++	struct drm_device *dev = dev_priv->dev;
++	u32 reserved = get_reserved(gpio);
++	u32 data_bits;
++
++	if (state_high)
++		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
++	else
++		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
++			GPIO_DATA_VAL_MASK;
++
++	REG_WRITE(gpio->reg, reserved | data_bits);
++	REG_READ(gpio->reg);
++}
++
++static struct i2c_adapter *
++intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
++{
++	static const int map_pin_to_reg[] = {
++		0,
++		GPIOB,
++		GPIOA,
++		GPIOC,
++		GPIOD,
++		GPIOE,
++		0,
++		GPIOF,
++	};
++	struct intel_gpio *gpio;
++
++	if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
++		return NULL;
++
++	gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
++	if (gpio == NULL)
++		return NULL;
++
++	gpio->reg = map_pin_to_reg[pin];
++	gpio->dev_priv = dev_priv;
++
++	snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
++		 "gma500 GPIO%c", "?BACDE?F"[pin]);
++	gpio->adapter.owner = THIS_MODULE;
++	gpio->adapter.algo_data	= &gpio->algo;
++	gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
++	gpio->algo.setsda = set_data;
++	gpio->algo.setscl = set_clock;
++	gpio->algo.getsda = get_data;
++	gpio->algo.getscl = get_clock;
++	gpio->algo.udelay = I2C_RISEFALL_TIME;
++	gpio->algo.timeout = usecs_to_jiffies(2200);
++	gpio->algo.data = gpio;
++
++	if (i2c_bit_add_bus(&gpio->adapter))
++		goto out_free;
++
++	return &gpio->adapter;
++
++out_free:
++	kfree(gpio);
++	return NULL;
++}
++
++static int
++intel_i2c_quirk_xfer(struct drm_psb_private *dev_priv,
++		     struct i2c_adapter *adapter,
++		     struct i2c_msg *msgs,
++		     int num)
++{
++	struct intel_gpio *gpio = container_of(adapter,
++					       struct intel_gpio,
++					       adapter);
++	int ret;
++
++	gma_intel_i2c_reset(dev_priv->dev);
++
++	intel_i2c_quirk_set(dev_priv, true);
++	set_data(gpio, 1);
++	set_clock(gpio, 1);
++	udelay(I2C_RISEFALL_TIME);
++
++	ret = adapter->algo->master_xfer(adapter, msgs, num);
++
++	set_data(gpio, 1);
++	set_clock(gpio, 1);
++	intel_i2c_quirk_set(dev_priv, false);
++
++	return ret;
++}
++
++static int
++gmbus_xfer(struct i2c_adapter *adapter,
++	   struct i2c_msg *msgs,
++	   int num)
++{
++	struct intel_gmbus *bus = container_of(adapter,
++					       struct intel_gmbus,
++					       adapter);
++	struct drm_psb_private *dev_priv = adapter->algo_data;
++	struct drm_device *dev = dev_priv->dev;
++	int i, reg_offset;
++
++	if (bus->force_bit)
++		return intel_i2c_quirk_xfer(dev_priv,
++					    bus->force_bit, msgs, num);
++
++	reg_offset = 0;
++
++	REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
++
++	for (i = 0; i < num; i++) {
++		u16 len = msgs[i].len;
++		u8 *buf = msgs[i].buf;
++
++		if (msgs[i].flags & I2C_M_RD) {
++			REG_WRITE(GMBUS1 + reg_offset,
++				   GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
++				   (len << GMBUS_BYTE_COUNT_SHIFT) |
++				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
++				   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
++			REG_READ(GMBUS2+reg_offset);
++			do {
++				u32 val, loop = 0;
++
++				if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
++					goto timeout;
++				if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
++					goto clear_err;
++
++				val = REG_READ(GMBUS3 + reg_offset);
++				do {
++					*buf++ = val & 0xff;
++					val >>= 8;
++				} while (--len && ++loop < 4);
++			} while (len);
++		} else {
++			u32 val, loop;
++
++			val = loop = 0;
++			do {
++				val |= *buf++ << (8 * loop);
++			} while (--len && ++loop < 4);
++
++			REG_WRITE(GMBUS3 + reg_offset, val);
++			REG_WRITE(GMBUS1 + reg_offset,
++				   (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
++				   (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
++				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
++				   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
++			REG_READ(GMBUS2+reg_offset);
++
++			while (len) {
++				if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
++					goto timeout;
++				if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
++					goto clear_err;
++
++				val = loop = 0;
++				do {
++					val |= *buf++ << (8 * loop);
++				} while (--len && ++loop < 4);
++
++				REG_WRITE(GMBUS3 + reg_offset, val);
++				REG_READ(GMBUS2+reg_offset);
++			}
++		}
++
++		if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
++			goto timeout;
++		if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
++			goto clear_err;
++	}
++
++	goto done;
++
++clear_err:
++	/* Toggle the Software Clear Interrupt bit. This has the effect
++	 * of resetting the GMBUS controller and so clearing the
++	 * BUS_ERROR raised by the slave's NAK.
++	 */
++	REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
++	REG_WRITE(GMBUS1 + reg_offset, 0);
++
++done:
++	/* Mark the GMBUS interface as disabled. We will re-enable it at the
++	 * start of the next xfer, till then let it sleep.
++	 */
++	REG_WRITE(GMBUS0 + reg_offset, 0);
++	return i;
++
++timeout:
++	DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
++		 bus->reg0 & 0xff, bus->adapter.name);
++	REG_WRITE(GMBUS0 + reg_offset, 0);
++
++	/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
++	bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
++	if (!bus->force_bit)
++		return -ENOMEM;
++
++	return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
++}
++
++static u32 gmbus_func(struct i2c_adapter *adapter)
++{
++	struct intel_gmbus *bus = container_of(adapter,
++					       struct intel_gmbus,
++					       adapter);
++
++	if (bus->force_bit)
++		bus->force_bit->algo->functionality(bus->force_bit);
++
++	return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
++		/* I2C_FUNC_10BIT_ADDR | */
++		I2C_FUNC_SMBUS_READ_BLOCK_DATA |
++		I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
++}
++
++static const struct i2c_algorithm gmbus_algorithm = {
++	.master_xfer	= gmbus_xfer,
++	.functionality	= gmbus_func
++};
++
++/**
++ * intel_gmbus_setup - instantiate all Intel i2c GMBuses
++ * @dev: DRM device
++ */
++int gma_intel_setup_gmbus(struct drm_device *dev)
++{
++	static const char *names[GMBUS_NUM_PORTS] = {
++		"disabled",
++		"ssc",
++		"vga",
++		"panel",
++		"dpc",
++		"dpb",
++		"reserved",
++		"dpd",
++	};
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	int ret, i;
++
++	dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
++				  GFP_KERNEL);
++	if (dev_priv->gmbus == NULL)
++		return -ENOMEM;
++
++	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
++		struct intel_gmbus *bus = &dev_priv->gmbus[i];
++
++		bus->adapter.owner = THIS_MODULE;
++		bus->adapter.class = I2C_CLASS_DDC;
++		snprintf(bus->adapter.name,
++			 sizeof(bus->adapter.name),
++			 "gma500 gmbus %s",
++			 names[i]);
++
++		bus->adapter.dev.parent = &dev->pdev->dev;
++		bus->adapter.algo_data	= dev_priv;
++
++		bus->adapter.algo = &gmbus_algorithm;
++		ret = i2c_add_adapter(&bus->adapter);
++		if (ret)
++			goto err;
++
++		/* By default use a conservative clock rate */
++		bus->reg0 = i | GMBUS_RATE_100KHZ;
++
++		/* XXX force bit banging until GMBUS is fully debugged */
++		bus->force_bit = intel_gpio_create(dev_priv, i);
++	}
++
++	gma_intel_i2c_reset(dev_priv->dev);
++
++	return 0;
++
++err:
++	while (--i) {
++		struct intel_gmbus *bus = &dev_priv->gmbus[i];
++		i2c_del_adapter(&bus->adapter);
++	}
++	kfree(dev_priv->gmbus);
++	dev_priv->gmbus = NULL;
++	return ret;
++}
++
++void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
++{
++	struct intel_gmbus *bus = to_intel_gmbus(adapter);
++
++	/* speed:
++	 * 0x0 = 100 KHz
++	 * 0x1 = 50 KHz
++	 * 0x2 = 400 KHz
++	 * 0x3 = 1000 Khz
++	 */
++	bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
++}
++
++void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
++{
++	struct intel_gmbus *bus = to_intel_gmbus(adapter);
++
++	if (force_bit) {
++		if (bus->force_bit == NULL) {
++			struct drm_psb_private *dev_priv = adapter->algo_data;
++			bus->force_bit = intel_gpio_create(dev_priv,
++							   bus->reg0 & 0xff);
++		}
++	} else {
++		if (bus->force_bit) {
++			i2c_del_adapter(bus->force_bit);
++			kfree(bus->force_bit);
++			bus->force_bit = NULL;
++		}
++	}
++}
++
++void gma_intel_teardown_gmbus(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	int i;
++
++	if (dev_priv->gmbus == NULL)
++		return;
++
++	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
++		struct intel_gmbus *bus = &dev_priv->gmbus[i];
++		if (bus->force_bit) {
++			i2c_del_adapter(bus->force_bit);
++			kfree(bus->force_bit);
++		}
++		i2c_del_adapter(&bus->adapter);
++	}
++
++	kfree(dev_priv->gmbus);
++	dev_priv->gmbus = NULL;
++}
+diff --git a/drivers/gpu/drm/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c
+new file mode 100644
+index 0000000..98a28c2
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/intel_i2c.c
+@@ -0,0 +1,169 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ *	Eric Anholt <eric at anholt.net>
++ */
++#include <linux/export.h>
++#include <linux/i2c.h>
++#include <linux/i2c-algo-bit.h>
++
++#include "psb_drv.h"
++#include "psb_intel_reg.h"
++
++/*
++ * Intel GPIO access functions
++ */
++
++#define I2C_RISEFALL_TIME 20
++
++static int get_clock(void *data)
++{
++	struct psb_intel_i2c_chan *chan = data;
++	struct drm_device *dev = chan->drm_dev;
++	u32 val;
++
++	val = REG_READ(chan->reg);
++	return (val & GPIO_CLOCK_VAL_IN) != 0;
++}
++
++static int get_data(void *data)
++{
++	struct psb_intel_i2c_chan *chan = data;
++	struct drm_device *dev = chan->drm_dev;
++	u32 val;
++
++	val = REG_READ(chan->reg);
++	return (val & GPIO_DATA_VAL_IN) != 0;
++}
++
++static void set_clock(void *data, int state_high)
++{
++	struct psb_intel_i2c_chan *chan = data;
++	struct drm_device *dev = chan->drm_dev;
++	u32 reserved = 0, clock_bits;
++
++	/* On most chips, these bits must be preserved in software. */
++	reserved =
++		    REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
++					   GPIO_CLOCK_PULLUP_DISABLE);
++
++	if (state_high)
++		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
++	else
++		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
++		    GPIO_CLOCK_VAL_MASK;
++	REG_WRITE(chan->reg, reserved | clock_bits);
++	udelay(I2C_RISEFALL_TIME);	/* wait for the line to change state */
++}
++
++static void set_data(void *data, int state_high)
++{
++	struct psb_intel_i2c_chan *chan = data;
++	struct drm_device *dev = chan->drm_dev;
++	u32 reserved = 0, data_bits;
++
++	/* On most chips, these bits must be preserved in software. */
++	reserved =
++		    REG_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
++					   GPIO_CLOCK_PULLUP_DISABLE);
++
++	if (state_high)
++		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
++	else
++		data_bits =
++		    GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
++		    GPIO_DATA_VAL_MASK;
++
++	REG_WRITE(chan->reg, reserved | data_bits);
++	udelay(I2C_RISEFALL_TIME);	/* wait for the line to change state */
++}
++
++/**
++ * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
++ * @dev: DRM device
++ * @output: driver specific output device
++ * @reg: GPIO reg to use
++ * @name: name for this bus
++ *
++ * Creates and registers a new i2c bus with the Linux i2c layer, for use
++ * in output probing and control (e.g. DDC or SDVO control functions).
++ *
++ * Possible values for @reg include:
++ *   %GPIOA
++ *   %GPIOB
++ *   %GPIOC
++ *   %GPIOD
++ *   %GPIOE
++ *   %GPIOF
++ *   %GPIOG
++ *   %GPIOH
++ * see PRM for details on how these different busses are used.
++ */
++struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
++					const u32 reg, const char *name)
++{
++	struct psb_intel_i2c_chan *chan;
++
++	chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
++	if (!chan)
++		goto out_free;
++
++	chan->drm_dev = dev;
++	chan->reg = reg;
++	snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
++	chan->adapter.owner = THIS_MODULE;
++	chan->adapter.algo_data = &chan->algo;
++	chan->adapter.dev.parent = &dev->pdev->dev;
++	chan->algo.setsda = set_data;
++	chan->algo.setscl = set_clock;
++	chan->algo.getsda = get_data;
++	chan->algo.getscl = get_clock;
++	chan->algo.udelay = 20;
++	chan->algo.timeout = usecs_to_jiffies(2200);
++	chan->algo.data = chan;
++
++	i2c_set_adapdata(&chan->adapter, chan);
++
++	if (i2c_bit_add_bus(&chan->adapter))
++		goto out_free;
++
++	/* JJJ:  raise SCL and SDA? */
++	set_data(chan, 1);
++	set_clock(chan, 1);
++	udelay(20);
++
++	return chan;
++
++out_free:
++	kfree(chan);
++	return NULL;
++}
++
++/**
++ * psb_intel_i2c_destroy - unregister and free i2c bus resources
++ * @output: channel to free
++ *
++ * Unregister the adapter from the i2c layer, then free the structure.
++ */
++void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan)
++{
++	if (!chan)
++		return;
++
++	i2c_del_adapter(&chan->adapter);
++	kfree(chan);
++}
+diff --git a/drivers/gpu/drm/gma500/intel_opregion.c b/drivers/gpu/drm/gma500/intel_opregion.c
+new file mode 100644
+index 0000000..d946bc1
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/intel_opregion.c
+@@ -0,0 +1,81 @@
++/*
++ * Copyright 2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * FIXME: resolve with the i915 version
++ */
++
++#include "psb_drv.h"
++
++struct opregion_header {
++	u8 signature[16];
++	u32 size;
++	u32 opregion_ver;
++	u8 bios_ver[32];
++	u8 vbios_ver[16];
++	u8 driver_ver[16];
++	u32 mboxes;
++	u8 reserved[164];
++} __packed;
++
++struct opregion_apci {
++	/*FIXME: add it later*/
++} __packed;
++
++struct opregion_swsci {
++	/*FIXME: add it later*/
++} __packed;
++
++struct opregion_acpi {
++	/*FIXME: add it later*/
++} __packed;
++
++int gma_intel_opregion_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 opregion_phy;
++	void *base;
++	u32 *lid_state;
++
++	dev_priv->lid_state = NULL;
++
++	pci_read_config_dword(dev->pdev, 0xfc, &opregion_phy);
++	if (opregion_phy == 0)
++		return -ENOTSUPP;
++
++	base = ioremap(opregion_phy, 8*1024);
++	if (!base)
++		return -ENOMEM;
++
++	lid_state = base + 0x01ac;
++
++	dev_priv->lid_state = lid_state;
++	dev_priv->lid_last_state = readl(lid_state);
++	return 0;
++}
++
++int gma_intel_opregion_exit(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	if (dev_priv->lid_state)
++		iounmap(dev_priv->lid_state);
++	return 0;
++}
+diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c
+new file mode 100644
+index 0000000..af65678
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mdfld_device.c
+@@ -0,0 +1,691 @@
++/**************************************************************************
++ * Copyright (c) 2011, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include "psb_drv.h"
++#include "mid_bios.h"
++#include "mdfld_output.h"
++#include "mdfld_dsi_output.h"
++#include "tc35876x-dsi-lvds.h"
++
++#include <asm/intel_scu_ipc.h>
++
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++
++#define MRST_BLC_MAX_PWM_REG_FREQ	    0xFFFF
++#define BLC_PWM_PRECISION_FACTOR 100	/* 10000000 */
++#define BLC_PWM_FREQ_CALC_CONSTANT 32
++#define MHz 1000000
++#define BRIGHTNESS_MIN_LEVEL 1
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BRIGHTNESS_MASK	0xFF
++#define BLC_POLARITY_NORMAL 0
++#define BLC_POLARITY_INVERSE 1
++#define BLC_ADJUSTMENT_MAX 100
++
++#define MDFLD_BLC_PWM_PRECISION_FACTOR    10
++#define MDFLD_BLC_MAX_PWM_REG_FREQ        0xFFFE
++#define MDFLD_BLC_MIN_PWM_REG_FREQ        0x2
++
++#define MDFLD_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
++#define MDFLD_BACKLIGHT_PWM_CTL_SHIFT	(16)
++
++static struct backlight_device *mdfld_backlight_device;
++
++int mdfld_set_brightness(struct backlight_device *bd)
++{
++	struct drm_device *dev =
++		(struct drm_device *)bl_get_data(mdfld_backlight_device);
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	int level = bd->props.brightness;
++
++	DRM_DEBUG_DRIVER("backlight level set to %d\n", level);
++
++	/* Perform value bounds checking */
++	if (level < BRIGHTNESS_MIN_LEVEL)
++		level = BRIGHTNESS_MIN_LEVEL;
++
++	if (gma_power_begin(dev, false)) {
++		u32 adjusted_level = 0;
++
++		/*
++		 * Adjust the backlight level with the percent in
++		 * dev_priv->blc_adj2
++		 */
++		adjusted_level = level * dev_priv->blc_adj2;
++		adjusted_level = adjusted_level / BLC_ADJUSTMENT_MAX;
++		dev_priv->brightness_adjusted = adjusted_level;
++
++		if (mdfld_get_panel_type(dev, 0) == TC35876X) {
++			if (dev_priv->dpi_panel_on[0] ||
++					dev_priv->dpi_panel_on[2])
++				tc35876x_brightness_control(dev,
++						dev_priv->brightness_adjusted);
++		} else {
++			if (dev_priv->dpi_panel_on[0])
++				mdfld_dsi_brightness_control(dev, 0,
++						dev_priv->brightness_adjusted);
++		}
++
++		if (dev_priv->dpi_panel_on[2])
++			mdfld_dsi_brightness_control(dev, 2,
++					dev_priv->brightness_adjusted);
++		gma_power_end(dev);
++	}
++
++	/* cache the brightness for later use */
++	dev_priv->brightness = level;
++	return 0;
++}
++
++static int mdfld_get_brightness(struct backlight_device *bd)
++{
++	struct drm_device *dev =
++		(struct drm_device *)bl_get_data(mdfld_backlight_device);
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	DRM_DEBUG_DRIVER("brightness = 0x%x \n", dev_priv->brightness);
++
++	/* return locally cached var instead of HW read (due to DPST etc.) */
++	return dev_priv->brightness;
++}
++
++static const struct backlight_ops mdfld_ops = {
++	.get_brightness = mdfld_get_brightness,
++	.update_status  = mdfld_set_brightness,
++};
++
++static int device_backlight_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = (struct drm_psb_private *)
++		dev->dev_private;
++
++	dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
++	dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
++
++	return 0;
++}
++
++static int mdfld_backlight_init(struct drm_device *dev)
++{
++	struct backlight_properties props;
++	int ret = 0;
++
++	memset(&props, 0, sizeof(struct backlight_properties));
++	props.max_brightness = BRIGHTNESS_MAX_LEVEL;
++	props.type = BACKLIGHT_PLATFORM;
++	mdfld_backlight_device = backlight_device_register("mdfld-bl",
++				NULL, (void *)dev, &mdfld_ops, &props);
++
++	if (IS_ERR(mdfld_backlight_device))
++		return PTR_ERR(mdfld_backlight_device);
++
++	ret = device_backlight_init(dev);
++	if (ret)
++		return ret;
++
++	mdfld_backlight_device->props.brightness = BRIGHTNESS_MAX_LEVEL;
++	mdfld_backlight_device->props.max_brightness = BRIGHTNESS_MAX_LEVEL;
++	backlight_update_status(mdfld_backlight_device);
++	return 0;
++}
++#endif
++
++struct backlight_device *mdfld_get_backlight_device(void)
++{
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++	return mdfld_backlight_device;
++#else
++	return NULL;
++#endif
++}
++
++/*
++ * mdfld_save_display_registers
++ *
++ * Description: We are going to suspend so save current display
++ * register state.
++ *
++ * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
++ */
++static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct medfield_state *regs = &dev_priv->regs.mdfld;
++	int i;
++
++	/* register */
++	u32 dpll_reg = MRST_DPLL_A;
++	u32 fp_reg = MRST_FPA0;
++	u32 pipeconf_reg = PIPEACONF;
++	u32 htot_reg = HTOTAL_A;
++	u32 hblank_reg = HBLANK_A;
++	u32 hsync_reg = HSYNC_A;
++	u32 vtot_reg = VTOTAL_A;
++	u32 vblank_reg = VBLANK_A;
++	u32 vsync_reg = VSYNC_A;
++	u32 pipesrc_reg = PIPEASRC;
++	u32 dspstride_reg = DSPASTRIDE;
++	u32 dsplinoff_reg = DSPALINOFF;
++	u32 dsptileoff_reg = DSPATILEOFF;
++	u32 dspsize_reg = DSPASIZE;
++	u32 dsppos_reg = DSPAPOS;
++	u32 dspsurf_reg = DSPASURF;
++	u32 mipi_reg = MIPI;
++	u32 dspcntr_reg = DSPACNTR;
++	u32 dspstatus_reg = PIPEASTAT;
++	u32 palette_reg = PALETTE_A;
++
++	/* pointer to values */
++	u32 *dpll_val = &regs->saveDPLL_A;
++	u32 *fp_val = &regs->saveFPA0;
++	u32 *pipeconf_val = &regs->savePIPEACONF;
++	u32 *htot_val = &regs->saveHTOTAL_A;
++	u32 *hblank_val = &regs->saveHBLANK_A;
++	u32 *hsync_val = &regs->saveHSYNC_A;
++	u32 *vtot_val = &regs->saveVTOTAL_A;
++	u32 *vblank_val = &regs->saveVBLANK_A;
++	u32 *vsync_val = &regs->saveVSYNC_A;
++	u32 *pipesrc_val = &regs->savePIPEASRC;
++	u32 *dspstride_val = &regs->saveDSPASTRIDE;
++	u32 *dsplinoff_val = &regs->saveDSPALINOFF;
++	u32 *dsptileoff_val = &regs->saveDSPATILEOFF;
++	u32 *dspsize_val = &regs->saveDSPASIZE;
++	u32 *dsppos_val = &regs->saveDSPAPOS;
++	u32 *dspsurf_val = &regs->saveDSPASURF;
++	u32 *mipi_val = &regs->saveMIPI;
++	u32 *dspcntr_val = &regs->saveDSPACNTR;
++	u32 *dspstatus_val = &regs->saveDSPASTATUS;
++	u32 *palette_val = regs->save_palette_a;
++
++	switch (pipe) {
++	case 0:
++		break;
++	case 1:
++		/* regester */
++		dpll_reg = MDFLD_DPLL_B;
++		fp_reg = MDFLD_DPLL_DIV0;
++		pipeconf_reg = PIPEBCONF;
++		htot_reg = HTOTAL_B;
++		hblank_reg = HBLANK_B;
++		hsync_reg = HSYNC_B;
++		vtot_reg = VTOTAL_B;
++		vblank_reg = VBLANK_B;
++		vsync_reg = VSYNC_B;
++		pipesrc_reg = PIPEBSRC;
++		dspstride_reg = DSPBSTRIDE;
++		dsplinoff_reg = DSPBLINOFF;
++		dsptileoff_reg = DSPBTILEOFF;
++		dspsize_reg = DSPBSIZE;
++		dsppos_reg = DSPBPOS;
++		dspsurf_reg = DSPBSURF;
++		dspcntr_reg = DSPBCNTR;
++		dspstatus_reg = PIPEBSTAT;
++		palette_reg = PALETTE_B;
++
++		/* values */
++		dpll_val = &regs->saveDPLL_B;
++		fp_val = &regs->saveFPB0;
++		pipeconf_val = &regs->savePIPEBCONF;
++		htot_val = &regs->saveHTOTAL_B;
++		hblank_val = &regs->saveHBLANK_B;
++		hsync_val = &regs->saveHSYNC_B;
++		vtot_val = &regs->saveVTOTAL_B;
++		vblank_val = &regs->saveVBLANK_B;
++		vsync_val = &regs->saveVSYNC_B;
++		pipesrc_val = &regs->savePIPEBSRC;
++		dspstride_val = &regs->saveDSPBSTRIDE;
++		dsplinoff_val = &regs->saveDSPBLINOFF;
++		dsptileoff_val = &regs->saveDSPBTILEOFF;
++		dspsize_val = &regs->saveDSPBSIZE;
++		dsppos_val = &regs->saveDSPBPOS;
++		dspsurf_val = &regs->saveDSPBSURF;
++		dspcntr_val = &regs->saveDSPBCNTR;
++		dspstatus_val = &regs->saveDSPBSTATUS;
++		palette_val = regs->save_palette_b;
++		break;
++	case 2:
++		/* register */
++		pipeconf_reg = PIPECCONF;
++		htot_reg = HTOTAL_C;
++		hblank_reg = HBLANK_C;
++		hsync_reg = HSYNC_C;
++		vtot_reg = VTOTAL_C;
++		vblank_reg = VBLANK_C;
++		vsync_reg = VSYNC_C;
++		pipesrc_reg = PIPECSRC;
++		dspstride_reg = DSPCSTRIDE;
++		dsplinoff_reg = DSPCLINOFF;
++		dsptileoff_reg = DSPCTILEOFF;
++		dspsize_reg = DSPCSIZE;
++		dsppos_reg = DSPCPOS;
++		dspsurf_reg = DSPCSURF;
++		mipi_reg = MIPI_C;
++		dspcntr_reg = DSPCCNTR;
++		dspstatus_reg = PIPECSTAT;
++		palette_reg = PALETTE_C;
++
++		/* pointer to values */
++		pipeconf_val = &regs->savePIPECCONF;
++		htot_val = &regs->saveHTOTAL_C;
++		hblank_val = &regs->saveHBLANK_C;
++		hsync_val = &regs->saveHSYNC_C;
++		vtot_val = &regs->saveVTOTAL_C;
++		vblank_val = &regs->saveVBLANK_C;
++		vsync_val = &regs->saveVSYNC_C;
++		pipesrc_val = &regs->savePIPECSRC;
++		dspstride_val = &regs->saveDSPCSTRIDE;
++		dsplinoff_val = &regs->saveDSPCLINOFF;
++		dsptileoff_val = &regs->saveDSPCTILEOFF;
++		dspsize_val = &regs->saveDSPCSIZE;
++		dsppos_val = &regs->saveDSPCPOS;
++		dspsurf_val = &regs->saveDSPCSURF;
++		mipi_val = &regs->saveMIPI_C;
++		dspcntr_val = &regs->saveDSPCCNTR;
++		dspstatus_val = &regs->saveDSPCSTATUS;
++		palette_val = regs->save_palette_c;
++		break;
++	default:
++		DRM_ERROR("%s, invalid pipe number.\n", __func__);
++		return -EINVAL;
++	}
++
++	/* Pipe & plane A info */
++	*dpll_val = PSB_RVDC32(dpll_reg);
++	*fp_val = PSB_RVDC32(fp_reg);
++	*pipeconf_val = PSB_RVDC32(pipeconf_reg);
++	*htot_val = PSB_RVDC32(htot_reg);
++	*hblank_val = PSB_RVDC32(hblank_reg);
++	*hsync_val = PSB_RVDC32(hsync_reg);
++	*vtot_val = PSB_RVDC32(vtot_reg);
++	*vblank_val = PSB_RVDC32(vblank_reg);
++	*vsync_val = PSB_RVDC32(vsync_reg);
++	*pipesrc_val = PSB_RVDC32(pipesrc_reg);
++	*dspstride_val = PSB_RVDC32(dspstride_reg);
++	*dsplinoff_val = PSB_RVDC32(dsplinoff_reg);
++	*dsptileoff_val = PSB_RVDC32(dsptileoff_reg);
++	*dspsize_val = PSB_RVDC32(dspsize_reg);
++	*dsppos_val = PSB_RVDC32(dsppos_reg);
++	*dspsurf_val = PSB_RVDC32(dspsurf_reg);
++	*dspcntr_val = PSB_RVDC32(dspcntr_reg);
++	*dspstatus_val = PSB_RVDC32(dspstatus_reg);
++
++	/*save palette (gamma) */
++	for (i = 0; i < 256; i++)
++		palette_val[i] = PSB_RVDC32(palette_reg + (i << 2));
++
++	if (pipe == 1) {
++		regs->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
++		regs->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
++
++		regs->saveHDMIPHYMISCCTL = PSB_RVDC32(HDMIPHYMISCCTL);
++		regs->saveHDMIB_CONTROL = PSB_RVDC32(HDMIB_CONTROL);
++		return 0;
++	}
++
++	*mipi_val = PSB_RVDC32(mipi_reg);
++	return 0;
++}
++
++/*
++ * mdfld_restore_display_registers
++ *
++ * Description: We are going to resume so restore display register state.
++ *
++ * Notes: FIXME_JLIU7 need to add the support for DPI MIPI & HDMI audio
++ */
++static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
++{
++	/* To get  panel out of ULPS mode. */
++	u32 temp = 0;
++	u32 device_ready_reg = DEVICE_READY_REG;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct mdfld_dsi_config *dsi_config = NULL;
++	struct medfield_state *regs = &dev_priv->regs.mdfld;
++	u32 i = 0;
++	u32 dpll = 0;
++	u32 timeout = 0;
++
++	/* regester */
++	u32 dpll_reg = MRST_DPLL_A;
++	u32 fp_reg = MRST_FPA0;
++	u32 pipeconf_reg = PIPEACONF;
++	u32 htot_reg = HTOTAL_A;
++	u32 hblank_reg = HBLANK_A;
++	u32 hsync_reg = HSYNC_A;
++	u32 vtot_reg = VTOTAL_A;
++	u32 vblank_reg = VBLANK_A;
++	u32 vsync_reg = VSYNC_A;
++	u32 pipesrc_reg = PIPEASRC;
++	u32 dspstride_reg = DSPASTRIDE;
++	u32 dsplinoff_reg = DSPALINOFF;
++	u32 dsptileoff_reg = DSPATILEOFF;
++	u32 dspsize_reg = DSPASIZE;
++	u32 dsppos_reg = DSPAPOS;
++	u32 dspsurf_reg = DSPASURF;
++	u32 dspstatus_reg = PIPEASTAT;
++	u32 mipi_reg = MIPI;
++	u32 dspcntr_reg = DSPACNTR;
++	u32 palette_reg = PALETTE_A;
++
++	/* values */
++	u32 dpll_val = regs->saveDPLL_A & ~DPLL_VCO_ENABLE;
++	u32 fp_val = regs->saveFPA0;
++	u32 pipeconf_val = regs->savePIPEACONF;
++	u32 htot_val = regs->saveHTOTAL_A;
++	u32 hblank_val = regs->saveHBLANK_A;
++	u32 hsync_val = regs->saveHSYNC_A;
++	u32 vtot_val = regs->saveVTOTAL_A;
++	u32 vblank_val = regs->saveVBLANK_A;
++	u32 vsync_val = regs->saveVSYNC_A;
++	u32 pipesrc_val = regs->savePIPEASRC;
++	u32 dspstride_val = regs->saveDSPASTRIDE;
++	u32 dsplinoff_val = regs->saveDSPALINOFF;
++	u32 dsptileoff_val = regs->saveDSPATILEOFF;
++	u32 dspsize_val = regs->saveDSPASIZE;
++	u32 dsppos_val = regs->saveDSPAPOS;
++	u32 dspsurf_val = regs->saveDSPASURF;
++	u32 dspstatus_val = regs->saveDSPASTATUS;
++	u32 mipi_val = regs->saveMIPI;
++	u32 dspcntr_val = regs->saveDSPACNTR;
++	u32 *palette_val = regs->save_palette_a;
++
++	switch (pipe) {
++	case 0:
++		dsi_config = dev_priv->dsi_configs[0];
++		break;
++	case 1:
++		/* regester */
++		dpll_reg = MDFLD_DPLL_B;
++		fp_reg = MDFLD_DPLL_DIV0;
++		pipeconf_reg = PIPEBCONF;
++		htot_reg = HTOTAL_B;
++		hblank_reg = HBLANK_B;
++		hsync_reg = HSYNC_B;
++		vtot_reg = VTOTAL_B;
++		vblank_reg = VBLANK_B;
++		vsync_reg = VSYNC_B;
++		pipesrc_reg = PIPEBSRC;
++		dspstride_reg = DSPBSTRIDE;
++		dsplinoff_reg = DSPBLINOFF;
++		dsptileoff_reg = DSPBTILEOFF;
++		dspsize_reg = DSPBSIZE;
++		dsppos_reg = DSPBPOS;
++		dspsurf_reg = DSPBSURF;
++		dspcntr_reg = DSPBCNTR;
++		dspstatus_reg = PIPEBSTAT;
++		palette_reg = PALETTE_B;
++
++		/* values */
++		dpll_val = regs->saveDPLL_B & ~DPLL_VCO_ENABLE;
++		fp_val = regs->saveFPB0;
++		pipeconf_val = regs->savePIPEBCONF;
++		htot_val = regs->saveHTOTAL_B;
++		hblank_val = regs->saveHBLANK_B;
++		hsync_val = regs->saveHSYNC_B;
++		vtot_val = regs->saveVTOTAL_B;
++		vblank_val = regs->saveVBLANK_B;
++		vsync_val = regs->saveVSYNC_B;
++		pipesrc_val = regs->savePIPEBSRC;
++		dspstride_val = regs->saveDSPBSTRIDE;
++		dsplinoff_val = regs->saveDSPBLINOFF;
++		dsptileoff_val = regs->saveDSPBTILEOFF;
++		dspsize_val = regs->saveDSPBSIZE;
++		dsppos_val = regs->saveDSPBPOS;
++		dspsurf_val = regs->saveDSPBSURF;
++		dspcntr_val = regs->saveDSPBCNTR;
++		dspstatus_val = regs->saveDSPBSTATUS;
++		palette_val = regs->save_palette_b;
++		break;
++	case 2:
++		/* regester */
++		pipeconf_reg = PIPECCONF;
++		htot_reg = HTOTAL_C;
++		hblank_reg = HBLANK_C;
++		hsync_reg = HSYNC_C;
++		vtot_reg = VTOTAL_C;
++		vblank_reg = VBLANK_C;
++		vsync_reg = VSYNC_C;
++		pipesrc_reg = PIPECSRC;
++		dspstride_reg = DSPCSTRIDE;
++		dsplinoff_reg = DSPCLINOFF;
++		dsptileoff_reg = DSPCTILEOFF;
++		dspsize_reg = DSPCSIZE;
++		dsppos_reg = DSPCPOS;
++		dspsurf_reg = DSPCSURF;
++		mipi_reg = MIPI_C;
++		dspcntr_reg = DSPCCNTR;
++		dspstatus_reg = PIPECSTAT;
++		palette_reg = PALETTE_C;
++
++		/* values */
++		pipeconf_val = regs->savePIPECCONF;
++		htot_val = regs->saveHTOTAL_C;
++		hblank_val = regs->saveHBLANK_C;
++		hsync_val = regs->saveHSYNC_C;
++		vtot_val = regs->saveVTOTAL_C;
++		vblank_val = regs->saveVBLANK_C;
++		vsync_val = regs->saveVSYNC_C;
++		pipesrc_val = regs->savePIPECSRC;
++		dspstride_val = regs->saveDSPCSTRIDE;
++		dsplinoff_val = regs->saveDSPCLINOFF;
++		dsptileoff_val = regs->saveDSPCTILEOFF;
++		dspsize_val = regs->saveDSPCSIZE;
++		dsppos_val = regs->saveDSPCPOS;
++		dspsurf_val = regs->saveDSPCSURF;
++		mipi_val = regs->saveMIPI_C;
++		dspcntr_val = regs->saveDSPCCNTR;
++		dspstatus_val = regs->saveDSPCSTATUS;
++		palette_val = regs->save_palette_c;
++
++		dsi_config = dev_priv->dsi_configs[1];
++		break;
++	default:
++		DRM_ERROR("%s, invalid pipe number.\n", __func__);
++		return -EINVAL;
++	}
++
++	/*make sure VGA plane is off. it initializes to on after reset!*/
++	PSB_WVDC32(0x80000000, VGACNTRL);
++
++	if (pipe == 1) {
++		PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, dpll_reg);
++		PSB_RVDC32(dpll_reg);
++
++		PSB_WVDC32(fp_val, fp_reg);
++	} else {
++
++		dpll = PSB_RVDC32(dpll_reg);
++
++		if (!(dpll & DPLL_VCO_ENABLE)) {
++
++			/* When ungating power of DPLL, needs to wait 0.5us
++			   before enable the VCO */
++			if (dpll & MDFLD_PWR_GATE_EN) {
++				dpll &= ~MDFLD_PWR_GATE_EN;
++				PSB_WVDC32(dpll, dpll_reg);
++				/* FIXME_MDFLD PO - change 500 to 1 after PO */
++				udelay(500);
++			}
++
++			PSB_WVDC32(fp_val, fp_reg);
++			PSB_WVDC32(dpll_val, dpll_reg);
++			/* FIXME_MDFLD PO - change 500 to 1 after PO */
++			udelay(500);
++
++			dpll_val |= DPLL_VCO_ENABLE;
++			PSB_WVDC32(dpll_val, dpll_reg);
++			PSB_RVDC32(dpll_reg);
++
++			/* wait for DSI PLL to lock */
++			while (timeout < 20000 &&
++			  !(PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
++				udelay(150);
++				timeout++;
++			}
++
++			if (timeout == 20000) {
++				DRM_ERROR("%s, can't lock DSIPLL.\n",
++								__func__);
++				return -EINVAL;
++			}
++		}
++	}
++	/* Restore mode */
++	PSB_WVDC32(htot_val, htot_reg);
++	PSB_WVDC32(hblank_val, hblank_reg);
++	PSB_WVDC32(hsync_val, hsync_reg);
++	PSB_WVDC32(vtot_val, vtot_reg);
++	PSB_WVDC32(vblank_val, vblank_reg);
++	PSB_WVDC32(vsync_val, vsync_reg);
++	PSB_WVDC32(pipesrc_val, pipesrc_reg);
++	PSB_WVDC32(dspstatus_val, dspstatus_reg);
++
++	/*set up the plane*/
++	PSB_WVDC32(dspstride_val, dspstride_reg);
++	PSB_WVDC32(dsplinoff_val, dsplinoff_reg);
++	PSB_WVDC32(dsptileoff_val, dsptileoff_reg);
++	PSB_WVDC32(dspsize_val, dspsize_reg);
++	PSB_WVDC32(dsppos_val, dsppos_reg);
++	PSB_WVDC32(dspsurf_val, dspsurf_reg);
++
++	if (pipe == 1) {
++		/* restore palette (gamma) */
++		/*DRM_UDELAY(50000); */
++		for (i = 0; i < 256; i++)
++			PSB_WVDC32(palette_val[i], palette_reg + (i << 2));
++
++		PSB_WVDC32(regs->savePFIT_CONTROL, PFIT_CONTROL);
++		PSB_WVDC32(regs->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
++
++		/*TODO: resume HDMI port */
++
++		/*TODO: resume pipe*/
++
++		/*enable the plane*/
++		PSB_WVDC32(dspcntr_val & ~DISPLAY_PLANE_ENABLE, dspcntr_reg);
++
++		return 0;
++	}
++
++	/*set up pipe related registers*/
++	PSB_WVDC32(mipi_val, mipi_reg);
++
++	/*setup MIPI adapter + MIPI IP registers*/
++	if (dsi_config)
++		mdfld_dsi_controller_init(dsi_config, pipe);
++
++	if (in_atomic() || in_interrupt())
++		mdelay(20);
++	else
++		msleep(20);
++
++	/*enable the plane*/
++	PSB_WVDC32(dspcntr_val, dspcntr_reg);
++
++	if (in_atomic() || in_interrupt())
++		mdelay(20);
++	else
++		msleep(20);
++
++	/* LP Hold Release */
++	temp = REG_READ(mipi_reg);
++	temp |= LP_OUTPUT_HOLD_RELEASE;
++	REG_WRITE(mipi_reg, temp);
++	mdelay(1);
++
++
++	/* Set DSI host to exit from Utra Low Power State */
++	temp = REG_READ(device_ready_reg);
++	temp &= ~ULPS_MASK;
++	temp |= 0x3;
++	temp |= EXIT_ULPS_DEV_READY;
++	REG_WRITE(device_ready_reg, temp);
++	mdelay(1);
++
++	temp = REG_READ(device_ready_reg);
++	temp &= ~ULPS_MASK;
++	temp |= EXITING_ULPS;
++	REG_WRITE(device_ready_reg, temp);
++	mdelay(1);
++
++	/*enable the pipe*/
++	PSB_WVDC32(pipeconf_val, pipeconf_reg);
++
++	/* restore palette (gamma) */
++	/*DRM_UDELAY(50000); */
++	for (i = 0; i < 256; i++)
++		PSB_WVDC32(palette_val[i], palette_reg + (i << 2));
++
++	return 0;
++}
++
++static int mdfld_save_registers(struct drm_device *dev)
++{
++	/* mdfld_save_cursor_overlay_registers(dev); */
++	mdfld_save_display_registers(dev, 0);
++	mdfld_save_display_registers(dev, 2);
++	mdfld_disable_crtc(dev, 0);
++	mdfld_disable_crtc(dev, 2);
++
++	return 0;
++}
++
++static int mdfld_restore_registers(struct drm_device *dev)
++{
++	mdfld_restore_display_registers(dev, 2);
++	mdfld_restore_display_registers(dev, 0);
++	/* mdfld_restore_cursor_overlay_registers(dev); */
++
++	return 0;
++}
++
++static int mdfld_power_down(struct drm_device *dev)
++{
++	/* FIXME */
++	return 0;
++}
++
++static int mdfld_power_up(struct drm_device *dev)
++{
++	/* FIXME */
++	return 0;
++}
++
++const struct psb_ops mdfld_chip_ops = {
++	.name = "mdfld",
++	.accel_2d = 0,
++	.pipes = 3,
++	.crtcs = 3,
++	.sgx_offset = MRST_SGX_OFFSET,
++
++	.chip_setup = mid_chip_setup,
++	.crtc_helper = &mdfld_helper_funcs,
++	.crtc_funcs = &psb_intel_crtc_funcs,
++
++	.output_init = mdfld_output_init,
++
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++	.backlight_init = mdfld_backlight_init,
++#endif
++
++	.save_regs = mdfld_save_registers,
++	.restore_regs = mdfld_restore_registers,
++	.power_down = mdfld_power_down,
++	.power_up = mdfld_power_up,
++};
+diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+new file mode 100644
+index 0000000..d52358b
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+@@ -0,0 +1,1017 @@
++/*
++ * Copyright © 2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * jim liu <jim.liu at intel.com>
++ * Jackie Li<yaodong.li at intel.com>
++ */
++
++#include "mdfld_dsi_dpi.h"
++#include "mdfld_output.h"
++#include "mdfld_dsi_pkg_sender.h"
++#include "psb_drv.h"
++#include "tc35876x-dsi-lvds.h"
++
++static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
++								int pipe);
++
++static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe)
++{
++	u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
++	int timeout = 0;
++
++	udelay(500);
++
++	/* This will time out after approximately 2+ seconds */
++	while ((timeout < 20000) &&
++		(REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_DATA_FULL)) {
++		udelay(100);
++		timeout++;
++	}
++
++	if (timeout == 20000)
++		DRM_INFO("MIPI: HS Data FIFO was never cleared!\n");
++}
++
++static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe)
++{
++	u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
++	int timeout = 0;
++
++	udelay(500);
++
++	/* This will time out after approximately 2+ seconds */
++	while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg)
++					& DSI_FIFO_GEN_HS_CTRL_FULL)) {
++		udelay(100);
++		timeout++;
++	}
++	if (timeout == 20000)
++		DRM_INFO("MIPI: HS CMD FIFO was never cleared!\n");
++}
++
++static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe)
++{
++	u32 gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
++	int timeout = 0;
++
++	udelay(500);
++
++	/* This will time out after approximately 2+ seconds */
++	while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) &
++					DPI_FIFO_EMPTY) != DPI_FIFO_EMPTY)) {
++		udelay(100);
++		timeout++;
++	}
++
++	if (timeout == 20000)
++		DRM_ERROR("MIPI: DPI FIFO was never cleared\n");
++}
++
++static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe)
++{
++	u32 intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
++	int timeout = 0;
++
++	udelay(500);
++
++	/* This will time out after approximately 2+ seconds */
++	while ((timeout < 20000) && (!(REG_READ(intr_stat_reg)
++					& DSI_INTR_STATE_SPL_PKG_SENT))) {
++		udelay(100);
++		timeout++;
++	}
++
++	if (timeout == 20000)
++                DRM_ERROR("MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n");
++}
++
++/* For TC35876X */
++
++static void dsi_set_device_ready_state(struct drm_device *dev, int state,
++				int pipe)
++{
++	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), !!state, 0, 0);
++}
++
++static void dsi_set_pipe_plane_enable_state(struct drm_device *dev,
++							int state, int pipe)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 pipeconf_reg = PIPEACONF;
++	u32 dspcntr_reg = DSPACNTR;
++
++	u32 dspcntr = dev_priv->dspcntr[pipe];
++	u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
++
++	if (pipe) {
++		pipeconf_reg = PIPECCONF;
++		dspcntr_reg = DSPCCNTR;
++	} else
++		mipi &= (~0x03);
++
++	if (state) {
++		/*Set up pipe */
++		REG_WRITE(pipeconf_reg, BIT(31));
++
++		if (REG_BIT_WAIT(pipeconf_reg, 1, 30))
++			dev_err(&dev->pdev->dev, "%s: Pipe enable timeout\n",
++				__func__);
++
++		/*Set up display plane */
++		REG_WRITE(dspcntr_reg, dspcntr);
++	} else {
++		u32 dspbase_reg = pipe ? MDFLD_DSPCBASE : MRST_DSPABASE;
++
++		/* Put DSI lanes to ULPS to disable pipe */
++		REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 2, 2, 1);
++		REG_READ(MIPI_DEVICE_READY_REG(pipe)); /* posted write? */
++
++		/* LP Hold */
++		REG_FLD_MOD(MIPI_PORT_CONTROL(pipe), 0, 16, 16);
++		REG_READ(MIPI_PORT_CONTROL(pipe)); /* posted write? */
++
++		/* Disable display plane */
++		REG_FLD_MOD(dspcntr_reg, 0, 31, 31);
++
++		/* Flush the plane changes ??? posted write? */
++		REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++		REG_READ(dspbase_reg);
++
++		/* Disable PIPE */
++		REG_FLD_MOD(pipeconf_reg, 0, 31, 31);
++
++		if (REG_BIT_WAIT(pipeconf_reg, 0, 30))
++			dev_err(&dev->pdev->dev, "%s: Pipe disable timeout\n",
++				__func__);
++
++		if (REG_BIT_WAIT(MIPI_GEN_FIFO_STAT_REG(pipe), 1, 28))
++			dev_err(&dev->pdev->dev, "%s: FIFO not empty\n",
++				__func__);
++	}
++}
++
++static void mdfld_dsi_configure_down(struct mdfld_dsi_encoder *dsi_encoder,
++								int pipe)
++{
++	struct mdfld_dsi_dpi_output *dpi_output =
++				MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
++	struct mdfld_dsi_config *dsi_config =
++				mdfld_dsi_encoder_get_config(dsi_encoder);
++	struct drm_device *dev = dsi_config->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (!dev_priv->dpi_panel_on[pipe]) {
++		dev_err(dev->dev, "DPI panel is already off\n");
++		return;
++	}
++	tc35876x_toshiba_bridge_panel_off(dev);
++	tc35876x_set_bridge_reset_state(dev, 1);
++	dsi_set_pipe_plane_enable_state(dev, 0, pipe);
++	mdfld_dsi_dpi_shut_down(dpi_output, pipe);
++	dsi_set_device_ready_state(dev, 0, pipe);
++}
++
++static void mdfld_dsi_configure_up(struct mdfld_dsi_encoder *dsi_encoder,
++								int pipe)
++{
++	struct mdfld_dsi_dpi_output *dpi_output =
++				MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
++	struct mdfld_dsi_config *dsi_config =
++				mdfld_dsi_encoder_get_config(dsi_encoder);
++	struct drm_device *dev = dsi_config->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (dev_priv->dpi_panel_on[pipe]) {
++		dev_err(dev->dev, "DPI panel is already on\n");
++		return;
++	}
++
++	/* For resume path sequence */
++	mdfld_dsi_dpi_shut_down(dpi_output, pipe);
++	dsi_set_device_ready_state(dev, 0, pipe);
++
++	dsi_set_device_ready_state(dev, 1, pipe);
++	tc35876x_set_bridge_reset_state(dev, 0);
++	tc35876x_configure_lvds_bridge(dev);
++	mdfld_dsi_dpi_turn_on(dpi_output, pipe);  /* Send turn on command */
++	dsi_set_pipe_plane_enable_state(dev, 1, pipe);
++}
++/* End for TC35876X */
++
++/* ************************************************************************* *\
++ * FUNCTION: mdfld_dsi_tpo_ic_init
++ *
++ * DESCRIPTION:  This function is called only by mrst_dsi_mode_set and
++ *               restore_display_registers.  since this function does not
++ *               acquire the mutex, it is important that the calling function
++ *               does!
++\* ************************************************************************* */
++static void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe)
++{
++	struct drm_device *dev = dsi_config->dev;
++	u32 dcsChannelNumber = dsi_config->channel_num;
++	u32 gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
++	u32 gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
++	u32 gen_ctrl_val = GEN_LONG_WRITE;
++
++	DRM_INFO("Enter mrst init TPO MIPI display.\n");
++
++	gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
++
++	/* Flip page order */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x00008036);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
++
++	/* 0xF0 */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x005a5af0);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
++
++	/* Write protection key */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x005a5af1);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
++
++	/* 0xFC */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x005a5afc);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
++
++	/* 0xB7 */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x770000b7);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x00000044);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS));
++
++	/* 0xB6 */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x000a0ab6);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
++
++	/* 0xF2 */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x081010f2);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x4a070708);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x000000c5);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
++
++	/* 0xF8 */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x024003f8);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x01030a04);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x0e020220);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x00000004);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS));
++
++	/* 0xE2 */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x398fc3e2);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x0000916f);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS));
++
++	/* 0xB0 */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x000000b0);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
++
++	/* 0xF4 */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x240242f4);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x78ee2002);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x2a071050);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x507fee10);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x10300710);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS));
++
++	/* 0xBA */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x19fe07ba);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x101c0a31);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x00000010);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
++
++	/* 0xBB */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x28ff07bb);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x24280a31);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x00000034);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
++
++	/* 0xFB */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x535d05fb);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x1b1a2130);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x221e180e);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x131d2120);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x535d0508);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x1c1a2131);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x231f160d);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x111b2220);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x535c2008);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x1f1d2433);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x2c251a10);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x2c34372d);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x00000023);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
++
++	/* 0xFA */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x525c0bfa);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x1c1c232f);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x2623190e);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x18212625);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x545d0d0e);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x1e1d2333);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x26231a10);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x1a222725);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x545d280f);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x21202635);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x31292013);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x31393d33);
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x00000029);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
++
++	/* Set DM */
++	mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
++	REG_WRITE(gen_data_reg, 0x000100f7);
++	mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
++	REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
++}
++
++static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count,
++						int num_lane, int bpp)
++{
++	return (u16)((pixel_clock_count * bpp) / (num_lane * 8));
++}
++
++/*
++ * Calculate the dpi time basing on a given drm mode @mode
++ * return 0 on success.
++ * FIXME: I was using proposed mode value for calculation, may need to
++ * use crtc mode values later
++ */
++int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
++				struct mdfld_dsi_dpi_timing *dpi_timing,
++				int num_lane, int bpp)
++{
++	int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive;
++	int pclk_vsync, pclk_vfp, pclk_vbp;
++
++	pclk_hactive = mode->hdisplay;
++	pclk_hfp = mode->hsync_start - mode->hdisplay;
++	pclk_hsync = mode->hsync_end - mode->hsync_start;
++	pclk_hbp = mode->htotal - mode->hsync_end;
++
++	pclk_vfp = mode->vsync_start - mode->vdisplay;
++	pclk_vsync = mode->vsync_end - mode->vsync_start;
++	pclk_vbp = mode->vtotal - mode->vsync_end;
++
++	/*
++	 * byte clock counts were calculated by following formula
++	 * bclock_count = pclk_count * bpp / num_lane / 8
++	 */
++	dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count(
++						pclk_hsync, num_lane, bpp);
++	dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count(
++						pclk_hbp, num_lane, bpp);
++	dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count(
++						pclk_hfp, num_lane, bpp);
++	dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count(
++						pclk_hactive, num_lane, bpp);
++	dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count(
++						pclk_vsync, num_lane, bpp);
++	dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count(
++						pclk_vbp, num_lane, bpp);
++	dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count(
++						pclk_vfp, num_lane, bpp);
++
++	return 0;
++}
++
++void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
++								int pipe)
++{
++	struct drm_device *dev = dsi_config->dev;
++	int lane_count = dsi_config->lane_count;
++	struct mdfld_dsi_dpi_timing dpi_timing;
++	struct drm_display_mode *mode = dsi_config->mode;
++	u32 val;
++
++	/*un-ready device*/
++	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 0, 0, 0);
++
++	/*init dsi adapter before kicking off*/
++	REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
++
++	/*enable all interrupts*/
++	REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
++
++	/*set up func_prg*/
++	val = lane_count;
++	val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
++
++	switch (dsi_config->bpp) {
++	case 16:
++		val |= DSI_DPI_COLOR_FORMAT_RGB565;
++		break;
++	case 18:
++		val |= DSI_DPI_COLOR_FORMAT_RGB666;
++		break;
++	case 24:
++		val |= DSI_DPI_COLOR_FORMAT_RGB888;
++		break;
++	default:
++		DRM_ERROR("unsupported color format, bpp = %d\n",
++							dsi_config->bpp);
++	}
++	REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), val);
++
++	REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe),
++			(mode->vtotal * mode->htotal * dsi_config->bpp /
++				(8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
++	REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe),
++				0xffff & DSI_LP_RX_TIMEOUT_MASK);
++
++	/*max value: 20 clock cycles of txclkesc*/
++	REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe),
++				0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
++
++	/*min 21 txclkesc, max: ffffh*/
++	REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe),
++				0xffff & DSI_RESET_TIMER_MASK);
++
++	REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
++				mode->vdisplay << 16 | mode->hdisplay);
++
++	/*set DPI timing registers*/
++	mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
++				dsi_config->lane_count, dsi_config->bpp);
++
++	REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
++			dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
++	REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
++			dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
++	REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
++			dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
++	REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
++			dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
++	REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
++			dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
++	REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
++			dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
++	REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
++			dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
++
++	REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x46);
++
++	/*min: 7d0 max: 4e20*/
++	REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0x000007d0);
++
++	/*set up video mode*/
++	val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
++	REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), val);
++
++	REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
++
++	REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
++
++	/*TODO: figure out how to setup these registers*/
++	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
++		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
++	else
++		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150c3408);
++
++	REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
++
++	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
++		tc35876x_set_bridge_reset_state(dev, 0);  /*Pull High Reset */
++
++	/*set device ready*/
++	REG_FLD_MOD(MIPI_DEVICE_READY_REG(pipe), 1, 0, 0);
++}
++
++void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe)
++{
++	struct drm_device *dev = output->dev;
++
++	/* clear special packet sent bit */
++	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
++		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
++					DSI_INTR_STATE_SPL_PKG_SENT);
++
++	/*send turn on package*/
++	REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_TURN_ON);
++
++	/*wait for SPL_PKG_SENT interrupt*/
++	mdfld_wait_for_SPL_PKG_SENT(dev, pipe);
++
++	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
++		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
++					DSI_INTR_STATE_SPL_PKG_SENT);
++
++	output->panel_on = 1;
++
++	/* FIXME the following is disabled to WA the X slow start issue
++	   for TMD panel
++	if (pipe == 2)
++		dev_priv->dpi_panel_on2 = true;
++	else if (pipe == 0)
++		dev_priv->dpi_panel_on = true; */
++}
++
++static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output,
++								int pipe)
++{
++	struct drm_device *dev = output->dev;
++
++	/*if output is on, or mode setting didn't happen, ignore this*/
++	if ((!output->panel_on) || output->first_boot) {
++		output->first_boot = 0;
++		return;
++	}
++
++	/* Wait for dpi fifo to empty */
++	mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe);
++
++	/* Clear the special packet interrupt bit if set */
++	if (REG_READ(MIPI_INTR_STAT_REG(pipe)) & DSI_INTR_STATE_SPL_PKG_SENT)
++		REG_WRITE(MIPI_INTR_STAT_REG(pipe),
++					DSI_INTR_STATE_SPL_PKG_SENT);
++
++	if (REG_READ(MIPI_DPI_CONTROL_REG(pipe)) == DSI_DPI_CTRL_HS_SHUTDOWN)
++		goto shutdown_out;
++
++	REG_WRITE(MIPI_DPI_CONTROL_REG(pipe), DSI_DPI_CTRL_HS_SHUTDOWN);
++
++shutdown_out:
++	output->panel_on = 0;
++	output->first_boot = 0;
++
++	/* FIXME the following is disabled to WA the X slow start issue
++	   for TMD panel
++	if (pipe == 2)
++		dev_priv->dpi_panel_on2 = false;
++	else if (pipe == 0)
++		dev_priv->dpi_panel_on = false;	 */
++}
++
++static void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
++{
++	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
++	struct mdfld_dsi_dpi_output *dpi_output =
++				MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
++	struct mdfld_dsi_config *dsi_config =
++				mdfld_dsi_encoder_get_config(dsi_encoder);
++	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
++	struct drm_device *dev = dsi_config->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	/*start up display island if it was shutdown*/
++	if (!gma_power_begin(dev, true))
++		return;
++
++	if (on) {
++		if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
++			mdfld_dsi_dpi_turn_on(dpi_output, pipe);
++		else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
++			mdfld_dsi_configure_up(dsi_encoder, pipe);
++		else {
++			/*enable mipi port*/
++			REG_WRITE(MIPI_PORT_CONTROL(pipe),
++				REG_READ(MIPI_PORT_CONTROL(pipe)) | BIT(31));
++			REG_READ(MIPI_PORT_CONTROL(pipe));
++
++			mdfld_dsi_dpi_turn_on(dpi_output, pipe);
++			mdfld_dsi_tpo_ic_init(dsi_config, pipe);
++		}
++		dev_priv->dpi_panel_on[pipe] = true;
++	} else {
++		if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
++			mdfld_dsi_dpi_shut_down(dpi_output, pipe);
++		else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
++			mdfld_dsi_configure_down(dsi_encoder, pipe);
++		else {
++			mdfld_dsi_dpi_shut_down(dpi_output, pipe);
++
++			/*disable mipi port*/
++			REG_WRITE(MIPI_PORT_CONTROL(pipe),
++				REG_READ(MIPI_PORT_CONTROL(pipe)) & ~BIT(31));
++			REG_READ(MIPI_PORT_CONTROL(pipe));
++		}
++		dev_priv->dpi_panel_on[pipe] = false;
++	}
++	gma_power_end(dev);
++}
++
++void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
++{
++	mdfld_dsi_dpi_set_power(encoder, mode == DRM_MODE_DPMS_ON);
++}
++
++bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
++				     struct drm_display_mode *mode,
++				     struct drm_display_mode *adjusted_mode)
++{
++	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
++	struct mdfld_dsi_config *dsi_config =
++				mdfld_dsi_encoder_get_config(dsi_encoder);
++	struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
++
++	if (fixed_mode) {
++		adjusted_mode->hdisplay = fixed_mode->hdisplay;
++		adjusted_mode->hsync_start = fixed_mode->hsync_start;
++		adjusted_mode->hsync_end = fixed_mode->hsync_end;
++		adjusted_mode->htotal = fixed_mode->htotal;
++		adjusted_mode->vdisplay = fixed_mode->vdisplay;
++		adjusted_mode->vsync_start = fixed_mode->vsync_start;
++		adjusted_mode->vsync_end = fixed_mode->vsync_end;
++		adjusted_mode->vtotal = fixed_mode->vtotal;
++		adjusted_mode->clock = fixed_mode->clock;
++		drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
++	}
++	return true;
++}
++
++void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder)
++{
++	mdfld_dsi_dpi_set_power(encoder, false);
++}
++
++void mdfld_dsi_dpi_commit(struct drm_encoder *encoder)
++{
++	mdfld_dsi_dpi_set_power(encoder, true);
++}
++
++/* For TC35876X */
++/* This functionality was implemented in FW in iCDK */
++/* But removed in DV0 and later. So need to add here. */
++static void mipi_set_properties(struct mdfld_dsi_config *dsi_config, int pipe)
++{
++	struct drm_device *dev = dsi_config->dev;
++
++	REG_WRITE(MIPI_CTRL_REG(pipe), 0x00000018);
++	REG_WRITE(MIPI_INTR_EN_REG(pipe), 0xffffffff);
++	REG_WRITE(MIPI_HS_TX_TIMEOUT_REG(pipe), 0xffffff);
++	REG_WRITE(MIPI_LP_RX_TIMEOUT_REG(pipe), 0xffffff);
++	REG_WRITE(MIPI_TURN_AROUND_TIMEOUT_REG(pipe), 0x14);
++	REG_WRITE(MIPI_DEVICE_RESET_TIMER_REG(pipe), 0xff);
++	REG_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe), 0x25);
++	REG_WRITE(MIPI_INIT_COUNT_REG(pipe), 0xf0);
++	REG_WRITE(MIPI_EOT_DISABLE_REG(pipe), 0x00000000);
++	REG_WRITE(MIPI_LP_BYTECLK_REG(pipe), 0x00000004);
++	REG_WRITE(MIPI_DBI_BW_CTRL_REG(pipe), 0x00000820);
++	REG_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe), (0xa << 16) | 0x14);
++}
++
++static void mdfld_mipi_set_video_timing(struct mdfld_dsi_config *dsi_config,
++					int pipe)
++{
++	struct drm_device *dev = dsi_config->dev;
++	struct mdfld_dsi_dpi_timing dpi_timing;
++	struct drm_display_mode *mode = dsi_config->mode;
++
++	mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing,
++					dsi_config->lane_count,
++					dsi_config->bpp);
++
++	REG_WRITE(MIPI_DPI_RESOLUTION_REG(pipe),
++		mode->vdisplay << 16 | mode->hdisplay);
++	REG_WRITE(MIPI_HSYNC_COUNT_REG(pipe),
++		dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
++	REG_WRITE(MIPI_HBP_COUNT_REG(pipe),
++		dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
++	REG_WRITE(MIPI_HFP_COUNT_REG(pipe),
++		dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
++	REG_WRITE(MIPI_HACTIVE_COUNT_REG(pipe),
++		dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
++	REG_WRITE(MIPI_VSYNC_COUNT_REG(pipe),
++		dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
++	REG_WRITE(MIPI_VBP_COUNT_REG(pipe),
++		dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
++	REG_WRITE(MIPI_VFP_COUNT_REG(pipe),
++		dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
++}
++
++static void mdfld_mipi_config(struct mdfld_dsi_config *dsi_config, int pipe)
++{
++	struct drm_device *dev = dsi_config->dev;
++	int lane_count = dsi_config->lane_count;
++
++	if (pipe) {
++		REG_WRITE(MIPI_PORT_CONTROL(0), 0x00000002);
++		REG_WRITE(MIPI_PORT_CONTROL(2), 0x80000000);
++	} else {
++		REG_WRITE(MIPI_PORT_CONTROL(0), 0x80010000);
++		REG_WRITE(MIPI_PORT_CONTROL(2), 0x00);
++	}
++
++	REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x150A600F);
++	REG_WRITE(MIPI_VIDEO_MODE_FORMAT_REG(pipe), 0x0000000F);
++
++	/* lane_count = 3 */
++	REG_WRITE(MIPI_DSI_FUNC_PRG_REG(pipe), 0x00000200 | lane_count);
++
++	mdfld_mipi_set_video_timing(dsi_config, pipe);
++}
++
++static void mdfld_set_pipe_timing(struct mdfld_dsi_config *dsi_config, int pipe)
++{
++	struct drm_device *dev = dsi_config->dev;
++	struct drm_display_mode *mode = dsi_config->mode;
++
++	REG_WRITE(HTOTAL_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
++	REG_WRITE(HBLANK_A, ((mode->htotal - 1) << 16) | (mode->hdisplay - 1));
++	REG_WRITE(HSYNC_A,
++		((mode->hsync_end - 1) << 16) | (mode->hsync_start - 1));
++
++	REG_WRITE(VTOTAL_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
++	REG_WRITE(VBLANK_A, ((mode->vtotal - 1) << 16) | (mode->vdisplay - 1));
++	REG_WRITE(VSYNC_A,
++		((mode->vsync_end - 1) << 16) | (mode->vsync_start - 1));
++
++	REG_WRITE(PIPEASRC,
++		((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
++}
++/* End for TC35876X */
++
++void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
++				   struct drm_display_mode *mode,
++				   struct drm_display_mode *adjusted_mode)
++{
++	struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
++	struct mdfld_dsi_dpi_output *dpi_output =
++					MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
++	struct mdfld_dsi_config *dsi_config =
++				mdfld_dsi_encoder_get_config(dsi_encoder);
++	struct drm_device *dev = dsi_config->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
++
++	u32 pipeconf_reg = PIPEACONF;
++	u32 dspcntr_reg = DSPACNTR;
++
++	u32 pipeconf = dev_priv->pipeconf[pipe];
++	u32 dspcntr = dev_priv->dspcntr[pipe];
++	u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
++
++	if (pipe) {
++		pipeconf_reg = PIPECCONF;
++		dspcntr_reg = DSPCCNTR;
++	} else {
++		if (mdfld_get_panel_type(dev, pipe) == TC35876X)
++			mipi &= (~0x03); /* Use all four lanes */
++		else
++			mipi |= 2;
++	}
++
++	/*start up display island if it was shutdown*/
++	if (!gma_power_begin(dev, true))
++		return;
++
++	if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
++		/*
++		 * The following logic is required to reset the bridge and
++		 * configure. This also starts the DSI clock at 200MHz.
++		 */
++		tc35876x_set_bridge_reset_state(dev, 0);  /*Pull High Reset */
++		tc35876x_toshiba_bridge_panel_on(dev);
++		udelay(100);
++		/* Now start the DSI clock */
++		REG_WRITE(MRST_DPLL_A, 0x00);
++		REG_WRITE(MRST_FPA0, 0xC1);
++		REG_WRITE(MRST_DPLL_A, 0x00800000);
++		udelay(500);
++		REG_WRITE(MRST_DPLL_A, 0x80800000);
++
++		if (REG_BIT_WAIT(pipeconf_reg, 1, 29))
++			dev_err(&dev->pdev->dev, "%s: DSI PLL lock timeout\n",
++				__func__);
++
++		REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008);
++
++		mipi_set_properties(dsi_config, pipe);
++		mdfld_mipi_config(dsi_config, pipe);
++		mdfld_set_pipe_timing(dsi_config, pipe);
++
++		REG_WRITE(DSPABASE, 0x00);
++		REG_WRITE(DSPASTRIDE, (mode->hdisplay * 4));
++		REG_WRITE(DSPASIZE,
++			((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
++
++		REG_WRITE(DSPACNTR, 0x98000000);
++		REG_WRITE(DSPASURF, 0x00);
++
++		REG_WRITE(VGACNTRL, 0x80000000);
++		REG_WRITE(DEVICE_READY_REG, 0x00000001);
++
++		REG_WRITE(MIPI_PORT_CONTROL(pipe), 0x80810000);
++	} else {
++		/*set up mipi port FIXME: do at init time */
++		REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi);
++	}
++	REG_READ(MIPI_PORT_CONTROL(pipe));
++
++	if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
++		/* NOP */
++	} else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
++		/* set up DSI controller DPI interface */
++		mdfld_dsi_dpi_controller_init(dsi_config, pipe);
++
++		/* Configure MIPI Bridge and Panel */
++		tc35876x_configure_lvds_bridge(dev);
++		dev_priv->dpi_panel_on[pipe] = true;
++	} else {
++		/*turn on DPI interface*/
++		mdfld_dsi_dpi_turn_on(dpi_output, pipe);
++	}
++
++	/*set up pipe*/
++	REG_WRITE(pipeconf_reg, pipeconf);
++	REG_READ(pipeconf_reg);
++
++	/*set up display plane*/
++	REG_WRITE(dspcntr_reg, dspcntr);
++	REG_READ(dspcntr_reg);
++
++	msleep(20); /* FIXME: this should wait for vblank */
++
++	if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
++		/* NOP */
++	} else if (mdfld_get_panel_type(dev, pipe) == TC35876X) {
++		mdfld_dsi_dpi_turn_on(dpi_output, pipe);
++	} else {
++		/* init driver ic */
++		mdfld_dsi_tpo_ic_init(dsi_config, pipe);
++		/*init backlight*/
++		mdfld_dsi_brightness_init(dsi_config, pipe);
++	}
++
++	gma_power_end(dev);
++}
++
++/*
++ * Init DSI DPI encoder.
++ * Allocate an mdfld_dsi_encoder and attach it to given @dsi_connector
++ * return pointer of newly allocated DPI encoder, NULL on error
++ */
++struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
++				struct mdfld_dsi_connector *dsi_connector,
++				const struct panel_funcs *p_funcs)
++{
++	struct mdfld_dsi_dpi_output *dpi_output = NULL;
++	struct mdfld_dsi_config *dsi_config;
++	struct drm_connector *connector = NULL;
++	struct drm_encoder *encoder = NULL;
++	int pipe;
++	u32 data;
++	int ret;
++
++	pipe = dsi_connector->pipe;
++
++	if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
++		dsi_config = mdfld_dsi_get_config(dsi_connector);
++
++		/* panel hard-reset */
++		if (p_funcs->reset) {
++			ret = p_funcs->reset(pipe);
++			if (ret) {
++				DRM_ERROR("Panel %d hard-reset failed\n", pipe);
++				return NULL;
++			}
++		}
++
++		/* panel drvIC init */
++		if (p_funcs->drv_ic_init)
++			p_funcs->drv_ic_init(dsi_config, pipe);
++
++		/* panel power mode detect */
++		ret = mdfld_dsi_get_power_mode(dsi_config, &data, false);
++		if (ret) {
++			DRM_ERROR("Panel %d get power mode failed\n", pipe);
++			dsi_connector->status = connector_status_disconnected;
++		} else {
++			DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
++			dsi_connector->status = connector_status_connected;
++		}
++	}
++
++	dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
++	if (!dpi_output) {
++		DRM_ERROR("No memory\n");
++		return NULL;
++	}
++
++	if (dsi_connector->pipe)
++		dpi_output->panel_on = 0;
++	else
++		dpi_output->panel_on = 0;
++
++	dpi_output->dev = dev;
++	if (mdfld_get_panel_type(dev, pipe) != TC35876X)
++		dpi_output->p_funcs = p_funcs;
++	dpi_output->first_boot = 1;
++
++	/*get fixed mode*/
++	dsi_config = mdfld_dsi_get_config(dsi_connector);
++
++	/*create drm encoder object*/
++	connector = &dsi_connector->base.base;
++	encoder = &dpi_output->base.base.base;
++	drm_encoder_init(dev,
++			encoder,
++			p_funcs->encoder_funcs,
++			DRM_MODE_ENCODER_LVDS);
++	drm_encoder_helper_add(encoder,
++				p_funcs->encoder_helper_funcs);
++
++	/*attach to given connector*/
++	drm_mode_connector_attach_encoder(connector, encoder);
++
++	/*set possible crtcs and clones*/
++	if (dsi_connector->pipe) {
++		encoder->possible_crtcs = (1 << 2);
++		encoder->possible_clones = (1 << 1);
++	} else {
++		encoder->possible_crtcs = (1 << 0);
++		encoder->possible_clones = (1 << 0);
++	}
++
++	dsi_connector->base.encoder = &dpi_output->base.base;
++
++	return &dpi_output->base;
++}
+diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
+new file mode 100644
+index 0000000..6f76247
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
+@@ -0,0 +1,79 @@
++/*
++ * Copyright © 2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * jim liu <jim.liu at intel.com>
++ * Jackie Li<yaodong.li at intel.com>
++ */
++
++#ifndef __MDFLD_DSI_DPI_H__
++#define __MDFLD_DSI_DPI_H__
++
++#include "mdfld_dsi_output.h"
++#include "mdfld_output.h"
++
++struct mdfld_dsi_dpi_timing {
++	u16 hsync_count;
++	u16 hbp_count;
++	u16 hfp_count;
++	u16 hactive_count;
++	u16 vsync_count;
++	u16 vbp_count;
++	u16 vfp_count;
++};
++
++struct mdfld_dsi_dpi_output {
++	struct mdfld_dsi_encoder base;
++	struct drm_device *dev;
++
++	int panel_on;
++	int first_boot;
++
++	const struct panel_funcs *p_funcs;
++};
++
++#define MDFLD_DSI_DPI_OUTPUT(dsi_encoder)\
++	container_of(dsi_encoder, struct mdfld_dsi_dpi_output, base)
++
++/* Export functions */
++extern int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
++				struct mdfld_dsi_dpi_timing *dpi_timing,
++				int num_lane, int bpp);
++extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
++				struct mdfld_dsi_connector *dsi_connector,
++				const struct panel_funcs *p_funcs);
++
++/* MDFLD DPI helper functions */
++extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
++extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
++				struct drm_display_mode *mode,
++				struct drm_display_mode *adjusted_mode);
++extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
++extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);
++extern void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
++				struct drm_display_mode *mode,
++				struct drm_display_mode *adjusted_mode);
++extern void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output,
++				int pipe);
++extern void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
++				int pipe);
++#endif /*__MDFLD_DSI_DPI_H__*/
+diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+new file mode 100644
+index 0000000..5675d93
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+@@ -0,0 +1,621 @@
++/*
++ * Copyright © 2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * jim liu <jim.liu at intel.com>
++ * Jackie Li<yaodong.li at intel.com>
++ */
++
++#include <linux/module.h>
++
++#include "mdfld_dsi_output.h"
++#include "mdfld_dsi_dpi.h"
++#include "mdfld_output.h"
++#include "mdfld_dsi_pkg_sender.h"
++#include "tc35876x-dsi-lvds.h"
++#include <linux/pm_runtime.h>
++#include <asm/intel_scu_ipc.h>
++
++/* get the LABC from command line. */
++static int LABC_control = 1;
++
++#ifdef MODULE
++module_param(LABC_control, int, 0644);
++#else
++
++static int __init parse_LABC_control(char *arg)
++{
++	/* LABC control can be passed in as a cmdline parameter */
++	/* to enable this feature add LABC=1 to cmdline */
++	/* to disable this feature add LABC=0 to cmdline */
++	if (!arg)
++		return -EINVAL;
++
++	if (!strcasecmp(arg, "0"))
++		LABC_control = 0;
++	else if (!strcasecmp(arg, "1"))
++		LABC_control = 1;
++
++	return 0;
++}
++early_param("LABC", parse_LABC_control);
++#endif
++
++/**
++ * Check and see if the generic control or data buffer is empty and ready.
++ */
++void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, u32 gen_fifo_stat_reg,
++							u32 fifo_stat)
++{
++	u32 GEN_BF_time_out_count;
++
++	/* Check MIPI Adatper command registers */
++	for (GEN_BF_time_out_count = 0;
++			GEN_BF_time_out_count < GEN_FB_TIME_OUT;
++			GEN_BF_time_out_count++) {
++		if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat)
++			break;
++		udelay(100);
++	}
++
++	if (GEN_BF_time_out_count == GEN_FB_TIME_OUT)
++		DRM_ERROR("mdfld_dsi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x.\n",
++					gen_fifo_stat_reg);
++}
++
++/**
++ * Manage the DSI MIPI keyboard and display brightness.
++ * FIXME: this is exported to OSPM code. should work out an specific
++ * display interface to OSPM.
++ */
++
++void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
++{
++	struct mdfld_dsi_pkg_sender *sender =
++				mdfld_dsi_get_pkg_sender(dsi_config);
++	struct drm_device *dev = sender->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 gen_ctrl_val;
++
++	if (!sender) {
++		DRM_ERROR("No sender found\n");
++		return;
++	}
++
++	/* Set default display backlight value to 85% (0xd8)*/
++	mdfld_dsi_send_mcs_short(sender, write_display_brightness, 0xd8, 1,
++				true);
++
++	/* Set minimum brightness setting of CABC function to 20% (0x33)*/
++	mdfld_dsi_send_mcs_short(sender, write_cabc_min_bright, 0x33, 1, true);
++
++	/* Enable backlight or/and LABC */
++	gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | DISPLAY_DIMMING_ON |
++								BACKLIGHT_ON;
++	if (LABC_control == 1)
++		gen_ctrl_val |= DISPLAY_DIMMING_ON | DISPLAY_BRIGHTNESS_AUTO
++								| GAMMA_AUTO;
++
++	if (LABC_control == 1)
++		gen_ctrl_val |= AMBIENT_LIGHT_SENSE_ON;
++
++	dev_priv->mipi_ctrl_display = gen_ctrl_val;
++
++	mdfld_dsi_send_mcs_short(sender, write_ctrl_display, (u8)gen_ctrl_val,
++				1, true);
++
++	mdfld_dsi_send_mcs_short(sender, write_ctrl_cabc, UI_IMAGE, 1, true);
++}
++
++void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, int level)
++{
++	struct mdfld_dsi_pkg_sender *sender;
++	struct drm_psb_private *dev_priv;
++	struct mdfld_dsi_config *dsi_config;
++	u32 gen_ctrl_val = 0;
++	int p_type = TMD_VID;
++
++	if (!dev || (pipe != 0 && pipe != 2)) {
++		DRM_ERROR("Invalid parameter\n");
++		return;
++	}
++
++	p_type = mdfld_get_panel_type(dev, 0);
++
++	dev_priv = dev->dev_private;
++
++	if (pipe)
++		dsi_config = dev_priv->dsi_configs[1];
++	else
++		dsi_config = dev_priv->dsi_configs[0];
++
++	sender = mdfld_dsi_get_pkg_sender(dsi_config);
++
++	if (!sender) {
++		DRM_ERROR("No sender found\n");
++		return;
++	}
++
++	gen_ctrl_val = (level * 0xff / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff;
++
++	dev_dbg(sender->dev->dev, "pipe = %d, gen_ctrl_val = %d.\n",
++							pipe, gen_ctrl_val);
++
++	if (p_type == TMD_VID) {
++		/* Set display backlight value */
++		mdfld_dsi_send_mcs_short(sender, tmd_write_display_brightness,
++					(u8)gen_ctrl_val, 1, true);
++	} else {
++		/* Set display backlight value */
++		mdfld_dsi_send_mcs_short(sender, write_display_brightness,
++					(u8)gen_ctrl_val, 1, true);
++
++		/* Enable backlight control */
++		if (level == 0)
++			gen_ctrl_val = 0;
++		else
++			gen_ctrl_val = dev_priv->mipi_ctrl_display;
++
++		mdfld_dsi_send_mcs_short(sender, write_ctrl_display,
++					(u8)gen_ctrl_val, 1, true);
++	}
++}
++
++static int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config,
++				u8 dcs, u32 *data, bool hs)
++{
++	struct mdfld_dsi_pkg_sender *sender
++		= mdfld_dsi_get_pkg_sender(dsi_config);
++
++	if (!sender || !data) {
++		DRM_ERROR("Invalid parameter\n");
++		return -EINVAL;
++	}
++
++	return mdfld_dsi_read_mcs(sender, dcs, data, 1, hs);
++}
++
++int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config, u32 *mode,
++			bool hs)
++{
++	if (!dsi_config || !mode) {
++		DRM_ERROR("Invalid parameter\n");
++		return -EINVAL;
++	}
++
++	return mdfld_dsi_get_panel_status(dsi_config, 0x0a, mode, hs);
++}
++
++/*
++ * NOTE: this function was used by OSPM.
++ * TODO: will be removed later, should work out display interfaces for OSPM
++ */
++void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config, int pipe)
++{
++	if (!dsi_config || ((pipe != 0) && (pipe != 2))) {
++		DRM_ERROR("Invalid parameters\n");
++		return;
++	}
++
++	mdfld_dsi_dpi_controller_init(dsi_config, pipe);
++}
++
++static void mdfld_dsi_connector_save(struct drm_connector *connector)
++{
++}
++
++static void mdfld_dsi_connector_restore(struct drm_connector *connector)
++{
++}
++
++/* FIXME: start using the force parameter */
++static enum drm_connector_status
++mdfld_dsi_connector_detect(struct drm_connector *connector, bool force)
++{
++	struct mdfld_dsi_connector *dsi_connector
++		= mdfld_dsi_connector(connector);
++
++	dsi_connector->status = connector_status_connected;
++
++	return dsi_connector->status;
++}
++
++static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
++				struct drm_property *property,
++				uint64_t value)
++{
++	struct drm_encoder *encoder = connector->encoder;
++
++	if (!strcmp(property->name, "scaling mode") && encoder) {
++		struct psb_intel_crtc *psb_crtc =
++					to_psb_intel_crtc(encoder->crtc);
++		bool centerechange;
++		uint64_t val;
++
++		if (!psb_crtc)
++			goto set_prop_error;
++
++		switch (value) {
++		case DRM_MODE_SCALE_FULLSCREEN:
++			break;
++		case DRM_MODE_SCALE_NO_SCALE:
++			break;
++		case DRM_MODE_SCALE_ASPECT:
++			break;
++		default:
++			goto set_prop_error;
++		}
++
++		if (drm_connector_property_get_value(connector, property, &val))
++			goto set_prop_error;
++
++		if (val == value)
++			goto set_prop_done;
++
++		if (drm_connector_property_set_value(connector,
++							property, value))
++			goto set_prop_error;
++
++		centerechange = (val == DRM_MODE_SCALE_NO_SCALE) ||
++			(value == DRM_MODE_SCALE_NO_SCALE);
++
++		if (psb_crtc->saved_mode.hdisplay != 0 &&
++		    psb_crtc->saved_mode.vdisplay != 0) {
++			if (centerechange) {
++				if (!drm_crtc_helper_set_mode(encoder->crtc,
++						&psb_crtc->saved_mode,
++						encoder->crtc->x,
++						encoder->crtc->y,
++						encoder->crtc->fb))
++					goto set_prop_error;
++			} else {
++				struct drm_encoder_helper_funcs *funcs =
++						encoder->helper_private;
++				funcs->mode_set(encoder,
++					&psb_crtc->saved_mode,
++					&psb_crtc->saved_adjusted_mode);
++			}
++		}
++	} else if (!strcmp(property->name, "backlight") && encoder) {
++		if (drm_connector_property_set_value(connector, property,
++									value))
++			goto set_prop_error;
++		else {
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++			struct backlight_device *psb_bd;
++
++			psb_bd = mdfld_get_backlight_device();
++			if (psb_bd) {
++				psb_bd->props.brightness = value;
++				mdfld_set_brightness(psb_bd);
++			}
++#endif
++		}
++	}
++set_prop_done:
++	return 0;
++set_prop_error:
++	return -1;
++}
++
++static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
++{
++	struct mdfld_dsi_connector *dsi_connector =
++					mdfld_dsi_connector(connector);
++	struct mdfld_dsi_pkg_sender *sender;
++
++	if (!dsi_connector)
++		return;
++	drm_sysfs_connector_remove(connector);
++	drm_connector_cleanup(connector);
++	sender = dsi_connector->pkg_sender;
++	mdfld_dsi_pkg_sender_destroy(sender);
++	kfree(dsi_connector);
++}
++
++static int mdfld_dsi_connector_get_modes(struct drm_connector *connector)
++{
++	struct mdfld_dsi_connector *dsi_connector =
++				mdfld_dsi_connector(connector);
++	struct mdfld_dsi_config *dsi_config =
++				mdfld_dsi_get_config(dsi_connector);
++	struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
++	struct drm_display_mode *dup_mode = NULL;
++	struct drm_device *dev = connector->dev;
++
++	connector->display_info.min_vfreq = 0;
++	connector->display_info.max_vfreq = 200;
++	connector->display_info.min_hfreq = 0;
++	connector->display_info.max_hfreq = 200;
++
++	if (fixed_mode) {
++		dev_dbg(dev->dev, "fixed_mode %dx%d\n",
++				fixed_mode->hdisplay, fixed_mode->vdisplay);
++		dup_mode = drm_mode_duplicate(dev, fixed_mode);
++		drm_mode_probed_add(connector, dup_mode);
++		return 1;
++	}
++	DRM_ERROR("Didn't get any modes!\n");
++	return 0;
++}
++
++static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
++						struct drm_display_mode *mode)
++{
++	struct mdfld_dsi_connector *dsi_connector =
++					mdfld_dsi_connector(connector);
++	struct mdfld_dsi_config *dsi_config =
++					mdfld_dsi_get_config(dsi_connector);
++	struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
++
++	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++		return MODE_NO_DBLESCAN;
++
++	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++		return MODE_NO_INTERLACE;
++
++	/**
++	 * FIXME: current DC has no fitting unit, reject any mode setting
++	 * request
++	 * Will figure out a way to do up-scaling (panel fitting) later.
++	 **/
++	if (fixed_mode) {
++		if (mode->hdisplay != fixed_mode->hdisplay)
++			return MODE_PANEL;
++
++		if (mode->vdisplay != fixed_mode->vdisplay)
++			return MODE_PANEL;
++	}
++
++	return MODE_OK;
++}
++
++static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
++{
++	if (mode == connector->dpms)
++		return;
++
++	/*first, execute dpms*/
++
++	drm_helper_connector_dpms(connector, mode);
++}
++
++static struct drm_encoder *mdfld_dsi_connector_best_encoder(
++				struct drm_connector *connector)
++{
++	struct mdfld_dsi_connector *dsi_connector =
++				mdfld_dsi_connector(connector);
++	struct mdfld_dsi_config *dsi_config =
++				mdfld_dsi_get_config(dsi_connector);
++	return &dsi_config->encoder->base.base;
++}
++
++/*DSI connector funcs*/
++static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
++	.dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms,
++	.save = mdfld_dsi_connector_save,
++	.restore = mdfld_dsi_connector_restore,
++	.detect = mdfld_dsi_connector_detect,
++	.fill_modes = drm_helper_probe_single_connector_modes,
++	.set_property = mdfld_dsi_connector_set_property,
++	.destroy = mdfld_dsi_connector_destroy,
++};
++
++/*DSI connector helper funcs*/
++static const struct drm_connector_helper_funcs
++	mdfld_dsi_connector_helper_funcs = {
++	.get_modes = mdfld_dsi_connector_get_modes,
++	.mode_valid = mdfld_dsi_connector_mode_valid,
++	.best_encoder = mdfld_dsi_connector_best_encoder,
++};
++
++static int mdfld_dsi_get_default_config(struct drm_device *dev,
++				struct mdfld_dsi_config *config, int pipe)
++{
++	if (!dev || !config) {
++		DRM_ERROR("Invalid parameters");
++		return -EINVAL;
++	}
++
++	config->bpp = 24;
++	if (mdfld_get_panel_type(dev, pipe) == TC35876X)
++		config->lane_count = 4;
++	else
++		config->lane_count = 2;
++	config->channel_num = 0;
++
++	if (mdfld_get_panel_type(dev, pipe) == TMD_VID)
++		config->video_mode = MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE;
++	else if (mdfld_get_panel_type(dev, pipe) == TC35876X)
++		config->video_mode =
++				MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS;
++	else
++		config->video_mode = MDFLD_DSI_VIDEO_BURST_MODE;
++
++	return 0;
++}
++
++int mdfld_dsi_panel_reset(int pipe)
++{
++	unsigned gpio;
++	int ret = 0;
++
++	switch (pipe) {
++	case 0:
++		gpio = 128;
++		break;
++	case 2:
++		gpio = 34;
++		break;
++	default:
++		DRM_ERROR("Invalid output\n");
++		return -EINVAL;
++	}
++
++	ret = gpio_request(gpio, "gfx");
++	if (ret) {
++		DRM_ERROR("gpio_request failed\n");
++		return ret;
++	}
++
++	ret = gpio_direction_output(gpio, 1);
++	if (ret) {
++		DRM_ERROR("gpio_direction_output failed\n");
++		goto gpio_error;
++	}
++
++	gpio_get_value(gpio);
++
++gpio_error:
++	if (gpio_is_valid(gpio))
++		gpio_free(gpio);
++
++	return ret;
++}
++
++/*
++ * MIPI output init
++ * @dev drm device
++ * @pipe pipe number. 0 or 2
++ * @config
++ *
++ * Do the initialization of a MIPI output, including create DRM mode objects
++ * initialization of DSI output on @pipe
++ */
++void mdfld_dsi_output_init(struct drm_device *dev,
++			   int pipe,
++			   const struct panel_funcs *p_vid_funcs)
++{
++	struct mdfld_dsi_config *dsi_config;
++	struct mdfld_dsi_connector *dsi_connector;
++	struct drm_connector *connector;
++	struct mdfld_dsi_encoder *encoder;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct panel_info dsi_panel_info;
++	u32 width_mm, height_mm;
++
++	dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
++
++	if (!dev || ((pipe != 0) && (pipe != 2))) {
++		DRM_ERROR("Invalid parameter\n");
++		return;
++	}
++
++	/*create a new connector*/
++	dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
++	if (!dsi_connector) {
++		DRM_ERROR("No memory");
++		return;
++	}
++
++	dsi_connector->pipe =  pipe;
++
++	dsi_config = kzalloc(sizeof(struct mdfld_dsi_config),
++			GFP_KERNEL);
++	if (!dsi_config) {
++		DRM_ERROR("cannot allocate memory for DSI config\n");
++		goto dsi_init_err0;
++	}
++	mdfld_dsi_get_default_config(dev, dsi_config, pipe);
++
++	dsi_connector->private = dsi_config;
++
++	dsi_config->changed = 1;
++	dsi_config->dev = dev;
++
++	dsi_config->fixed_mode = p_vid_funcs->get_config_mode(dev);
++	if (p_vid_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
++			goto dsi_init_err0;
++
++	width_mm = dsi_panel_info.width_mm;
++	height_mm = dsi_panel_info.height_mm;
++
++	dsi_config->mode = dsi_config->fixed_mode;
++	dsi_config->connector = dsi_connector;
++
++	if (!dsi_config->fixed_mode) {
++		DRM_ERROR("No pannel fixed mode was found\n");
++		goto dsi_init_err0;
++	}
++
++	if (pipe && dev_priv->dsi_configs[0]) {
++		dsi_config->dvr_ic_inited = 0;
++		dev_priv->dsi_configs[1] = dsi_config;
++	} else if (pipe == 0) {
++		dsi_config->dvr_ic_inited = 1;
++		dev_priv->dsi_configs[0] = dsi_config;
++	} else {
++		DRM_ERROR("Trying to init MIPI1 before MIPI0\n");
++		goto dsi_init_err0;
++	}
++
++
++	connector = &dsi_connector->base.base;
++	drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
++						DRM_MODE_CONNECTOR_LVDS);
++	drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
++
++	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++	connector->display_info.width_mm = width_mm;
++	connector->display_info.height_mm = height_mm;
++	connector->interlace_allowed = false;
++	connector->doublescan_allowed = false;
++
++	/*attach properties*/
++	drm_connector_attach_property(connector,
++				dev->mode_config.scaling_mode_property,
++				DRM_MODE_SCALE_FULLSCREEN);
++	drm_connector_attach_property(connector,
++				dev_priv->backlight_property,
++				MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
++
++	/*init DSI package sender on this output*/
++	if (mdfld_dsi_pkg_sender_init(dsi_connector, pipe)) {
++		DRM_ERROR("Package Sender initialization failed on pipe %d\n",
++									pipe);
++		goto dsi_init_err0;
++	}
++
++	encoder = mdfld_dsi_dpi_init(dev, dsi_connector, p_vid_funcs);
++	if (!encoder) {
++		DRM_ERROR("Create DPI encoder failed\n");
++		goto dsi_init_err1;
++	}
++	encoder->private = dsi_config;
++	dsi_config->encoder = encoder;
++	encoder->base.type = (pipe == 0) ? INTEL_OUTPUT_MIPI :
++		INTEL_OUTPUT_MIPI2;
++	drm_sysfs_connector_add(connector);
++	return;
++
++	/*TODO: add code to destroy outputs on error*/
++dsi_init_err1:
++	/*destroy sender*/
++	mdfld_dsi_pkg_sender_destroy(dsi_connector->pkg_sender);
++
++	drm_connector_cleanup(connector);
++
++	kfree(dsi_config->fixed_mode);
++	kfree(dsi_config);
++dsi_init_err0:
++	kfree(dsi_connector);
++}
+diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+new file mode 100644
+index 0000000..36eb074
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+@@ -0,0 +1,377 @@
++/*
++ * Copyright © 2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * jim liu <jim.liu at intel.com>
++ * Jackie Li<yaodong.li at intel.com>
++ */
++
++#ifndef __MDFLD_DSI_OUTPUT_H__
++#define __MDFLD_DSI_OUTPUT_H__
++
++#include <linux/backlight.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_edid.h>
++
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "mdfld_output.h"
++
++#include <asm/mrst.h>
++
++#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
++#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
++#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
++#define FLD_MOD(orig, val, start, end) \
++	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
++
++#define REG_FLD_MOD(reg, val, start, end) \
++	REG_WRITE(reg, FLD_MOD(REG_READ(reg), val, start, end))
++
++static inline int REGISTER_FLD_WAIT(struct drm_device *dev, u32 reg,
++		u32 val, int start, int end)
++{
++	int t = 100000;
++
++	while (FLD_GET(REG_READ(reg), start, end) != val) {
++		if (--t == 0)
++			return 1;
++	}
++
++	return 0;
++}
++
++#define REG_FLD_WAIT(reg, val, start, end) \
++	REGISTER_FLD_WAIT(dev, reg, val, start, end)
++
++#define REG_BIT_WAIT(reg, val, bitnum) \
++	REGISTER_FLD_WAIT(dev, reg, val, bitnum, bitnum)
++
++#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100
++
++#ifdef DEBUG
++#define CHECK_PIPE(pipe) ({			\
++	const typeof(pipe) __pipe = (pipe);	\
++	BUG_ON(__pipe != 0 && __pipe != 2);	\
++	__pipe;	})
++#else
++#define CHECK_PIPE(pipe) (pipe)
++#endif
++
++/*
++ * Actual MIPIA->MIPIC reg offset is 0x800, value 0x400 is valid for 0 and 2
++ */
++#define REG_OFFSET(pipe) (CHECK_PIPE(pipe) * 0x400)
++
++/* mdfld DSI controller registers */
++#define MIPI_DEVICE_READY_REG(pipe)		(0xb000 + REG_OFFSET(pipe))
++#define MIPI_INTR_STAT_REG(pipe)		(0xb004 + REG_OFFSET(pipe))
++#define MIPI_INTR_EN_REG(pipe)			(0xb008 + REG_OFFSET(pipe))
++#define MIPI_DSI_FUNC_PRG_REG(pipe)		(0xb00c + REG_OFFSET(pipe))
++#define MIPI_HS_TX_TIMEOUT_REG(pipe)		(0xb010 + REG_OFFSET(pipe))
++#define MIPI_LP_RX_TIMEOUT_REG(pipe)		(0xb014 + REG_OFFSET(pipe))
++#define MIPI_TURN_AROUND_TIMEOUT_REG(pipe)	(0xb018 + REG_OFFSET(pipe))
++#define MIPI_DEVICE_RESET_TIMER_REG(pipe)	(0xb01c + REG_OFFSET(pipe))
++#define MIPI_DPI_RESOLUTION_REG(pipe)		(0xb020 + REG_OFFSET(pipe))
++#define MIPI_DBI_FIFO_THROTTLE_REG(pipe)	(0xb024 + REG_OFFSET(pipe))
++#define MIPI_HSYNC_COUNT_REG(pipe)		(0xb028 + REG_OFFSET(pipe))
++#define MIPI_HBP_COUNT_REG(pipe)		(0xb02c + REG_OFFSET(pipe))
++#define MIPI_HFP_COUNT_REG(pipe)		(0xb030 + REG_OFFSET(pipe))
++#define MIPI_HACTIVE_COUNT_REG(pipe)		(0xb034 + REG_OFFSET(pipe))
++#define MIPI_VSYNC_COUNT_REG(pipe)		(0xb038 + REG_OFFSET(pipe))
++#define MIPI_VBP_COUNT_REG(pipe)		(0xb03c + REG_OFFSET(pipe))
++#define MIPI_VFP_COUNT_REG(pipe)		(0xb040 + REG_OFFSET(pipe))
++#define MIPI_HIGH_LOW_SWITCH_COUNT_REG(pipe)	(0xb044 + REG_OFFSET(pipe))
++#define MIPI_DPI_CONTROL_REG(pipe)		(0xb048 + REG_OFFSET(pipe))
++#define MIPI_DPI_DATA_REG(pipe)			(0xb04c + REG_OFFSET(pipe))
++#define MIPI_INIT_COUNT_REG(pipe)		(0xb050 + REG_OFFSET(pipe))
++#define MIPI_MAX_RETURN_PACK_SIZE_REG(pipe)	(0xb054 + REG_OFFSET(pipe))
++#define MIPI_VIDEO_MODE_FORMAT_REG(pipe)	(0xb058 + REG_OFFSET(pipe))
++#define MIPI_EOT_DISABLE_REG(pipe)		(0xb05c + REG_OFFSET(pipe))
++#define MIPI_LP_BYTECLK_REG(pipe)		(0xb060 + REG_OFFSET(pipe))
++#define MIPI_LP_GEN_DATA_REG(pipe)		(0xb064 + REG_OFFSET(pipe))
++#define MIPI_HS_GEN_DATA_REG(pipe)		(0xb068 + REG_OFFSET(pipe))
++#define MIPI_LP_GEN_CTRL_REG(pipe)		(0xb06c + REG_OFFSET(pipe))
++#define MIPI_HS_GEN_CTRL_REG(pipe)		(0xb070 + REG_OFFSET(pipe))
++#define MIPI_GEN_FIFO_STAT_REG(pipe)		(0xb074 + REG_OFFSET(pipe))
++#define MIPI_HS_LS_DBI_ENABLE_REG(pipe)		(0xb078 + REG_OFFSET(pipe))
++#define MIPI_DPHY_PARAM_REG(pipe)		(0xb080 + REG_OFFSET(pipe))
++#define MIPI_DBI_BW_CTRL_REG(pipe)		(0xb084 + REG_OFFSET(pipe))
++#define MIPI_CLK_LANE_SWITCH_TIME_CNT_REG(pipe)	(0xb088 + REG_OFFSET(pipe))
++
++#define MIPI_CTRL_REG(pipe)			(0xb104 + REG_OFFSET(pipe))
++#define MIPI_DATA_ADD_REG(pipe)			(0xb108 + REG_OFFSET(pipe))
++#define MIPI_DATA_LEN_REG(pipe)			(0xb10c + REG_OFFSET(pipe))
++#define MIPI_CMD_ADD_REG(pipe)			(0xb110 + REG_OFFSET(pipe))
++#define MIPI_CMD_LEN_REG(pipe)			(0xb114 + REG_OFFSET(pipe))
++
++/* non-uniform reg offset */
++#define MIPI_PORT_CONTROL(pipe)		(CHECK_PIPE(pipe) ? MIPI_C : MIPI)
++
++#define DSI_DEVICE_READY				(0x1)
++#define DSI_POWER_STATE_ULPS_ENTER			(0x2 << 1)
++#define DSI_POWER_STATE_ULPS_EXIT			(0x1 << 1)
++#define DSI_POWER_STATE_ULPS_OFFSET			(0x1)
++
++
++#define DSI_ONE_DATA_LANE					(0x1)
++#define DSI_TWO_DATA_LANE					(0x2)
++#define DSI_THREE_DATA_LANE					(0X3)
++#define DSI_FOUR_DATA_LANE					(0x4)
++#define DSI_DPI_VIRT_CHANNEL_OFFSET			(0x3)
++#define DSI_DBI_VIRT_CHANNEL_OFFSET			(0x5)
++#define DSI_DPI_COLOR_FORMAT_RGB565			(0x01 << 7)
++#define DSI_DPI_COLOR_FORMAT_RGB666			(0x02 << 7)
++#define DSI_DPI_COLOR_FORMAT_RGB666_UNPACK		(0x03 << 7)
++#define DSI_DPI_COLOR_FORMAT_RGB888			(0x04 << 7)
++#define DSI_DBI_COLOR_FORMAT_OPTION2			(0x05 << 13)
++
++#define DSI_INTR_STATE_RXSOTERROR			BIT(0)
++
++#define DSI_INTR_STATE_SPL_PKG_SENT			BIT(30)
++#define DSI_INTR_STATE_TE				BIT(31)
++
++#define DSI_HS_TX_TIMEOUT_MASK				(0xffffff)
++
++#define DSI_LP_RX_TIMEOUT_MASK				(0xffffff)
++
++#define DSI_TURN_AROUND_TIMEOUT_MASK		(0x3f)
++
++#define DSI_RESET_TIMER_MASK				(0xffff)
++
++#define DSI_DBI_FIFO_WM_HALF				(0x0)
++#define DSI_DBI_FIFO_WM_QUARTER				(0x1)
++#define DSI_DBI_FIFO_WM_LOW					(0x2)
++
++#define DSI_DPI_TIMING_MASK					(0xffff)
++
++#define DSI_INIT_TIMER_MASK					(0xffff)
++
++#define DSI_DBI_RETURN_PACK_SIZE_MASK		(0x3ff)
++
++#define DSI_LP_BYTECLK_MASK					(0x0ffff)
++
++#define DSI_HS_CTRL_GEN_SHORT_W0			(0x03)
++#define DSI_HS_CTRL_GEN_SHORT_W1			(0x13)
++#define DSI_HS_CTRL_GEN_SHORT_W2			(0x23)
++#define DSI_HS_CTRL_GEN_R0					(0x04)
++#define DSI_HS_CTRL_GEN_R1					(0x14)
++#define DSI_HS_CTRL_GEN_R2					(0x24)
++#define DSI_HS_CTRL_GEN_LONG_W				(0x29)
++#define DSI_HS_CTRL_MCS_SHORT_W0			(0x05)
++#define DSI_HS_CTRL_MCS_SHORT_W1			(0x15)
++#define DSI_HS_CTRL_MCS_R0					(0x06)
++#define DSI_HS_CTRL_MCS_LONG_W				(0x39)
++#define DSI_HS_CTRL_VC_OFFSET				(0x06)
++#define DSI_HS_CTRL_WC_OFFSET				(0x08)
++
++#define	DSI_FIFO_GEN_HS_DATA_FULL			BIT(0)
++#define DSI_FIFO_GEN_HS_DATA_HALF_EMPTY		BIT(1)
++#define DSI_FIFO_GEN_HS_DATA_EMPTY			BIT(2)
++#define DSI_FIFO_GEN_LP_DATA_FULL			BIT(8)
++#define DSI_FIFO_GEN_LP_DATA_HALF_EMPTY		BIT(9)
++#define DSI_FIFO_GEN_LP_DATA_EMPTY			BIT(10)
++#define DSI_FIFO_GEN_HS_CTRL_FULL			BIT(16)
++#define DSI_FIFO_GEN_HS_CTRL_HALF_EMPTY		BIT(17)
++#define DSI_FIFO_GEN_HS_CTRL_EMPTY			BIT(18)
++#define DSI_FIFO_GEN_LP_CTRL_FULL			BIT(24)
++#define DSI_FIFO_GEN_LP_CTRL_HALF_EMPTY		BIT(25)
++#define DSI_FIFO_GEN_LP_CTRL_EMPTY			BIT(26)
++#define DSI_FIFO_DBI_EMPTY					BIT(27)
++#define DSI_FIFO_DPI_EMPTY					BIT(28)
++
++#define DSI_DBI_HS_LP_SWITCH_MASK			(0x1)
++
++#define DSI_HS_LP_SWITCH_COUNTER_OFFSET		(0x0)
++#define DSI_LP_HS_SWITCH_COUNTER_OFFSET		(0x16)
++
++#define DSI_DPI_CTRL_HS_SHUTDOWN			(0x00000001)
++#define DSI_DPI_CTRL_HS_TURN_ON				(0x00000002)
++
++/*dsi power modes*/
++#define DSI_POWER_MODE_DISPLAY_ON	BIT(2)
++#define DSI_POWER_MODE_NORMAL_ON	BIT(3)
++#define DSI_POWER_MODE_SLEEP_OUT	BIT(4)
++#define DSI_POWER_MODE_PARTIAL_ON	BIT(5)
++#define DSI_POWER_MODE_IDLE_ON		BIT(6)
++
++enum {
++	MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE = 1,
++	MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS = 2,
++	MDFLD_DSI_VIDEO_BURST_MODE = 3,
++};
++
++#define DSI_DPI_COMPLETE_LAST_LINE			BIT(2)
++#define DSI_DPI_DISABLE_BTA					BIT(3)
++
++struct mdfld_dsi_connector {
++	struct psb_intel_connector base;
++
++	int pipe;
++	void *private;
++	void *pkg_sender;
++
++	/* Connection status */
++	enum drm_connector_status status;
++};
++
++struct mdfld_dsi_encoder {
++	struct psb_intel_encoder base;
++	void *private;
++};
++
++/*
++ * DSI config, consists of one DSI connector, two DSI encoders.
++ * DRM will pick the DSI encoder based on the different configs.
++ */
++struct mdfld_dsi_config {
++	struct drm_device *dev;
++	struct drm_display_mode *fixed_mode;
++	struct drm_display_mode *mode;
++
++	struct mdfld_dsi_connector *connector;
++	struct mdfld_dsi_encoder *encoder;
++
++	int changed;
++
++	int bpp;
++	int lane_count;
++	/*Virtual channel number for this encoder*/
++	int channel_num;
++	/*video mode configure*/
++	int video_mode;
++
++	int dvr_ic_inited;
++};
++
++static inline struct mdfld_dsi_connector *mdfld_dsi_connector(
++		struct drm_connector *connector)
++{
++	struct psb_intel_connector *psb_connector;
++
++	psb_connector = to_psb_intel_connector(connector);
++
++	return container_of(psb_connector, struct mdfld_dsi_connector, base);
++}
++
++static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder(
++		struct drm_encoder *encoder)
++{
++	struct psb_intel_encoder *psb_encoder;
++
++	psb_encoder = to_psb_intel_encoder(encoder);
++
++	return container_of(psb_encoder, struct mdfld_dsi_encoder, base);
++}
++
++static inline struct mdfld_dsi_config *
++	mdfld_dsi_get_config(struct mdfld_dsi_connector *connector)
++{
++	if (!connector)
++		return NULL;
++	return (struct mdfld_dsi_config *)connector->private;
++}
++
++static inline void *mdfld_dsi_get_pkg_sender(struct mdfld_dsi_config *config)
++{
++	struct mdfld_dsi_connector *dsi_connector;
++
++	if (!config)
++		return NULL;
++
++	dsi_connector = config->connector;
++
++	if (!dsi_connector)
++		return NULL;
++
++	return dsi_connector->pkg_sender;
++}
++
++static inline struct mdfld_dsi_config *
++	mdfld_dsi_encoder_get_config(struct mdfld_dsi_encoder *encoder)
++{
++	if (!encoder)
++		return NULL;
++	return (struct mdfld_dsi_config *)encoder->private;
++}
++
++static inline struct mdfld_dsi_connector *
++	mdfld_dsi_encoder_get_connector(struct mdfld_dsi_encoder *encoder)
++{
++	struct mdfld_dsi_config *config;
++
++	if (!encoder)
++		return NULL;
++
++	config = mdfld_dsi_encoder_get_config(encoder);
++	if (!config)
++		return NULL;
++
++	return config->connector;
++}
++
++static inline void *mdfld_dsi_encoder_get_pkg_sender(
++				struct mdfld_dsi_encoder *encoder)
++{
++	struct mdfld_dsi_config *dsi_config;
++
++	dsi_config = mdfld_dsi_encoder_get_config(encoder);
++	if (!dsi_config)
++		return NULL;
++
++	return mdfld_dsi_get_pkg_sender(dsi_config);
++}
++
++static inline int mdfld_dsi_encoder_get_pipe(struct mdfld_dsi_encoder *encoder)
++{
++	struct mdfld_dsi_connector *connector;
++
++	if (!encoder)
++		return -1;
++
++	connector = mdfld_dsi_encoder_get_connector(encoder);
++	if (!connector)
++		return -1;
++	return connector->pipe;
++}
++
++/* Export functions */
++extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev,
++					u32 gen_fifo_stat_reg, u32 fifo_stat);
++extern void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config,
++					int pipe);
++extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe,
++					int level);
++extern void mdfld_dsi_output_init(struct drm_device *dev,
++					int pipe,
++					const struct panel_funcs *p_vid_funcs);
++extern void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config,
++					int pipe);
++
++extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
++					u32 *mode, bool hs);
++extern int mdfld_dsi_panel_reset(int pipe);
++
++#endif /*__MDFLD_DSI_OUTPUT_H__*/
+diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+new file mode 100644
+index 0000000..baa0e14
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+@@ -0,0 +1,694 @@
++/*
++ * Copyright © 2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Jackie Li<yaodong.li at intel.com>
++ */
++
++#include <linux/freezer.h>
++
++#include "mdfld_dsi_output.h"
++#include "mdfld_dsi_pkg_sender.h"
++#include "mdfld_dsi_dpi.h"
++
++#define MDFLD_DSI_READ_MAX_COUNT		5000
++
++enum data_type {
++	DSI_DT_GENERIC_SHORT_WRITE_0	= 0x03,
++	DSI_DT_GENERIC_SHORT_WRITE_1	= 0x13,
++	DSI_DT_GENERIC_SHORT_WRITE_2	= 0x23,
++	DSI_DT_GENERIC_READ_0		= 0x04,
++	DSI_DT_GENERIC_READ_1		= 0x14,
++	DSI_DT_GENERIC_READ_2		= 0x24,
++	DSI_DT_GENERIC_LONG_WRITE	= 0x29,
++	DSI_DT_DCS_SHORT_WRITE_0	= 0x05,
++	DSI_DT_DCS_SHORT_WRITE_1	= 0x15,
++	DSI_DT_DCS_READ			= 0x06,
++	DSI_DT_DCS_LONG_WRITE		= 0x39,
++};
++
++enum {
++	MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
++};
++
++enum {
++	MDFLD_DSI_PKG_SENDER_FREE = 0x0,
++	MDFLD_DSI_PKG_SENDER_BUSY = 0x1,
++};
++
++static const char *const dsi_errors[] = {
++	"RX SOT Error",
++	"RX SOT Sync Error",
++	"RX EOT Sync Error",
++	"RX Escape Mode Entry Error",
++	"RX LP TX Sync Error",
++	"RX HS Receive Timeout Error",
++	"RX False Control Error",
++	"RX ECC Single Bit Error",
++	"RX ECC Multibit Error",
++	"RX Checksum Error",
++	"RX DSI Data Type Not Recognised",
++	"RX DSI VC ID Invalid",
++	"TX False Control Error",
++	"TX ECC Single Bit Error",
++	"TX ECC Multibit Error",
++	"TX Checksum Error",
++	"TX DSI Data Type Not Recognised",
++	"TX DSI VC ID invalid",
++	"High Contention",
++	"Low contention",
++	"DPI FIFO Under run",
++	"HS TX Timeout",
++	"LP RX Timeout",
++	"Turn Around ACK Timeout",
++	"ACK With No Error",
++	"RX Invalid TX Length",
++	"RX Prot Violation",
++	"HS Generic Write FIFO Full",
++	"LP Generic Write FIFO Full",
++	"Generic Read Data Avail",
++	"Special Packet Sent",
++	"Tearing Effect",
++};
++
++static inline int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender,
++						u32 mask)
++{
++	struct drm_device *dev = sender->dev;
++	u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg;
++	int retry = 0xffff;
++
++	while (retry--) {
++		if ((mask & REG_READ(gen_fifo_stat_reg)) == mask)
++			return 0;
++		udelay(100);
++	}
++	DRM_ERROR("fifo is NOT empty 0x%08x\n", REG_READ(gen_fifo_stat_reg));
++	return -EIO;
++}
++
++static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
++{
++	return wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(10) | BIT(18) |
++						BIT(26) | BIT(27) | BIT(28)));
++}
++
++static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
++{
++	return wait_for_gen_fifo_empty(sender, (BIT(10) | BIT(26)));
++}
++
++static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
++{
++	return wait_for_gen_fifo_empty(sender, (BIT(2) | BIT(18)));
++}
++
++static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
++{
++	u32 intr_stat_reg = sender->mipi_intr_stat_reg;
++	struct drm_device *dev = sender->dev;
++
++	dev_dbg(sender->dev->dev, "Handling error 0x%08x\n", mask);
++
++	switch (mask) {
++	case BIT(0):
++	case BIT(1):
++	case BIT(2):
++	case BIT(3):
++	case BIT(4):
++	case BIT(5):
++	case BIT(6):
++	case BIT(7):
++	case BIT(8):
++	case BIT(9):
++	case BIT(10):
++	case BIT(11):
++	case BIT(12):
++	case BIT(13):
++		dev_dbg(sender->dev->dev, "No Action required\n");
++		break;
++	case BIT(14):
++		/*wait for all fifo empty*/
++		/*wait_for_all_fifos_empty(sender)*/;
++		break;
++	case BIT(15):
++		dev_dbg(sender->dev->dev, "No Action required\n");
++		break;
++	case BIT(16):
++		break;
++	case BIT(17):
++		break;
++	case BIT(18):
++	case BIT(19):
++		dev_dbg(sender->dev->dev, "High/Low contention detected\n");
++		/*wait for contention recovery time*/
++		/*mdelay(10);*/
++		/*wait for all fifo empty*/
++		if (0)
++			wait_for_all_fifos_empty(sender);
++		break;
++	case BIT(20):
++		dev_dbg(sender->dev->dev, "No Action required\n");
++		break;
++	case BIT(21):
++		/*wait for all fifo empty*/
++		/*wait_for_all_fifos_empty(sender);*/
++		break;
++	case BIT(22):
++		break;
++	case BIT(23):
++	case BIT(24):
++	case BIT(25):
++	case BIT(26):
++	case BIT(27):
++		dev_dbg(sender->dev->dev, "HS Gen fifo full\n");
++		REG_WRITE(intr_stat_reg, mask);
++		wait_for_hs_fifos_empty(sender);
++		break;
++	case BIT(28):
++		dev_dbg(sender->dev->dev, "LP Gen fifo full\n");
++		REG_WRITE(intr_stat_reg, mask);
++		wait_for_lp_fifos_empty(sender);
++		break;
++	case BIT(29):
++	case BIT(30):
++	case BIT(31):
++		dev_dbg(sender->dev->dev, "No Action required\n");
++		break;
++	}
++
++	if (mask & REG_READ(intr_stat_reg))
++		dev_dbg(sender->dev->dev,
++				"Cannot clean interrupt 0x%08x\n", mask);
++	return 0;
++}
++
++static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender)
++{
++	struct drm_device *dev = sender->dev;
++	u32 intr_stat_reg = sender->mipi_intr_stat_reg;
++	u32 mask;
++	u32 intr_stat;
++	int i;
++	int err = 0;
++
++	intr_stat = REG_READ(intr_stat_reg);
++
++	for (i = 0; i < 32; i++) {
++		mask = (0x00000001UL) << i;
++		if (intr_stat & mask) {
++			dev_dbg(sender->dev->dev, "[DSI]: %s\n", dsi_errors[i]);
++			err = handle_dsi_error(sender, mask);
++			if (err)
++				DRM_ERROR("Cannot handle error\n");
++		}
++	}
++	return err;
++}
++
++static int send_short_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
++			u8 cmd, u8 param, bool hs)
++{
++	struct drm_device *dev = sender->dev;
++	u32 ctrl_reg;
++	u32 val;
++	u8 virtual_channel = 0;
++
++	if (hs) {
++		ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
++
++		/* FIXME: wait_for_hs_fifos_empty(sender); */
++	} else {
++		ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
++
++		/* FIXME: wait_for_lp_fifos_empty(sender); */
++	}
++
++	val = FLD_VAL(param, 23, 16) | FLD_VAL(cmd, 15, 8) |
++		FLD_VAL(virtual_channel, 7, 6) | FLD_VAL(data_type, 5, 0);
++
++	REG_WRITE(ctrl_reg, val);
++
++	return 0;
++}
++
++static int send_long_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
++			u8 *data, int len, bool hs)
++{
++	struct drm_device *dev = sender->dev;
++	u32 ctrl_reg;
++	u32 data_reg;
++	u32 val;
++	u8 *p;
++	u8 b1, b2, b3, b4;
++	u8 virtual_channel = 0;
++	int i;
++
++	if (hs) {
++		ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
++		data_reg = sender->mipi_hs_gen_data_reg;
++
++		/* FIXME: wait_for_hs_fifos_empty(sender); */
++	} else {
++		ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
++		data_reg = sender->mipi_lp_gen_data_reg;
++
++		/* FIXME: wait_for_lp_fifos_empty(sender); */
++	}
++
++	p = data;
++	for (i = 0; i < len / 4; i++) {
++		b1 = *p++;
++		b2 = *p++;
++		b3 = *p++;
++		b4 = *p++;
++
++		REG_WRITE(data_reg, b4 << 24 | b3 << 16 | b2 << 8 | b1);
++	}
++
++	i = len % 4;
++	if (i) {
++		b1 = 0; b2 = 0; b3 = 0;
++
++		switch (i) {
++		case 3:
++			b1 = *p++;
++			b2 = *p++;
++			b3 = *p++;
++			break;
++		case 2:
++			b1 = *p++;
++			b2 = *p++;
++			break;
++		case 1:
++			b1 = *p++;
++			break;
++		}
++
++		REG_WRITE(data_reg, b3 << 16 | b2 << 8 | b1);
++	}
++
++	val = FLD_VAL(len, 23, 8) | FLD_VAL(virtual_channel, 7, 6) |
++		FLD_VAL(data_type, 5, 0);
++
++	REG_WRITE(ctrl_reg, val);
++
++	return 0;
++}
++
++static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
++			u8 *data, u16 len)
++{
++	u8 cmd;
++
++	switch (data_type) {
++	case DSI_DT_DCS_SHORT_WRITE_0:
++	case DSI_DT_DCS_SHORT_WRITE_1:
++	case DSI_DT_DCS_LONG_WRITE:
++		cmd = *data;
++		break;
++	default:
++		return 0;
++	}
++
++	/*this prevents other package sending while doing msleep*/
++	sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
++
++	/*wait for 120 milliseconds in case exit_sleep_mode was just sent*/
++	if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
++		/*TODO: replace it with msleep later*/
++		mdelay(120);
++	}
++
++	if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
++		/*TODO: replace it with msleep later*/
++		mdelay(120);
++	}
++	return 0;
++}
++
++static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
++			u8 *data, u16 len)
++{
++	u8 cmd;
++
++	switch (data_type) {
++	case DSI_DT_DCS_SHORT_WRITE_0:
++	case DSI_DT_DCS_SHORT_WRITE_1:
++	case DSI_DT_DCS_LONG_WRITE:
++		cmd = *data;
++		break;
++	default:
++		return 0;
++	}
++
++	/*update panel status*/
++	if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
++		sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
++		/*TODO: replace it with msleep later*/
++		mdelay(120);
++	} else if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
++		sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
++		/*TODO: replace it with msleep later*/
++		mdelay(120);
++	} else if (unlikely(cmd == DCS_SOFT_RESET)) {
++		/*TODO: replace it with msleep later*/
++		mdelay(5);
++	}
++
++	sender->status = MDFLD_DSI_PKG_SENDER_FREE;
++
++	return 0;
++}
++
++static int send_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
++		u8 *data, u16 len, bool hs)
++{
++	int ret;
++
++	/*handle DSI error*/
++	ret = dsi_error_handler(sender);
++	if (ret) {
++		DRM_ERROR("Error handling failed\n");
++		return -EAGAIN;
++	}
++
++	/* send pkg */
++	if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) {
++		DRM_ERROR("sender is busy\n");
++		return -EAGAIN;
++	}
++
++	ret = send_pkg_prepare(sender, data_type, data, len);
++	if (ret) {
++		DRM_ERROR("send_pkg_prepare error\n");
++		return ret;
++	}
++
++	switch (data_type) {
++	case DSI_DT_GENERIC_SHORT_WRITE_0:
++	case DSI_DT_GENERIC_SHORT_WRITE_1:
++	case DSI_DT_GENERIC_SHORT_WRITE_2:
++	case DSI_DT_GENERIC_READ_0:
++	case DSI_DT_GENERIC_READ_1:
++	case DSI_DT_GENERIC_READ_2:
++	case DSI_DT_DCS_SHORT_WRITE_0:
++	case DSI_DT_DCS_SHORT_WRITE_1:
++	case DSI_DT_DCS_READ:
++		ret = send_short_pkg(sender, data_type, data[0], data[1], hs);
++		break;
++	case DSI_DT_GENERIC_LONG_WRITE:
++	case DSI_DT_DCS_LONG_WRITE:
++		ret = send_long_pkg(sender, data_type, data, len, hs);
++		break;
++	}
++
++	send_pkg_done(sender, data_type, data, len);
++
++	/*FIXME: should I query complete and fifo empty here?*/
++
++	return ret;
++}
++
++int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
++			u32 len, bool hs)
++{
++	unsigned long flags;
++
++	if (!sender || !data || !len) {
++		DRM_ERROR("Invalid parameters\n");
++		return -EINVAL;
++	}
++
++	spin_lock_irqsave(&sender->lock, flags);
++	send_pkg(sender, DSI_DT_DCS_LONG_WRITE, data, len, hs);
++	spin_unlock_irqrestore(&sender->lock, flags);
++
++	return 0;
++}
++
++int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
++			u8 param, u8 param_num, bool hs)
++{
++	u8 data[2];
++	unsigned long flags;
++	u8 data_type;
++
++	if (!sender) {
++		DRM_ERROR("Invalid parameter\n");
++		return -EINVAL;
++	}
++
++	data[0] = cmd;
++
++	if (param_num) {
++		data_type = DSI_DT_DCS_SHORT_WRITE_1;
++		data[1] = param;
++	} else {
++		data_type = DSI_DT_DCS_SHORT_WRITE_0;
++		data[1] = 0;
++	}
++
++	spin_lock_irqsave(&sender->lock, flags);
++	send_pkg(sender, data_type, data, sizeof(data), hs);
++	spin_unlock_irqrestore(&sender->lock, flags);
++
++	return 0;
++}
++
++int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
++			u8 param1, u8 param_num, bool hs)
++{
++	u8 data[2];
++	unsigned long flags;
++	u8 data_type;
++
++	if (!sender || param_num > 2) {
++		DRM_ERROR("Invalid parameter\n");
++		return -EINVAL;
++	}
++
++	switch (param_num) {
++	case 0:
++		data_type = DSI_DT_GENERIC_SHORT_WRITE_0;
++		data[0] = 0;
++		data[1] = 0;
++		break;
++	case 1:
++		data_type = DSI_DT_GENERIC_SHORT_WRITE_1;
++		data[0] = param0;
++		data[1] = 0;
++		break;
++	case 2:
++		data_type = DSI_DT_GENERIC_SHORT_WRITE_2;
++		data[0] = param0;
++		data[1] = param1;
++		break;
++	}
++
++	spin_lock_irqsave(&sender->lock, flags);
++	send_pkg(sender, data_type, data, sizeof(data), hs);
++	spin_unlock_irqrestore(&sender->lock, flags);
++
++	return 0;
++}
++
++int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
++			u32 len, bool hs)
++{
++	unsigned long flags;
++
++	if (!sender || !data || !len) {
++		DRM_ERROR("Invalid parameters\n");
++		return -EINVAL;
++	}
++
++	spin_lock_irqsave(&sender->lock, flags);
++	send_pkg(sender, DSI_DT_GENERIC_LONG_WRITE, data, len, hs);
++	spin_unlock_irqrestore(&sender->lock, flags);
++
++	return 0;
++}
++
++static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
++			u8 *data, u16 len, u32 *data_out, u16 len_out, bool hs)
++{
++	unsigned long flags;
++	struct drm_device *dev = sender->dev;
++	int i;
++	u32 gen_data_reg;
++	int retry = MDFLD_DSI_READ_MAX_COUNT;
++
++	if (!sender || !data_out || !len_out) {
++		DRM_ERROR("Invalid parameters\n");
++		return -EINVAL;
++	}
++
++	/**
++	 * do reading.
++	 * 0) send out generic read request
++	 * 1) polling read data avail interrupt
++	 * 2) read data
++	 */
++	spin_lock_irqsave(&sender->lock, flags);
++
++	REG_WRITE(sender->mipi_intr_stat_reg, BIT(29));
++
++	if ((REG_READ(sender->mipi_intr_stat_reg) & BIT(29)))
++		DRM_ERROR("Can NOT clean read data valid interrupt\n");
++
++	/*send out read request*/
++	send_pkg(sender, data_type, data, len, hs);
++
++	/*polling read data avail interrupt*/
++	while (retry && !(REG_READ(sender->mipi_intr_stat_reg) & BIT(29))) {
++		udelay(100);
++		retry--;
++	}
++
++	if (!retry) {
++		spin_unlock_irqrestore(&sender->lock, flags);
++		return -ETIMEDOUT;
++	}
++
++	REG_WRITE(sender->mipi_intr_stat_reg, BIT(29));
++
++	/*read data*/
++	if (hs)
++		gen_data_reg = sender->mipi_hs_gen_data_reg;
++	else
++		gen_data_reg = sender->mipi_lp_gen_data_reg;
++
++	for (i = 0; i < len_out; i++)
++		*(data_out + i) = REG_READ(gen_data_reg);
++
++	spin_unlock_irqrestore(&sender->lock, flags);
++
++	return 0;
++}
++
++int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
++		u32 *data, u16 len, bool hs)
++{
++	if (!sender || !data || !len) {
++		DRM_ERROR("Invalid parameters\n");
++		return -EINVAL;
++	}
++
++	return __read_panel_data(sender, DSI_DT_DCS_READ, &cmd, 1,
++				data, len, hs);
++}
++
++int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
++								int pipe)
++{
++	struct mdfld_dsi_pkg_sender *pkg_sender;
++	struct mdfld_dsi_config *dsi_config =
++				mdfld_dsi_get_config(dsi_connector);
++	struct drm_device *dev = dsi_config->dev;
++	u32 mipi_val = 0;
++
++	if (!dsi_connector) {
++		DRM_ERROR("Invalid parameter\n");
++		return -EINVAL;
++	}
++
++	pkg_sender = dsi_connector->pkg_sender;
++
++	if (!pkg_sender || IS_ERR(pkg_sender)) {
++		pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender),
++								GFP_KERNEL);
++		if (!pkg_sender) {
++			DRM_ERROR("Create DSI pkg sender failed\n");
++			return -ENOMEM;
++		}
++		dsi_connector->pkg_sender = (void *)pkg_sender;
++	}
++
++	pkg_sender->dev = dev;
++	pkg_sender->dsi_connector = dsi_connector;
++	pkg_sender->pipe = pipe;
++	pkg_sender->pkg_num = 0;
++	pkg_sender->panel_mode = 0;
++	pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
++
++	/*init regs*/
++	if (pipe == 0) {
++		pkg_sender->dpll_reg = MRST_DPLL_A;
++		pkg_sender->dspcntr_reg = DSPACNTR;
++		pkg_sender->pipeconf_reg = PIPEACONF;
++		pkg_sender->dsplinoff_reg = DSPALINOFF;
++		pkg_sender->dspsurf_reg = DSPASURF;
++		pkg_sender->pipestat_reg = PIPEASTAT;
++	} else if (pipe == 2) {
++		pkg_sender->dpll_reg = MRST_DPLL_A;
++		pkg_sender->dspcntr_reg = DSPCCNTR;
++		pkg_sender->pipeconf_reg = PIPECCONF;
++		pkg_sender->dsplinoff_reg = DSPCLINOFF;
++		pkg_sender->dspsurf_reg = DSPCSURF;
++		pkg_sender->pipestat_reg = PIPECSTAT;
++	}
++
++	pkg_sender->mipi_intr_stat_reg = MIPI_INTR_STAT_REG(pipe);
++	pkg_sender->mipi_lp_gen_data_reg = MIPI_LP_GEN_DATA_REG(pipe);
++	pkg_sender->mipi_hs_gen_data_reg = MIPI_HS_GEN_DATA_REG(pipe);
++	pkg_sender->mipi_lp_gen_ctrl_reg = MIPI_LP_GEN_CTRL_REG(pipe);
++	pkg_sender->mipi_hs_gen_ctrl_reg = MIPI_HS_GEN_CTRL_REG(pipe);
++	pkg_sender->mipi_gen_fifo_stat_reg = MIPI_GEN_FIFO_STAT_REG(pipe);
++	pkg_sender->mipi_data_addr_reg = MIPI_DATA_ADD_REG(pipe);
++	pkg_sender->mipi_data_len_reg = MIPI_DATA_LEN_REG(pipe);
++	pkg_sender->mipi_cmd_addr_reg = MIPI_CMD_ADD_REG(pipe);
++	pkg_sender->mipi_cmd_len_reg = MIPI_CMD_LEN_REG(pipe);
++
++	/*init lock*/
++	spin_lock_init(&pkg_sender->lock);
++
++	if (mdfld_get_panel_type(dev, pipe) != TC35876X) {
++		/**
++		 * For video mode, don't enable DPI timing output here,
++		 * will init the DPI timing output during mode setting.
++		 */
++		mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
++
++		if (pipe == 0)
++			mipi_val |= 0x2;
++
++		REG_WRITE(MIPI_PORT_CONTROL(pipe), mipi_val);
++		REG_READ(MIPI_PORT_CONTROL(pipe));
++
++		/* do dsi controller init */
++		mdfld_dsi_controller_init(dsi_config, pipe);
++	}
++
++	return 0;
++}
++
++void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender)
++{
++	if (!sender || IS_ERR(sender))
++		return;
++
++	/*free*/
++	kfree(sender);
++}
++
++
+diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
+new file mode 100644
+index 0000000..459cd7e
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
+@@ -0,0 +1,92 @@
++/*
++ * Copyright © 2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Jackie Li<yaodong.li at intel.com>
++ */
++#ifndef __MDFLD_DSI_PKG_SENDER_H__
++#define __MDFLD_DSI_PKG_SENDER_H__
++
++#include <linux/kthread.h>
++
++#define MDFLD_MAX_DCS_PARAM	8
++
++struct mdfld_dsi_pkg_sender {
++	struct drm_device *dev;
++	struct mdfld_dsi_connector *dsi_connector;
++	u32 status;
++	u32 panel_mode;
++
++	int pipe;
++
++	spinlock_t lock;
++
++	u32 pkg_num;
++
++	/* Registers */
++	u32 dpll_reg;
++	u32 dspcntr_reg;
++	u32 pipeconf_reg;
++	u32 pipestat_reg;
++	u32 dsplinoff_reg;
++	u32 dspsurf_reg;
++
++	u32 mipi_intr_stat_reg;
++	u32 mipi_lp_gen_data_reg;
++	u32 mipi_hs_gen_data_reg;
++	u32 mipi_lp_gen_ctrl_reg;
++	u32 mipi_hs_gen_ctrl_reg;
++	u32 mipi_gen_fifo_stat_reg;
++	u32 mipi_data_addr_reg;
++	u32 mipi_data_len_reg;
++	u32 mipi_cmd_addr_reg;
++	u32 mipi_cmd_len_reg;
++};
++
++/* DCS definitions */
++#define DCS_SOFT_RESET			0x01
++#define DCS_ENTER_SLEEP_MODE		0x10
++#define DCS_EXIT_SLEEP_MODE		0x11
++#define DCS_SET_DISPLAY_OFF		0x28
++#define DCS_SET_DISPLAY_ON		0x29
++#define DCS_SET_COLUMN_ADDRESS		0x2a
++#define DCS_SET_PAGE_ADDRESS		0x2b
++#define DCS_WRITE_MEM_START		0x2c
++#define DCS_SET_TEAR_OFF		0x34
++#define DCS_SET_TEAR_ON			0x35
++
++extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
++					int pipe);
++extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
++int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
++					u8 param, u8 param_num, bool hs);
++int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
++					u32 len, bool hs);
++int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
++					u8 param1, u8 param_num, bool hs);
++int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
++					u32 len, bool hs);
++/* Read interfaces */
++int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
++		u32 *data, u16 len, bool hs);
++
++#endif
+diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
+new file mode 100644
+index 0000000..a35a292
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
+@@ -0,0 +1,1180 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ *	Eric Anholt <eric at anholt.net>
++ */
++
++#include <linux/i2c.h>
++#include <linux/pm_runtime.h>
++
++#include <drm/drmP.h>
++#include "psb_intel_reg.h"
++#include "psb_intel_display.h"
++#include "framebuffer.h"
++#include "mdfld_output.h"
++#include "mdfld_dsi_output.h"
++
++/* Hardcoded currently */
++static int ksel = KSEL_CRYSTAL_19;
++
++struct psb_intel_range_t {
++	int min, max;
++};
++
++struct mrst_limit_t {
++	struct psb_intel_range_t dot, m, p1;
++};
++
++struct mrst_clock_t {
++	/* derived values */
++	int dot;
++	int m;
++	int p1;
++};
++
++#define COUNT_MAX 0x10000000
++
++void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
++{
++	int count, temp;
++	u32 pipeconf_reg = PIPEACONF;
++
++	switch (pipe) {
++	case 0:
++		break;
++	case 1:
++		pipeconf_reg = PIPEBCONF;
++		break;
++	case 2:
++		pipeconf_reg = PIPECCONF;
++		break;
++	default:
++		DRM_ERROR("Illegal Pipe Number.\n");
++		return;
++	}
++
++	/* FIXME JLIU7_PO */
++	psb_intel_wait_for_vblank(dev);
++	return;
++
++	/* Wait for the pipe disable to take effect. */
++	for (count = 0; count < COUNT_MAX; count++) {
++		temp = REG_READ(pipeconf_reg);
++		if ((temp & PIPEACONF_PIPE_STATE) == 0)
++			break;
++	}
++}
++
++void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
++{
++	int count, temp;
++	u32 pipeconf_reg = PIPEACONF;
++
++	switch (pipe) {
++	case 0:
++		break;
++	case 1:
++		pipeconf_reg = PIPEBCONF;
++		break;
++	case 2:
++		pipeconf_reg = PIPECCONF;
++		break;
++	default:
++		DRM_ERROR("Illegal Pipe Number.\n");
++		return;
++	}
++
++	/* FIXME JLIU7_PO */
++	psb_intel_wait_for_vblank(dev);
++	return;
++
++	/* Wait for the pipe enable to take effect. */
++	for (count = 0; count < COUNT_MAX; count++) {
++		temp = REG_READ(pipeconf_reg);
++		if ((temp & PIPEACONF_PIPE_STATE) == 1)
++			break;
++	}
++}
++
++static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
++{
++	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++}
++
++static void psb_intel_crtc_commit(struct drm_crtc *crtc)
++{
++	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
++}
++
++static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
++				  struct drm_display_mode *mode,
++				  struct drm_display_mode *adjusted_mode)
++{
++	return true;
++}
++
++/**
++ * Return the pipe currently connected to the panel fitter,
++ * or -1 if the panel fitter is not present or not in use
++ */
++static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
++{
++	u32 pfit_control;
++
++	pfit_control = REG_READ(PFIT_CONTROL);
++
++	/* See if the panel fitter is in use */
++	if ((pfit_control & PFIT_ENABLE) == 0)
++		return -1;
++
++	/* 965 can place panel fitter on either pipe */
++	return (pfit_control >> 29) & 0x3;
++}
++
++static struct drm_device globle_dev;
++
++void mdfld__intel_plane_set_alpha(int enable)
++{
++	struct drm_device *dev = &globle_dev;
++	int dspcntr_reg = DSPACNTR;
++	u32 dspcntr;
++
++	dspcntr = REG_READ(dspcntr_reg);
++
++	if (enable) {
++		dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA;
++		dspcntr |= DISPPLANE_32BPP;
++	} else {
++		dspcntr &= ~DISPPLANE_32BPP;
++		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
++	}
++
++	REG_WRITE(dspcntr_reg, dspcntr);
++}
++
++static int check_fb(struct drm_framebuffer *fb)
++{
++	if (!fb)
++		return 0;
++
++	switch (fb->bits_per_pixel) {
++	case 8:
++	case 16:
++	case 24:
++	case 32:
++		return 0;
++	default:
++		DRM_ERROR("Unknown color depth\n");
++		return -EINVAL;
++	}
++}
++
++static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
++				struct drm_framebuffer *old_fb)
++{
++	struct drm_device *dev = crtc->dev;
++	/* struct drm_i915_master_private *master_priv; */
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
++	int pipe = psb_intel_crtc->pipe;
++	unsigned long start, offset;
++	int dsplinoff = DSPALINOFF;
++	int dspsurf = DSPASURF;
++	int dspstride = DSPASTRIDE;
++	int dspcntr_reg = DSPACNTR;
++	u32 dspcntr;
++	int ret;
++
++	memcpy(&globle_dev, dev, sizeof(struct drm_device));
++
++	dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);
++
++	/* no fb bound */
++	if (!crtc->fb) {
++		dev_dbg(dev->dev, "No FB bound\n");
++		return 0;
++	}
++
++	ret = check_fb(crtc->fb);
++	if (ret)
++		return ret;
++
++	switch (pipe) {
++	case 0:
++		dsplinoff = DSPALINOFF;
++		break;
++	case 1:
++		dsplinoff = DSPBLINOFF;
++		dspsurf = DSPBSURF;
++		dspstride = DSPBSTRIDE;
++		dspcntr_reg = DSPBCNTR;
++		break;
++	case 2:
++		dsplinoff = DSPCLINOFF;
++		dspsurf = DSPCSURF;
++		dspstride = DSPCSTRIDE;
++		dspcntr_reg = DSPCCNTR;
++		break;
++	default:
++		DRM_ERROR("Illegal Pipe Number.\n");
++		return -EINVAL;
++	}
++
++	if (!gma_power_begin(dev, true))
++		return 0;
++
++	start = psbfb->gtt->offset;
++	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
++
++	REG_WRITE(dspstride, crtc->fb->pitches[0]);
++	dspcntr = REG_READ(dspcntr_reg);
++	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
++
++	switch (crtc->fb->bits_per_pixel) {
++	case 8:
++		dspcntr |= DISPPLANE_8BPP;
++		break;
++	case 16:
++		if (crtc->fb->depth == 15)
++			dspcntr |= DISPPLANE_15_16BPP;
++		else
++			dspcntr |= DISPPLANE_16BPP;
++		break;
++	case 24:
++	case 32:
++		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
++		break;
++	}
++	REG_WRITE(dspcntr_reg, dspcntr);
++
++	dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
++						start, offset, x, y);
++	REG_WRITE(dsplinoff, offset);
++	REG_READ(dsplinoff);
++	REG_WRITE(dspsurf, start);
++	REG_READ(dspsurf);
++
++	gma_power_end(dev);
++
++	return 0;
++}
++
++/*
++ * Disable the pipe, plane and pll.
++ *
++ */
++void mdfld_disable_crtc(struct drm_device *dev, int pipe)
++{
++	int dpll_reg = MRST_DPLL_A;
++	int dspcntr_reg = DSPACNTR;
++	int dspbase_reg = MRST_DSPABASE;
++	int pipeconf_reg = PIPEACONF;
++	u32 temp;
++
++	dev_dbg(dev->dev, "pipe = %d\n", pipe);
++
++
++	switch (pipe) {
++	case 0:
++		break;
++	case 1:
++		dpll_reg = MDFLD_DPLL_B;
++		dspcntr_reg = DSPBCNTR;
++		dspbase_reg = DSPBSURF;
++		pipeconf_reg = PIPEBCONF;
++		break;
++	case 2:
++		dpll_reg = MRST_DPLL_A;
++		dspcntr_reg = DSPCCNTR;
++		dspbase_reg = MDFLD_DSPCBASE;
++		pipeconf_reg = PIPECCONF;
++		break;
++	default:
++		DRM_ERROR("Illegal Pipe Number.\n");
++		return;
++	}
++
++	if (pipe != 1)
++		mdfld_dsi_gen_fifo_ready(dev, MIPI_GEN_FIFO_STAT_REG(pipe),
++				HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++
++	/* Disable display plane */
++	temp = REG_READ(dspcntr_reg);
++	if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++		REG_WRITE(dspcntr_reg,
++			  temp & ~DISPLAY_PLANE_ENABLE);
++		/* Flush the plane changes */
++		REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++		REG_READ(dspbase_reg);
++	}
++
++	/* FIXME_JLIU7 MDFLD_PO revisit */
++
++	/* Next, disable display pipes */
++	temp = REG_READ(pipeconf_reg);
++	if ((temp & PIPEACONF_ENABLE) != 0) {
++		temp &= ~PIPEACONF_ENABLE;
++		temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
++		REG_WRITE(pipeconf_reg, temp);
++		REG_READ(pipeconf_reg);
++
++		/* Wait for the pipe disable to take effect. */
++		mdfldWaitForPipeDisable(dev, pipe);
++	}
++
++	temp = REG_READ(dpll_reg);
++	if (temp & DPLL_VCO_ENABLE) {
++		if ((pipe != 1 &&
++			!((REG_READ(PIPEACONF) | REG_READ(PIPECCONF))
++				& PIPEACONF_ENABLE)) || pipe == 1) {
++			temp &= ~(DPLL_VCO_ENABLE);
++			REG_WRITE(dpll_reg, temp);
++			REG_READ(dpll_reg);
++			/* Wait for the clocks to turn off. */
++			/* FIXME_MDFLD PO may need more delay */
++			udelay(500);
++
++			if (!(temp & MDFLD_PWR_GATE_EN)) {
++				/* gating power of DPLL */
++				REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
++				/* FIXME_MDFLD PO - change 500 to 1 after PO */
++				udelay(5000);
++			}
++		}
++	}
++
++}
++
++/**
++ * Sets the power management mode of the pipe and plane.
++ *
++ * This code should probably grow support for turning the cursor off and back
++ * on appropriately at the same time as we're turning the pipe off/on.
++ */
++static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int pipe = psb_intel_crtc->pipe;
++	int dpll_reg = MRST_DPLL_A;
++	int dspcntr_reg = DSPACNTR;
++	int dspbase_reg = MRST_DSPABASE;
++	int pipeconf_reg = PIPEACONF;
++	u32 pipestat_reg = PIPEASTAT;
++	u32 pipeconf = dev_priv->pipeconf[pipe];
++	u32 temp;
++	int timeout = 0;
++
++	dev_dbg(dev->dev, "mode = %d, pipe = %d\n", mode, pipe);
++
++/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */
++/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */
++
++	switch (pipe) {
++	case 0:
++		break;
++	case 1:
++		dpll_reg = DPLL_B;
++		dspcntr_reg = DSPBCNTR;
++		dspbase_reg = MRST_DSPBBASE;
++		pipeconf_reg = PIPEBCONF;
++		dpll_reg = MDFLD_DPLL_B;
++		break;
++	case 2:
++		dpll_reg = MRST_DPLL_A;
++		dspcntr_reg = DSPCCNTR;
++		dspbase_reg = MDFLD_DSPCBASE;
++		pipeconf_reg = PIPECCONF;
++		pipestat_reg = PIPECSTAT;
++		break;
++	default:
++		DRM_ERROR("Illegal Pipe Number.\n");
++		return;
++	}
++
++	if (!gma_power_begin(dev, true))
++		return;
++
++	/* XXX: When our outputs are all unaware of DPMS modes other than off
++	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++	 */
++	switch (mode) {
++	case DRM_MODE_DPMS_ON:
++	case DRM_MODE_DPMS_STANDBY:
++	case DRM_MODE_DPMS_SUSPEND:
++		/* Enable the DPLL */
++		temp = REG_READ(dpll_reg);
++
++		if ((temp & DPLL_VCO_ENABLE) == 0) {
++			/* When ungating power of DPLL, needs to wait 0.5us
++			   before enable the VCO */
++			if (temp & MDFLD_PWR_GATE_EN) {
++				temp &= ~MDFLD_PWR_GATE_EN;
++				REG_WRITE(dpll_reg, temp);
++				/* FIXME_MDFLD PO - change 500 to 1 after PO */
++				udelay(500);
++			}
++
++			REG_WRITE(dpll_reg, temp);
++			REG_READ(dpll_reg);
++			/* FIXME_MDFLD PO - change 500 to 1 after PO */
++			udelay(500);
++
++			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++			REG_READ(dpll_reg);
++
++			/**
++			 * wait for DSI PLL to lock
++			 * NOTE: only need to poll status of pipe 0 and pipe 1,
++			 * since both MIPI pipes share the same PLL.
++			 */
++			while ((pipe != 2) && (timeout < 20000) &&
++			  !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
++				udelay(150);
++				timeout++;
++			}
++		}
++
++		/* Enable the plane */
++		temp = REG_READ(dspcntr_reg);
++		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++			REG_WRITE(dspcntr_reg,
++				temp | DISPLAY_PLANE_ENABLE);
++			/* Flush the plane changes */
++			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++		}
++
++		/* Enable the pipe */
++		temp = REG_READ(pipeconf_reg);
++		if ((temp & PIPEACONF_ENABLE) == 0) {
++			REG_WRITE(pipeconf_reg, pipeconf);
++
++			/* Wait for the pipe enable to take effect. */
++			mdfldWaitForPipeEnable(dev, pipe);
++		}
++
++		/*workaround for sighting 3741701 Random X blank display*/
++		/*perform w/a in video mode only on pipe A or C*/
++		if (pipe == 0 || pipe == 2) {
++			REG_WRITE(pipestat_reg, REG_READ(pipestat_reg));
++			msleep(100);
++			if (PIPE_VBLANK_STATUS & REG_READ(pipestat_reg))
++				dev_dbg(dev->dev, "OK");
++			else {
++				dev_dbg(dev->dev, "STUCK!!!!");
++				/*shutdown controller*/
++				temp = REG_READ(dspcntr_reg);
++				REG_WRITE(dspcntr_reg,
++						temp & ~DISPLAY_PLANE_ENABLE);
++				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++				/*mdfld_dsi_dpi_shut_down(dev, pipe);*/
++				REG_WRITE(0xb048, 1);
++				msleep(100);
++				temp = REG_READ(pipeconf_reg);
++				temp &= ~PIPEACONF_ENABLE;
++				REG_WRITE(pipeconf_reg, temp);
++				msleep(100); /*wait for pipe disable*/
++				REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 0);
++				msleep(100);
++				REG_WRITE(0xb004, REG_READ(0xb004));
++				/* try to bring the controller back up again*/
++				REG_WRITE(MIPI_DEVICE_READY_REG(pipe), 1);
++				temp = REG_READ(dspcntr_reg);
++				REG_WRITE(dspcntr_reg,
++						temp | DISPLAY_PLANE_ENABLE);
++				REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++				/*mdfld_dsi_dpi_turn_on(dev, pipe);*/
++				REG_WRITE(0xb048, 2);
++				msleep(100);
++				temp = REG_READ(pipeconf_reg);
++				temp |= PIPEACONF_ENABLE;
++				REG_WRITE(pipeconf_reg, temp);
++			}
++		}
++
++		psb_intel_crtc_load_lut(crtc);
++
++		/* Give the overlay scaler a chance to enable
++		   if it's on this pipe */
++		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
++
++		break;
++	case DRM_MODE_DPMS_OFF:
++		/* Give the overlay scaler a chance to disable
++		 * if it's on this pipe */
++		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
++		if (pipe != 1)
++			mdfld_dsi_gen_fifo_ready(dev,
++				MIPI_GEN_FIFO_STAT_REG(pipe),
++				HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
++
++		/* Disable the VGA plane that we never use */
++		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++		/* Disable display plane */
++		temp = REG_READ(dspcntr_reg);
++		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++			REG_WRITE(dspcntr_reg,
++				  temp & ~DISPLAY_PLANE_ENABLE);
++			/* Flush the plane changes */
++			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++			REG_READ(dspbase_reg);
++		}
++
++		/* Next, disable display pipes */
++		temp = REG_READ(pipeconf_reg);
++		if ((temp & PIPEACONF_ENABLE) != 0) {
++			temp &= ~PIPEACONF_ENABLE;
++			temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
++			REG_WRITE(pipeconf_reg, temp);
++			REG_READ(pipeconf_reg);
++
++			/* Wait for the pipe disable to take effect. */
++			mdfldWaitForPipeDisable(dev, pipe);
++		}
++
++		temp = REG_READ(dpll_reg);
++		if (temp & DPLL_VCO_ENABLE) {
++			if ((pipe != 1 && !((REG_READ(PIPEACONF)
++				| REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
++					|| pipe == 1) {
++				temp &= ~(DPLL_VCO_ENABLE);
++				REG_WRITE(dpll_reg, temp);
++				REG_READ(dpll_reg);
++				/* Wait for the clocks to turn off. */
++				/* FIXME_MDFLD PO may need more delay */
++				udelay(500);
++			}
++		}
++		break;
++	}
++	gma_power_end(dev);
++}
++
++
++#define MDFLD_LIMT_DPLL_19	    0
++#define MDFLD_LIMT_DPLL_25	    1
++#define MDFLD_LIMT_DPLL_83	    2
++#define MDFLD_LIMT_DPLL_100	    3
++#define MDFLD_LIMT_DSIPLL_19	    4
++#define MDFLD_LIMT_DSIPLL_25	    5
++#define MDFLD_LIMT_DSIPLL_83	    6
++#define MDFLD_LIMT_DSIPLL_100	    7
++
++#define MDFLD_DOT_MIN		  19750
++#define MDFLD_DOT_MAX		  120000
++#define MDFLD_DPLL_M_MIN_19	    113
++#define MDFLD_DPLL_M_MAX_19	    155
++#define MDFLD_DPLL_P1_MIN_19	    2
++#define MDFLD_DPLL_P1_MAX_19	    10
++#define MDFLD_DPLL_M_MIN_25	    101
++#define MDFLD_DPLL_M_MAX_25	    130
++#define MDFLD_DPLL_P1_MIN_25	    2
++#define MDFLD_DPLL_P1_MAX_25	    10
++#define MDFLD_DPLL_M_MIN_83	    64
++#define MDFLD_DPLL_M_MAX_83	    64
++#define MDFLD_DPLL_P1_MIN_83	    2
++#define MDFLD_DPLL_P1_MAX_83	    2
++#define MDFLD_DPLL_M_MIN_100	    64
++#define MDFLD_DPLL_M_MAX_100	    64
++#define MDFLD_DPLL_P1_MIN_100	    2
++#define MDFLD_DPLL_P1_MAX_100	    2
++#define MDFLD_DSIPLL_M_MIN_19	    131
++#define MDFLD_DSIPLL_M_MAX_19	    175
++#define MDFLD_DSIPLL_P1_MIN_19	    3
++#define MDFLD_DSIPLL_P1_MAX_19	    8
++#define MDFLD_DSIPLL_M_MIN_25	    97
++#define MDFLD_DSIPLL_M_MAX_25	    140
++#define MDFLD_DSIPLL_P1_MIN_25	    3
++#define MDFLD_DSIPLL_P1_MAX_25	    9
++#define MDFLD_DSIPLL_M_MIN_83	    33
++#define MDFLD_DSIPLL_M_MAX_83	    92
++#define MDFLD_DSIPLL_P1_MIN_83	    2
++#define MDFLD_DSIPLL_P1_MAX_83	    3
++#define MDFLD_DSIPLL_M_MIN_100	    97
++#define MDFLD_DSIPLL_M_MAX_100	    140
++#define MDFLD_DSIPLL_P1_MIN_100	    3
++#define MDFLD_DSIPLL_P1_MAX_100	    9
++
++static const struct mrst_limit_t mdfld_limits[] = {
++	{			/* MDFLD_LIMT_DPLL_19 */
++	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++	 .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19},
++	 .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19},
++	 },
++	{			/* MDFLD_LIMT_DPLL_25 */
++	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++	 .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25},
++	 .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25},
++	 },
++	{			/* MDFLD_LIMT_DPLL_83 */
++	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++	 .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83},
++	 .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83},
++	 },
++	{			/* MDFLD_LIMT_DPLL_100 */
++	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++	 .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100},
++	 .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100},
++	 },
++	{			/* MDFLD_LIMT_DSIPLL_19 */
++	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++	 .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19},
++	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19},
++	 },
++	{			/* MDFLD_LIMT_DSIPLL_25 */
++	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++	 .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25},
++	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25},
++	 },
++	{			/* MDFLD_LIMT_DSIPLL_83 */
++	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++	 .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83},
++	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83},
++	 },
++	{			/* MDFLD_LIMT_DSIPLL_100 */
++	 .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
++	 .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100},
++	 .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100},
++	 },
++};
++
++#define MDFLD_M_MIN	    21
++#define MDFLD_M_MAX	    180
++static const u32 mdfld_m_converts[] = {
++/* M configuration table from 9-bit LFSR table */
++	224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
++	173, 342, 171, 85, 298, 149, 74, 37, 18, 265,   /* 31 - 40 */
++	388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
++	83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */
++	341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
++	461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
++	106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
++	71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */
++	253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
++	478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
++	477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
++	210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */
++	145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
++	380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
++	103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */
++	396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */
++};
++
++static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
++{
++	const struct mrst_limit_t *limit = NULL;
++	struct drm_device *dev = crtc->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
++	    || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
++		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
++			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
++		else if (ksel == KSEL_BYPASS_25)
++			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25];
++		else if ((ksel == KSEL_BYPASS_83_100) &&
++				(dev_priv->core_freq == 166))
++			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83];
++		else if ((ksel == KSEL_BYPASS_83_100) &&
++			 (dev_priv->core_freq == 100 ||
++				dev_priv->core_freq == 200))
++			limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
++	} else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
++		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
++			limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
++		else if (ksel == KSEL_BYPASS_25)
++			limit = &mdfld_limits[MDFLD_LIMT_DPLL_25];
++		else if ((ksel == KSEL_BYPASS_83_100) &&
++				(dev_priv->core_freq == 166))
++			limit = &mdfld_limits[MDFLD_LIMT_DPLL_83];
++		else if ((ksel == KSEL_BYPASS_83_100) &&
++				 (dev_priv->core_freq == 100 ||
++				 dev_priv->core_freq == 200))
++			limit = &mdfld_limits[MDFLD_LIMT_DPLL_100];
++	} else {
++		limit = NULL;
++		dev_dbg(dev->dev, "mdfld_limit Wrong display type.\n");
++	}
++
++	return limit;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
++static void mdfld_clock(int refclk, struct mrst_clock_t *clock)
++{
++	clock->dot = (refclk * clock->m) / clock->p1;
++}
++
++/**
++ * Returns a set of divisors for the desired target clock with the given refclk,
++ * or FALSE.  Divisor values are the actual divisors for the dot clock.
++ */
++static bool
++mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
++		struct mrst_clock_t *best_clock)
++{
++	struct mrst_clock_t clock;
++	const struct mrst_limit_t *limit = mdfld_limit(crtc);
++	int err = target;
++
++	memset(best_clock, 0, sizeof(*best_clock));
++
++	for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
++		for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
++		     clock.p1++) {
++			int this_err;
++
++			mdfld_clock(refclk, &clock);
++
++			this_err = abs(clock.dot - target);
++			if (this_err < err) {
++				*best_clock = clock;
++				err = this_err;
++			}
++		}
++	}
++	return err != target;
++}
++
++static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
++			      struct drm_display_mode *mode,
++			      struct drm_display_mode *adjusted_mode,
++			      int x, int y,
++			      struct drm_framebuffer *old_fb)
++{
++	struct drm_device *dev = crtc->dev;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	int pipe = psb_intel_crtc->pipe;
++	int fp_reg = MRST_FPA0;
++	int dpll_reg = MRST_DPLL_A;
++	int dspcntr_reg = DSPACNTR;
++	int pipeconf_reg = PIPEACONF;
++	int htot_reg = HTOTAL_A;
++	int hblank_reg = HBLANK_A;
++	int hsync_reg = HSYNC_A;
++	int vtot_reg = VTOTAL_A;
++	int vblank_reg = VBLANK_A;
++	int vsync_reg = VSYNC_A;
++	int dspsize_reg = DSPASIZE;
++	int dsppos_reg = DSPAPOS;
++	int pipesrc_reg = PIPEASRC;
++	u32 *pipeconf = &dev_priv->pipeconf[pipe];
++	u32 *dspcntr = &dev_priv->dspcntr[pipe];
++	int refclk = 0;
++	int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
++								clk_tmp = 0;
++	struct mrst_clock_t clock;
++	bool ok;
++	u32 dpll = 0, fp = 0;
++	bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
++	struct drm_mode_config *mode_config = &dev->mode_config;
++	struct psb_intel_encoder *psb_intel_encoder = NULL;
++	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
++	struct drm_encoder *encoder;
++	struct drm_connector *connector;
++	int timeout = 0;
++	int ret;
++
++	dev_dbg(dev->dev, "pipe = 0x%x\n", pipe);
++
++#if 0
++	if (pipe == 1) {
++		if (!gma_power_begin(dev, true))
++			return 0;
++		android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode,
++			x, y, old_fb);
++		goto mrst_crtc_mode_set_exit;
++	}
++#endif
++
++	switch (pipe) {
++	case 0:
++		break;
++	case 1:
++		fp_reg = FPB0;
++		dpll_reg = DPLL_B;
++		dspcntr_reg = DSPBCNTR;
++		pipeconf_reg = PIPEBCONF;
++		htot_reg = HTOTAL_B;
++		hblank_reg = HBLANK_B;
++		hsync_reg = HSYNC_B;
++		vtot_reg = VTOTAL_B;
++		vblank_reg = VBLANK_B;
++		vsync_reg = VSYNC_B;
++		dspsize_reg = DSPBSIZE;
++		dsppos_reg = DSPBPOS;
++		pipesrc_reg = PIPEBSRC;
++		fp_reg = MDFLD_DPLL_DIV0;
++		dpll_reg = MDFLD_DPLL_B;
++		break;
++	case 2:
++		dpll_reg = MRST_DPLL_A;
++		dspcntr_reg = DSPCCNTR;
++		pipeconf_reg = PIPECCONF;
++		htot_reg = HTOTAL_C;
++		hblank_reg = HBLANK_C;
++		hsync_reg = HSYNC_C;
++		vtot_reg = VTOTAL_C;
++		vblank_reg = VBLANK_C;
++		vsync_reg = VSYNC_C;
++		dspsize_reg = DSPCSIZE;
++		dsppos_reg = DSPCPOS;
++		pipesrc_reg = PIPECSRC;
++		break;
++	default:
++		DRM_ERROR("Illegal Pipe Number.\n");
++		return 0;
++	}
++
++	ret = check_fb(crtc->fb);
++	if (ret)
++		return ret;
++
++	dev_dbg(dev->dev, "adjusted_hdisplay = %d\n",
++		 adjusted_mode->hdisplay);
++	dev_dbg(dev->dev, "adjusted_vdisplay = %d\n",
++		 adjusted_mode->vdisplay);
++	dev_dbg(dev->dev, "adjusted_hsync_start = %d\n",
++		 adjusted_mode->hsync_start);
++	dev_dbg(dev->dev, "adjusted_hsync_end = %d\n",
++		 adjusted_mode->hsync_end);
++	dev_dbg(dev->dev, "adjusted_htotal = %d\n",
++		 adjusted_mode->htotal);
++	dev_dbg(dev->dev, "adjusted_vsync_start = %d\n",
++		 adjusted_mode->vsync_start);
++	dev_dbg(dev->dev, "adjusted_vsync_end = %d\n",
++		 adjusted_mode->vsync_end);
++	dev_dbg(dev->dev, "adjusted_vtotal = %d\n",
++		 adjusted_mode->vtotal);
++	dev_dbg(dev->dev, "adjusted_clock = %d\n",
++		 adjusted_mode->clock);
++	dev_dbg(dev->dev, "hdisplay = %d\n",
++		 mode->hdisplay);
++	dev_dbg(dev->dev, "vdisplay = %d\n",
++		 mode->vdisplay);
++
++	if (!gma_power_begin(dev, true))
++		return 0;
++
++	memcpy(&psb_intel_crtc->saved_mode, mode,
++					sizeof(struct drm_display_mode));
++	memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode,
++					sizeof(struct drm_display_mode));
++
++	list_for_each_entry(connector, &mode_config->connector_list, head) {
++		if (!connector)
++			continue;
++
++		encoder = connector->encoder;
++
++		if (!encoder)
++			continue;
++
++		if (encoder->crtc != crtc)
++			continue;
++
++		psb_intel_encoder = psb_intel_attached_encoder(connector);
++
++		switch (psb_intel_encoder->type) {
++		case INTEL_OUTPUT_MIPI:
++			is_mipi = true;
++			break;
++		case INTEL_OUTPUT_MIPI2:
++			is_mipi2 = true;
++			break;
++		case INTEL_OUTPUT_HDMI:
++			is_hdmi = true;
++			break;
++		}
++	}
++
++	/* Disable the VGA plane that we never use */
++	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++	/* Disable the panel fitter if it was on our pipe */
++	if (psb_intel_panel_fitter_pipe(dev) == pipe)
++		REG_WRITE(PFIT_CONTROL, 0);
++
++	/* pipesrc and dspsize control the size that is scaled from,
++	 * which should always be the user's requested size.
++	 */
++	if (pipe == 1) {
++		/* FIXME: To make HDMI display with 864x480 (TPO), 480x864
++		 * (PYR) or 480x854 (TMD), set the sprite width/height and
++		 * source image size registers with the adjusted mode for
++		 * pipe B.
++		 */
++
++		/*
++		 * The defined sprite rectangle must always be completely
++		 * contained within the displayable area of the screen image
++		 * (frame buffer).
++		 */
++		REG_WRITE(dspsize_reg, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
++				| (min(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
++		/* Set the CRTC with encoder mode. */
++		REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16)
++				 | (mode->crtc_vdisplay - 1));
++	} else {
++		REG_WRITE(dspsize_reg,
++				((mode->crtc_vdisplay - 1) << 16) |
++						(mode->crtc_hdisplay - 1));
++		REG_WRITE(pipesrc_reg,
++				((mode->crtc_hdisplay - 1) << 16) |
++						(mode->crtc_vdisplay - 1));
++	}
++
++	REG_WRITE(dsppos_reg, 0);
++
++	if (psb_intel_encoder)
++		drm_connector_property_get_value(connector,
++			dev->mode_config.scaling_mode_property, &scalingType);
++
++	if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
++		/* Medfield doesn't have register support for centering so we
++		 * need to mess with the h/vblank and h/vsync start and ends
++		 * to get centering
++		 */
++		int offsetX = 0, offsetY = 0;
++
++		offsetX = (adjusted_mode->crtc_hdisplay -
++					mode->crtc_hdisplay) / 2;
++		offsetY = (adjusted_mode->crtc_vdisplay -
++					mode->crtc_vdisplay) / 2;
++
++		REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
++			((adjusted_mode->crtc_htotal - 1) << 16));
++		REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
++			((adjusted_mode->crtc_vtotal - 1) << 16));
++		REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start -
++								offsetX - 1) |
++			((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
++		REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start -
++								offsetX - 1) |
++			((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
++		REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start -
++								offsetY - 1) |
++			((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
++		REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start -
++								offsetY - 1) |
++			((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
++	} else {
++		REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++			((adjusted_mode->crtc_htotal - 1) << 16));
++		REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++			((adjusted_mode->crtc_vtotal - 1) << 16));
++		REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++			((adjusted_mode->crtc_hblank_end - 1) << 16));
++		REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++			((adjusted_mode->crtc_hsync_end - 1) << 16));
++		REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++			((adjusted_mode->crtc_vblank_end - 1) << 16));
++		REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++			((adjusted_mode->crtc_vsync_end - 1) << 16));
++	}
++
++	/* Flush the plane changes */
++	{
++		struct drm_crtc_helper_funcs *crtc_funcs =
++		    crtc->helper_private;
++		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
++	}
++
++	/* setup pipeconf */
++	*pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
++
++	/* Set up the display plane register */
++	*dspcntr = REG_READ(dspcntr_reg);
++	*dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
++	*dspcntr |= DISPLAY_PLANE_ENABLE;
++
++	if (is_mipi2)
++		goto mrst_crtc_mode_set_exit;
++	clk = adjusted_mode->clock;
++
++	if (is_hdmi) {
++		if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19)) {
++			refclk = 19200;
++
++			if (is_mipi || is_mipi2)
++				clk_n = 1, clk_p2 = 8;
++			else if (is_hdmi)
++				clk_n = 1, clk_p2 = 10;
++		} else if (ksel == KSEL_BYPASS_25) {
++			refclk = 25000;
++
++			if (is_mipi || is_mipi2)
++				clk_n = 1, clk_p2 = 8;
++			else if (is_hdmi)
++				clk_n = 1, clk_p2 = 10;
++		} else if ((ksel == KSEL_BYPASS_83_100) &&
++					dev_priv->core_freq == 166) {
++			refclk = 83000;
++
++			if (is_mipi || is_mipi2)
++				clk_n = 4, clk_p2 = 8;
++			else if (is_hdmi)
++				clk_n = 4, clk_p2 = 10;
++		} else if ((ksel == KSEL_BYPASS_83_100) &&
++					(dev_priv->core_freq == 100 ||
++					dev_priv->core_freq == 200)) {
++			refclk = 100000;
++			if (is_mipi || is_mipi2)
++				clk_n = 4, clk_p2 = 8;
++			else if (is_hdmi)
++				clk_n = 4, clk_p2 = 10;
++		}
++
++		if (is_mipi)
++			clk_byte = dev_priv->bpp / 8;
++		else if (is_mipi2)
++			clk_byte = dev_priv->bpp2 / 8;
++
++		clk_tmp = clk * clk_n * clk_p2 * clk_byte;
++
++		dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d.\n",
++					clk, clk_n, clk_p2);
++		dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d.\n",
++					adjusted_mode->clock, clk_tmp);
++
++		ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);
++
++		if (!ok) {
++			DRM_ERROR
++			    ("mdfldFindBestPLL fail in mdfld_crtc_mode_set.\n");
++		} else {
++			m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];
++
++			dev_dbg(dev->dev, "dot clock = %d,"
++				 "m = %d, p1 = %d, m_conv = %d.\n",
++					clock.dot, clock.m,
++					clock.p1, m_conv);
++		}
++
++		dpll = REG_READ(dpll_reg);
++
++		if (dpll & DPLL_VCO_ENABLE) {
++			dpll &= ~DPLL_VCO_ENABLE;
++			REG_WRITE(dpll_reg, dpll);
++			REG_READ(dpll_reg);
++
++			/* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
++			/* FIXME_MDFLD PO - change 500 to 1 after PO */
++			udelay(500);
++
++			/* reset M1, N1 & P1 */
++			REG_WRITE(fp_reg, 0);
++			dpll &= ~MDFLD_P1_MASK;
++			REG_WRITE(dpll_reg, dpll);
++			/* FIXME_MDFLD PO - change 500 to 1 after PO */
++			udelay(500);
++		}
++
++		/* When ungating power of DPLL, needs to wait 0.5us before
++		 * enable the VCO */
++		if (dpll & MDFLD_PWR_GATE_EN) {
++			dpll &= ~MDFLD_PWR_GATE_EN;
++			REG_WRITE(dpll_reg, dpll);
++			/* FIXME_MDFLD PO - change 500 to 1 after PO */
++			udelay(500);
++		}
++		dpll = 0;
++
++#if 0 /* FIXME revisit later */
++		if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19 ||
++						ksel == KSEL_BYPASS_25)
++			dpll &= ~MDFLD_INPUT_REF_SEL;
++		else if (ksel == KSEL_BYPASS_83_100)
++			dpll |= MDFLD_INPUT_REF_SEL;
++#endif /* FIXME revisit later */
++
++		if (is_hdmi)
++			dpll |= MDFLD_VCO_SEL;
++
++		fp = (clk_n / 2) << 16;
++		fp |= m_conv;
++
++		/* compute bitmask from p1 value */
++		dpll |= (1 << (clock.p1 - 2)) << 17;
++
++#if 0 /* 1080p30 & 720p */
++		dpll = 0x00050000;
++		fp = 0x000001be;
++#endif
++#if 0 /* 480p */
++		dpll = 0x02010000;
++		fp = 0x000000d2;
++#endif
++	} else {
++#if 0 /*DBI_TPO_480x864*/
++		dpll = 0x00020000;
++		fp = 0x00000156;
++#endif /* DBI_TPO_480x864 */ /* get from spec. */
++
++		dpll = 0x00800000;
++		fp = 0x000000c1;
++	}
++
++	REG_WRITE(fp_reg, fp);
++	REG_WRITE(dpll_reg, dpll);
++	/* FIXME_MDFLD PO - change 500 to 1 after PO */
++	udelay(500);
++
++	dpll |= DPLL_VCO_ENABLE;
++	REG_WRITE(dpll_reg, dpll);
++	REG_READ(dpll_reg);
++
++	/* wait for DSI PLL to lock */
++	while (timeout < 20000 &&
++			!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
++		udelay(150);
++		timeout++;
++	}
++
++	if (is_mipi)
++		goto mrst_crtc_mode_set_exit;
++
++	dev_dbg(dev->dev, "is_mipi = 0x%x\n", is_mipi);
++
++	REG_WRITE(pipeconf_reg, *pipeconf);
++	REG_READ(pipeconf_reg);
++
++	/* Wait for the pipe enable to take effect. */
++	REG_WRITE(dspcntr_reg, *dspcntr);
++	psb_intel_wait_for_vblank(dev);
++
++mrst_crtc_mode_set_exit:
++
++	gma_power_end(dev);
++
++	return 0;
++}
++
++const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
++	.dpms = mdfld_crtc_dpms,
++	.mode_fixup = psb_intel_crtc_mode_fixup,
++	.mode_set = mdfld_crtc_mode_set,
++	.mode_set_base = mdfld__intel_pipe_set_base,
++	.prepare = psb_intel_crtc_prepare,
++	.commit = psb_intel_crtc_commit,
++};
++
+diff --git a/drivers/gpu/drm/gma500/mdfld_output.c b/drivers/gpu/drm/gma500/mdfld_output.c
+new file mode 100644
+index 0000000..c95966b
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mdfld_output.c
+@@ -0,0 +1,74 @@
++/*
++ * Copyright (c)  2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Thomas Eaton <thomas.g.eaton at intel.com>
++ * Scott Rowe <scott.m.rowe at intel.com>
++*/
++
++#include "mdfld_output.h"
++#include "mdfld_dsi_dpi.h"
++#include "mdfld_dsi_output.h"
++
++#include "tc35876x-dsi-lvds.h"
++
++int mdfld_get_panel_type(struct drm_device *dev, int pipe)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	return dev_priv->mdfld_panel_id;
++}
++
++static void mdfld_init_panel(struct drm_device *dev, int mipi_pipe,
++								int p_type)
++{
++	switch (p_type) {
++	case TPO_VID:
++		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tpo_vid_funcs);
++		break;
++	case TC35876X:
++		tc35876x_init(dev);
++		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tc35876x_funcs);
++		break;
++	case TMD_VID:
++		mdfld_dsi_output_init(dev, mipi_pipe, &mdfld_tmd_vid_funcs);
++		break;
++	case HDMI:
++/*		if (dev_priv->mdfld_hdmi_present)
++			mdfld_hdmi_init(dev, &dev_priv->mode_dev); */
++		break;
++	}
++}
++
++
++int mdfld_output_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	/* FIXME: hardcoded for now */
++	dev_priv->mdfld_panel_id = TC35876X;
++	/* MIPI panel 1 */
++	mdfld_init_panel(dev, 0, dev_priv->mdfld_panel_id);
++	/* HDMI panel */
++	mdfld_init_panel(dev, 1, HDMI);
++	return 0;
++}
++
+diff --git a/drivers/gpu/drm/gma500/mdfld_output.h b/drivers/gpu/drm/gma500/mdfld_output.h
+new file mode 100644
+index 0000000..ab2b27c
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mdfld_output.h
+@@ -0,0 +1,77 @@
++/*
++ * Copyright (c)  2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Thomas Eaton <thomas.g.eaton at intel.com>
++ * Scott Rowe <scott.m.rowe at intel.com>
++*/
++
++#ifndef MDFLD_OUTPUT_H
++#define MDFLD_OUTPUT_H
++
++#include "psb_drv.h"
++
++#define TPO_PANEL_WIDTH		84
++#define TPO_PANEL_HEIGHT	46
++#define TMD_PANEL_WIDTH		39
++#define TMD_PANEL_HEIGHT	71
++
++struct mdfld_dsi_config;
++
++enum panel_type {
++	TPO_VID,
++	TMD_VID,
++	HDMI,
++	TC35876X,
++};
++
++struct panel_info {
++	u32 width_mm;
++	u32 height_mm;
++	/* Other info */
++};
++
++struct panel_funcs {
++	const struct drm_encoder_funcs *encoder_funcs;
++	const struct drm_encoder_helper_funcs *encoder_helper_funcs;
++	struct drm_display_mode * (*get_config_mode)(struct drm_device *);
++	int (*get_panel_info)(struct drm_device *, int, struct panel_info *);
++	int (*reset)(int pipe);
++	void (*drv_ic_init)(struct mdfld_dsi_config *dsi_config, int pipe);
++};
++
++int mdfld_output_init(struct drm_device *dev);
++
++struct backlight_device *mdfld_get_backlight_device(void);
++int mdfld_set_brightness(struct backlight_device *bd);
++
++int mdfld_get_panel_type(struct drm_device *dev, int pipe);
++
++extern const struct drm_crtc_helper_funcs mdfld_helper_funcs;
++
++extern const struct panel_funcs mdfld_tmd_vid_funcs;
++extern const struct panel_funcs mdfld_tpo_vid_funcs;
++
++extern void mdfld_disable_crtc(struct drm_device *dev, int pipe);
++extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe);
++extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe);
++#endif
+diff --git a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
+new file mode 100644
+index 0000000..dc0c6c3
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
+@@ -0,0 +1,201 @@
++/*
++ * Copyright © 2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Jim Liu <jim.liu at intel.com>
++ * Jackie Li<yaodong.li at intel.com>
++ * Gideon Eaton <eaton.
++ * Scott Rowe <scott.m.rowe at intel.com>
++ */
++
++#include "mdfld_dsi_dpi.h"
++#include "mdfld_dsi_pkg_sender.h"
++
++static struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev)
++{
++	struct drm_display_mode *mode;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
++	bool use_gct = false; /*Disable GCT for now*/
++
++	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++	if (!mode)
++		return NULL;
++
++	if (use_gct) {
++		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
++		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
++		mode->hsync_start = mode->hdisplay + \
++				((ti->hsync_offset_hi << 8) | \
++				ti->hsync_offset_lo);
++		mode->hsync_end = mode->hsync_start + \
++				((ti->hsync_pulse_width_hi << 8) | \
++				ti->hsync_pulse_width_lo);
++		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
++								ti->hblank_lo);
++		mode->vsync_start = \
++			mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
++						ti->vsync_offset_lo);
++		mode->vsync_end = \
++			mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
++						ti->vsync_pulse_width_lo);
++		mode->vtotal = mode->vdisplay + \
++				((ti->vblank_hi << 8) | ti->vblank_lo);
++		mode->clock = ti->pixel_clock * 10;
++
++		dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
++		dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
++		dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
++		dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
++		dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
++		dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
++		dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
++		dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
++		dev_dbg(dev->dev, "clock is %d\n", mode->clock);
++	} else {
++		mode->hdisplay = 480;
++		mode->vdisplay = 854;
++		mode->hsync_start = 487;
++		mode->hsync_end = 490;
++		mode->htotal = 499;
++		mode->vsync_start = 861;
++		mode->vsync_end = 865;
++		mode->vtotal = 873;
++		mode->clock = 33264;
++	}
++
++	drm_mode_set_name(mode);
++	drm_mode_set_crtcinfo(mode, 0);
++
++	mode->type |= DRM_MODE_TYPE_PREFERRED;
++
++	return mode;
++}
++
++static int tmd_vid_get_panel_info(struct drm_device *dev,
++				int pipe,
++				struct panel_info *pi)
++{
++	if (!dev || !pi)
++		return -EINVAL;
++
++	pi->width_mm = TMD_PANEL_WIDTH;
++	pi->height_mm = TMD_PANEL_HEIGHT;
++
++	return 0;
++}
++
++/* ************************************************************************* *\
++ * FUNCTION: mdfld_init_TMD_MIPI
++ *
++ * DESCRIPTION:  This function is called only by mrst_dsi_mode_set and
++ *               restore_display_registers.  since this function does not
++ *               acquire the mutex, it is important that the calling function
++ *               does!
++\* ************************************************************************* */
++
++/* FIXME: make the below data u8 instead of u32; note byte order! */
++static u32 tmd_cmd_mcap_off[] = {0x000000b2};
++static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef};
++static u32 tmd_cmd_set_lane_num[] = {0x006360ef};
++static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef};
++static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef};
++static u32 tmd_cmd_set_mode[] = {0x000000b3};
++static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef};
++static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df};
++static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055};
++static u32 tmd_cmd_set_video_mode[] = {0x00000153};
++/* no auto_bl, need to add in future */
++static u32 tmd_cmd_enable_backlight[] = {0x00005ab4};
++static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd};
++
++static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config,
++				      int pipe)
++{
++	struct mdfld_dsi_pkg_sender *sender
++			= mdfld_dsi_get_pkg_sender(dsi_config);
++
++	DRM_INFO("Enter mdfld init TMD MIPI display.\n");
++
++	if (!sender) {
++		DRM_ERROR("Cannot get sender\n");
++		return;
++	}
++
++	if (dsi_config->dvr_ic_inited)
++		return;
++
++	msleep(3);
++
++	/* FIXME: make the below data u8 instead of u32; note byte order! */
++
++	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_mcap_off,
++				sizeof(tmd_cmd_mcap_off), false);
++	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_lane_switch,
++				sizeof(tmd_cmd_enable_lane_switch), false);
++	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_lane_num,
++				sizeof(tmd_cmd_set_lane_num), false);
++	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock0,
++				sizeof(tmd_cmd_pushing_clock0), false);
++	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_pushing_clock1,
++				sizeof(tmd_cmd_pushing_clock1), false);
++	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_mode,
++				sizeof(tmd_cmd_set_mode), false);
++	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_sync_pulse_mode,
++				sizeof(tmd_cmd_set_sync_pulse_mode), false);
++	mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_column,
++				sizeof(tmd_cmd_set_column), false);
++	mdfld_dsi_send_mcs_long(sender, (u8 *) tmd_cmd_set_page,
++				sizeof(tmd_cmd_set_page), false);
++	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_video_mode,
++				sizeof(tmd_cmd_set_video_mode), false);
++	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_enable_backlight,
++				sizeof(tmd_cmd_enable_backlight), false);
++	mdfld_dsi_send_gen_long(sender, (u8 *) tmd_cmd_set_backlight_dimming,
++				sizeof(tmd_cmd_set_backlight_dimming), false);
++
++	dsi_config->dvr_ic_inited = 1;
++}
++
++/*TPO DPI encoder helper funcs*/
++static const struct drm_encoder_helper_funcs
++				mdfld_tpo_dpi_encoder_helper_funcs = {
++	.dpms = mdfld_dsi_dpi_dpms,
++	.mode_fixup = mdfld_dsi_dpi_mode_fixup,
++	.prepare = mdfld_dsi_dpi_prepare,
++	.mode_set = mdfld_dsi_dpi_mode_set,
++	.commit = mdfld_dsi_dpi_commit,
++};
++
++/*TPO DPI encoder funcs*/
++static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
++	.destroy = drm_encoder_cleanup,
++};
++
++const struct panel_funcs mdfld_tmd_vid_funcs = {
++	.encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
++	.encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
++	.get_config_mode = &tmd_vid_get_config_mode,
++	.get_panel_info = tmd_vid_get_panel_info,
++	.reset = mdfld_dsi_panel_reset,
++	.drv_ic_init = mdfld_dsi_tmd_drv_ic_init,
++};
+diff --git a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
+new file mode 100644
+index 0000000..d8d4170
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
+@@ -0,0 +1,124 @@
++/*
++ * Copyright © 2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * jim liu <jim.liu at intel.com>
++ * Jackie Li<yaodong.li at intel.com>
++ */
++
++#include "mdfld_dsi_dpi.h"
++
++static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev)
++{
++	struct drm_display_mode *mode;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
++	bool use_gct = false;
++
++	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++	if (!mode)
++		return NULL;
++
++	if (use_gct) {
++		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
++		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
++		mode->hsync_start = mode->hdisplay +
++				((ti->hsync_offset_hi << 8) |
++				ti->hsync_offset_lo);
++		mode->hsync_end = mode->hsync_start +
++				((ti->hsync_pulse_width_hi << 8) |
++				ti->hsync_pulse_width_lo);
++		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) |
++								ti->hblank_lo);
++		mode->vsync_start =
++			mode->vdisplay + ((ti->vsync_offset_hi << 8) |
++						ti->vsync_offset_lo);
++		mode->vsync_end =
++			mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) |
++						ti->vsync_pulse_width_lo);
++		mode->vtotal = mode->vdisplay +
++				((ti->vblank_hi << 8) | ti->vblank_lo);
++		mode->clock = ti->pixel_clock * 10;
++
++		dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
++		dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
++		dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
++		dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
++		dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
++		dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
++		dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
++		dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
++		dev_dbg(dev->dev, "clock is %d\n", mode->clock);
++	} else {
++		mode->hdisplay = 864;
++		mode->vdisplay = 480;
++		mode->hsync_start = 873;
++		mode->hsync_end = 876;
++		mode->htotal = 887;
++		mode->vsync_start = 487;
++		mode->vsync_end = 490;
++		mode->vtotal = 499;
++		mode->clock = 33264;
++	}
++
++	drm_mode_set_name(mode);
++	drm_mode_set_crtcinfo(mode, 0);
++
++	mode->type |= DRM_MODE_TYPE_PREFERRED;
++
++	return mode;
++}
++
++static int tpo_vid_get_panel_info(struct drm_device *dev,
++				int pipe,
++				struct panel_info *pi)
++{
++	if (!dev || !pi)
++		return -EINVAL;
++
++	pi->width_mm = TPO_PANEL_WIDTH;
++	pi->height_mm = TPO_PANEL_HEIGHT;
++
++	return 0;
++}
++
++/*TPO DPI encoder helper funcs*/
++static const struct drm_encoder_helper_funcs
++				mdfld_tpo_dpi_encoder_helper_funcs = {
++	.dpms = mdfld_dsi_dpi_dpms,
++	.mode_fixup = mdfld_dsi_dpi_mode_fixup,
++	.prepare = mdfld_dsi_dpi_prepare,
++	.mode_set = mdfld_dsi_dpi_mode_set,
++	.commit = mdfld_dsi_dpi_commit,
++};
++
++/*TPO DPI encoder funcs*/
++static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
++	.destroy = drm_encoder_cleanup,
++};
++
++const struct panel_funcs mdfld_tpo_vid_funcs = {
++	.encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
++	.encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
++	.get_config_mode = &tpo_vid_get_config_mode,
++	.get_panel_info = tpo_vid_get_panel_info,
++};
+diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
+new file mode 100644
+index 0000000..5eee9ad
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mid_bios.c
+@@ -0,0 +1,263 @@
++/**************************************************************************
++ * Copyright (c) 2011, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++/* TODO
++ * - Split functions by vbt type
++ * - Make them all take drm_device
++ * - Check ioremap failures
++ */
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "gma_drm.h"
++#include "psb_drv.h"
++#include "mid_bios.h"
++
++static void mid_get_fuse_settings(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++	uint32_t fuse_value = 0;
++	uint32_t fuse_value_tmp = 0;
++
++#define FB_REG06 0xD0810600
++#define FB_MIPI_DISABLE  (1 << 11)
++#define FB_REG09 0xD0810900
++#define FB_REG09 0xD0810900
++#define FB_SKU_MASK  0x7000
++#define FB_SKU_SHIFT 12
++#define FB_SKU_100 0
++#define FB_SKU_100L 1
++#define FB_SKU_83 2
++	if (pci_root == NULL) {
++		WARN_ON(1);
++		return;
++	}
++
++
++	pci_write_config_dword(pci_root, 0xD0, FB_REG06);
++	pci_read_config_dword(pci_root, 0xD4, &fuse_value);
++
++	/* FB_MIPI_DISABLE doesn't mean LVDS on with Medfield */
++	if (IS_MRST(dev))
++		dev_priv->iLVDS_enable = fuse_value & FB_MIPI_DISABLE;
++
++	DRM_INFO("internal display is %s\n",
++		 dev_priv->iLVDS_enable ? "LVDS display" : "MIPI display");
++
++	 /* Prevent runtime suspend at start*/
++	 if (dev_priv->iLVDS_enable) {
++		dev_priv->is_lvds_on = true;
++		dev_priv->is_mipi_on = false;
++	} else {
++		dev_priv->is_mipi_on = true;
++		dev_priv->is_lvds_on = false;
++	}
++
++	dev_priv->video_device_fuse = fuse_value;
++
++	pci_write_config_dword(pci_root, 0xD0, FB_REG09);
++	pci_read_config_dword(pci_root, 0xD4, &fuse_value);
++
++	dev_dbg(dev->dev, "SKU values is 0x%x.\n", fuse_value);
++	fuse_value_tmp = (fuse_value & FB_SKU_MASK) >> FB_SKU_SHIFT;
++
++	dev_priv->fuse_reg_value = fuse_value;
++
++	switch (fuse_value_tmp) {
++	case FB_SKU_100:
++		dev_priv->core_freq = 200;
++		break;
++	case FB_SKU_100L:
++		dev_priv->core_freq = 100;
++		break;
++	case FB_SKU_83:
++		dev_priv->core_freq = 166;
++		break;
++	default:
++		dev_warn(dev->dev, "Invalid SKU values, SKU value = 0x%08x\n",
++								fuse_value_tmp);
++		dev_priv->core_freq = 0;
++	}
++	dev_dbg(dev->dev, "LNC core clk is %dMHz.\n", dev_priv->core_freq);
++	pci_dev_put(pci_root);
++}
++
++/*
++ *	Get the revison ID, B0:D2:F0;0x08
++ */
++static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
++{
++	uint32_t platform_rev_id = 0;
++	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
++
++	if (pci_gfx_root == NULL) {
++		WARN_ON(1);
++		return;
++	}
++	pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
++	dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
++	pci_dev_put(pci_gfx_root);
++	dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
++					dev_priv->platform_rev_id);
++}
++
++static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
++{
++	struct drm_device *dev = dev_priv->dev;
++	struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
++	u32 addr;
++	u16 new_size;
++	u8 *vbt_virtual;
++	u8 bpi;
++	u8 number_desc = 0;
++	struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
++	struct gct_r10_timing_info ti;
++	void *pGCT;
++	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
++
++	/* Get the address of the platform config vbt, B0:D2:F0;0xFC */
++	pci_read_config_dword(pci_gfx_root, 0xFC, &addr);
++	pci_dev_put(pci_gfx_root);
++
++	dev_dbg(dev->dev, "drm platform config address is %x\n", addr);
++
++	/* check for platform config address == 0. */
++	/* this means fw doesn't support vbt */
++
++	if (addr == 0) {
++		vbt->size = 0;
++		return;
++	}
++
++	/* get the virtual address of the vbt */
++	vbt_virtual = ioremap(addr, sizeof(*vbt));
++	if (vbt_virtual == NULL) {
++		vbt->size = 0;
++		return;
++	}
++
++	memcpy(vbt, vbt_virtual, sizeof(*vbt));
++	iounmap(vbt_virtual); /* Free virtual address space */
++
++	/* No matching signature don't process the data */
++	if (memcmp(vbt->signature, "$GCT", 4)) {
++		vbt->size = 0;
++		return;
++	}
++
++	dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);
++
++	switch (vbt->revision) {
++	case 0:
++		vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
++					vbt->size - sizeof(*vbt) + 4);
++		pGCT = vbt->oaktrail_gct;
++		bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
++		dev_priv->gct_data.bpi = bpi;
++		dev_priv->gct_data.pt =
++			((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
++		memcpy(&dev_priv->gct_data.DTD,
++			&((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
++				sizeof(struct oaktrail_timing_info));
++		dev_priv->gct_data.Panel_Port_Control =
++		  ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
++		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
++			((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
++		break;
++	case 1:
++		vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
++					vbt->size - sizeof(*vbt) + 4);
++		pGCT = vbt->oaktrail_gct;
++		bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
++		dev_priv->gct_data.bpi = bpi;
++		dev_priv->gct_data.pt =
++			((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
++		memcpy(&dev_priv->gct_data.DTD,
++			&((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
++				sizeof(struct oaktrail_timing_info));
++		dev_priv->gct_data.Panel_Port_Control =
++		  ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
++		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
++			((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
++		break;
++	case 0x10:
++		/*header definition changed from rev 01 (v2) to rev 10h. */
++		/*so, some values have changed location*/
++		new_size = vbt->checksum; /*checksum contains lo size byte*/
++		/*LSB of oaktrail_gct contains hi size byte*/
++		new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;
++
++		vbt->checksum = vbt->size; /*size contains the checksum*/
++		if (new_size > 0xff)
++			vbt->size = 0xff; /*restrict size to 255*/
++		else
++			vbt->size = new_size;
++
++		/* number of descriptors defined in the GCT */
++		number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
++		bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
++		vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
++				GCT_R10_DISPLAY_DESC_SIZE * number_desc);
++		pGCT = vbt->oaktrail_gct;
++		pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
++		dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
++
++		/*copy the GCT display timings into a temp structure*/
++		memcpy(&ti, pGCT, sizeof(struct gct_r10_timing_info));
++
++		/*now copy the temp struct into the dev_priv->gct_data*/
++		dp_ti->pixel_clock = ti.pixel_clock;
++		dp_ti->hactive_hi = ti.hactive_hi;
++		dp_ti->hactive_lo = ti.hactive_lo;
++		dp_ti->hblank_hi = ti.hblank_hi;
++		dp_ti->hblank_lo = ti.hblank_lo;
++		dp_ti->hsync_offset_hi = ti.hsync_offset_hi;
++		dp_ti->hsync_offset_lo = ti.hsync_offset_lo;
++		dp_ti->hsync_pulse_width_hi = ti.hsync_pulse_width_hi;
++		dp_ti->hsync_pulse_width_lo = ti.hsync_pulse_width_lo;
++		dp_ti->vactive_hi = ti.vactive_hi;
++		dp_ti->vactive_lo = ti.vactive_lo;
++		dp_ti->vblank_hi = ti.vblank_hi;
++		dp_ti->vblank_lo = ti.vblank_lo;
++		dp_ti->vsync_offset_hi = ti.vsync_offset_hi;
++		dp_ti->vsync_offset_lo = ti.vsync_offset_lo;
++		dp_ti->vsync_pulse_width_hi = ti.vsync_pulse_width_hi;
++		dp_ti->vsync_pulse_width_lo = ti.vsync_pulse_width_lo;
++
++		/* Move the MIPI_Display_Descriptor data from GCT to dev priv */
++		dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
++							*((u8 *)pGCT + 0x0d);
++		dev_priv->gct_data.Panel_MIPI_Display_Descriptor |=
++						(*((u8 *)pGCT + 0x0e)) << 8;
++		break;
++	default:
++		dev_err(dev->dev, "Unknown revision of GCT!\n");
++		vbt->size = 0;
++	}
++}
++
++int mid_chip_setup(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	mid_get_fuse_settings(dev);
++	mid_get_vbt_data(dev_priv);
++	mid_get_pci_revID(dev_priv);
++	return 0;
++}
+diff --git a/drivers/gpu/drm/gma500/mid_bios.h b/drivers/gpu/drm/gma500/mid_bios.h
+new file mode 100644
+index 0000000..00e7d56
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mid_bios.h
+@@ -0,0 +1,21 @@
++/**************************************************************************
++ * Copyright (c) 2011, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++extern int mid_chip_setup(struct drm_device *dev);
++
+diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
+new file mode 100644
+index 0000000..49bac41
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/mmu.c
+@@ -0,0 +1,849 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_reg.h"
++
++/*
++ * Code for the SGX MMU:
++ */
++
++/*
++ * clflush on one processor only:
++ * clflush should apparently flush the cache line on all processors in an
++ * SMP system.
++ */
++
++/*
++ * kmap atomic:
++ * The usage of the slots must be completely encapsulated within a spinlock, and
++ * no other functions that may be using the locks for other purposed may be
++ * called from within the locked region.
++ * Since the slots are per processor, this will guarantee that we are the only
++ * user.
++ */
++
++/*
++ * TODO: Inserting ptes from an interrupt handler:
++ * This may be desirable for some SGX functionality where the GPU can fault in
++ * needed pages. For that, we need to make an atomic insert_pages function, that
++ * may fail.
++ * If it fails, the caller need to insert the page using a workqueue function,
++ * but on average it should be fast.
++ */
++
++struct psb_mmu_driver {
++	/* protects driver- and pd structures. Always take in read mode
++	 * before taking the page table spinlock.
++	 */
++	struct rw_semaphore sem;
++
++	/* protects page tables, directory tables and pt tables.
++	 * and pt structures.
++	 */
++	spinlock_t lock;
++
++	atomic_t needs_tlbflush;
++
++	uint8_t __iomem *register_map;
++	struct psb_mmu_pd *default_pd;
++	/*uint32_t bif_ctrl;*/
++	int has_clflush;
++	int clflush_add;
++	unsigned long clflush_mask;
++
++	struct drm_psb_private *dev_priv;
++};
++
++struct psb_mmu_pd;
++
++struct psb_mmu_pt {
++	struct psb_mmu_pd *pd;
++	uint32_t index;
++	uint32_t count;
++	struct page *p;
++	uint32_t *v;
++};
++
++struct psb_mmu_pd {
++	struct psb_mmu_driver *driver;
++	int hw_context;
++	struct psb_mmu_pt **tables;
++	struct page *p;
++	struct page *dummy_pt;
++	struct page *dummy_page;
++	uint32_t pd_mask;
++	uint32_t invalid_pde;
++	uint32_t invalid_pte;
++};
++
++static inline uint32_t psb_mmu_pt_index(uint32_t offset)
++{
++	return (offset >> PSB_PTE_SHIFT) & 0x3FF;
++}
++
++static inline uint32_t psb_mmu_pd_index(uint32_t offset)
++{
++	return offset >> PSB_PDE_SHIFT;
++}
++
++static inline void psb_clflush(void *addr)
++{
++	__asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
++}
++
++static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
++				   void *addr)
++{
++	if (!driver->has_clflush)
++		return;
++
++	mb();
++	psb_clflush(addr);
++	mb();
++}
++
++static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
++{
++	uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
++	uint32_t clflush_count = PAGE_SIZE / clflush_add;
++	int i;
++	uint8_t *clf;
++
++	clf = kmap_atomic(page);
++	mb();
++	for (i = 0; i < clflush_count; ++i) {
++		psb_clflush(clf);
++		clf += clflush_add;
++	}
++	mb();
++	kunmap_atomic(clf);
++}
++
++static void psb_pages_clflush(struct psb_mmu_driver *driver,
++				struct page *page[], unsigned long num_pages)
++{
++	int i;
++
++	if (!driver->has_clflush)
++		return ;
++
++	for (i = 0; i < num_pages; i++)
++		psb_page_clflush(driver, *page++);
++}
++
++static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
++				    int force)
++{
++	atomic_set(&driver->needs_tlbflush, 0);
++}
++
++static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
++{
++	down_write(&driver->sem);
++	psb_mmu_flush_pd_locked(driver, force);
++	up_write(&driver->sem);
++}
++
++void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
++{
++	if (rc_prot)
++		down_write(&driver->sem);
++	if (rc_prot)
++		up_write(&driver->sem);
++}
++
++void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
++{
++	/*ttm_tt_cache_flush(&pd->p, 1);*/
++	psb_pages_clflush(pd->driver, &pd->p, 1);
++	down_write(&pd->driver->sem);
++	wmb();
++	psb_mmu_flush_pd_locked(pd->driver, 1);
++	pd->hw_context = hw_context;
++	up_write(&pd->driver->sem);
++
++}
++
++static inline unsigned long psb_pd_addr_end(unsigned long addr,
++					    unsigned long end)
++{
++
++	addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
++	return (addr < end) ? addr : end;
++}
++
++static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
++{
++	uint32_t mask = PSB_PTE_VALID;
++
++	if (type & PSB_MMU_CACHED_MEMORY)
++		mask |= PSB_PTE_CACHED;
++	if (type & PSB_MMU_RO_MEMORY)
++		mask |= PSB_PTE_RO;
++	if (type & PSB_MMU_WO_MEMORY)
++		mask |= PSB_PTE_WO;
++
++	return (pfn << PAGE_SHIFT) | mask;
++}
++
++struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
++				    int trap_pagefaults, int invalid_type)
++{
++	struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
++	uint32_t *v;
++	int i;
++
++	if (!pd)
++		return NULL;
++
++	pd->p = alloc_page(GFP_DMA32);
++	if (!pd->p)
++		goto out_err1;
++	pd->dummy_pt = alloc_page(GFP_DMA32);
++	if (!pd->dummy_pt)
++		goto out_err2;
++	pd->dummy_page = alloc_page(GFP_DMA32);
++	if (!pd->dummy_page)
++		goto out_err3;
++
++	if (!trap_pagefaults) {
++		pd->invalid_pde =
++		    psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
++				     invalid_type);
++		pd->invalid_pte =
++		    psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
++				     invalid_type);
++	} else {
++		pd->invalid_pde = 0;
++		pd->invalid_pte = 0;
++	}
++
++	v = kmap(pd->dummy_pt);
++	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++		v[i] = pd->invalid_pte;
++
++	kunmap(pd->dummy_pt);
++
++	v = kmap(pd->p);
++	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++		v[i] = pd->invalid_pde;
++
++	kunmap(pd->p);
++
++	clear_page(kmap(pd->dummy_page));
++	kunmap(pd->dummy_page);
++
++	pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
++	if (!pd->tables)
++		goto out_err4;
++
++	pd->hw_context = -1;
++	pd->pd_mask = PSB_PTE_VALID;
++	pd->driver = driver;
++
++	return pd;
++
++out_err4:
++	__free_page(pd->dummy_page);
++out_err3:
++	__free_page(pd->dummy_pt);
++out_err2:
++	__free_page(pd->p);
++out_err1:
++	kfree(pd);
++	return NULL;
++}
++
++static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
++{
++	__free_page(pt->p);
++	kfree(pt);
++}
++
++void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
++{
++	struct psb_mmu_driver *driver = pd->driver;
++	struct psb_mmu_pt *pt;
++	int i;
++
++	down_write(&driver->sem);
++	if (pd->hw_context != -1)
++		psb_mmu_flush_pd_locked(driver, 1);
++
++	/* Should take the spinlock here, but we don't need to do that
++	   since we have the semaphore in write mode. */
++
++	for (i = 0; i < 1024; ++i) {
++		pt = pd->tables[i];
++		if (pt)
++			psb_mmu_free_pt(pt);
++	}
++
++	vfree(pd->tables);
++	__free_page(pd->dummy_page);
++	__free_page(pd->dummy_pt);
++	__free_page(pd->p);
++	kfree(pd);
++	up_write(&driver->sem);
++}
++
++static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
++{
++	struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
++	void *v;
++	uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
++	uint32_t clflush_count = PAGE_SIZE / clflush_add;
++	spinlock_t *lock = &pd->driver->lock;
++	uint8_t *clf;
++	uint32_t *ptes;
++	int i;
++
++	if (!pt)
++		return NULL;
++
++	pt->p = alloc_page(GFP_DMA32);
++	if (!pt->p) {
++		kfree(pt);
++		return NULL;
++	}
++
++	spin_lock(lock);
++
++	v = kmap_atomic(pt->p);
++	clf = (uint8_t *) v;
++	ptes = (uint32_t *) v;
++	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
++		*ptes++ = pd->invalid_pte;
++
++
++	if (pd->driver->has_clflush && pd->hw_context != -1) {
++		mb();
++		for (i = 0; i < clflush_count; ++i) {
++			psb_clflush(clf);
++			clf += clflush_add;
++		}
++		mb();
++	}
++
++	kunmap_atomic(v);
++	spin_unlock(lock);
++
++	pt->count = 0;
++	pt->pd = pd;
++	pt->index = 0;
++
++	return pt;
++}
++
++static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
++					     unsigned long addr)
++{
++	uint32_t index = psb_mmu_pd_index(addr);
++	struct psb_mmu_pt *pt;
++	uint32_t *v;
++	spinlock_t *lock = &pd->driver->lock;
++
++	spin_lock(lock);
++	pt = pd->tables[index];
++	while (!pt) {
++		spin_unlock(lock);
++		pt = psb_mmu_alloc_pt(pd);
++		if (!pt)
++			return NULL;
++		spin_lock(lock);
++
++		if (pd->tables[index]) {
++			spin_unlock(lock);
++			psb_mmu_free_pt(pt);
++			spin_lock(lock);
++			pt = pd->tables[index];
++			continue;
++		}
++
++		v = kmap_atomic(pd->p);
++		pd->tables[index] = pt;
++		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
++		pt->index = index;
++		kunmap_atomic((void *) v);
++
++		if (pd->hw_context != -1) {
++			psb_mmu_clflush(pd->driver, (void *) &v[index]);
++			atomic_set(&pd->driver->needs_tlbflush, 1);
++		}
++	}
++	pt->v = kmap_atomic(pt->p);
++	return pt;
++}
++
++static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
++					      unsigned long addr)
++{
++	uint32_t index = psb_mmu_pd_index(addr);
++	struct psb_mmu_pt *pt;
++	spinlock_t *lock = &pd->driver->lock;
++
++	spin_lock(lock);
++	pt = pd->tables[index];
++	if (!pt) {
++		spin_unlock(lock);
++		return NULL;
++	}
++	pt->v = kmap_atomic(pt->p);
++	return pt;
++}
++
++static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
++{
++	struct psb_mmu_pd *pd = pt->pd;
++	uint32_t *v;
++
++	kunmap_atomic(pt->v);
++	if (pt->count == 0) {
++		v = kmap_atomic(pd->p);
++		v[pt->index] = pd->invalid_pde;
++		pd->tables[pt->index] = NULL;
++
++		if (pd->hw_context != -1) {
++			psb_mmu_clflush(pd->driver,
++					(void *) &v[pt->index]);
++			atomic_set(&pd->driver->needs_tlbflush, 1);
++		}
++		kunmap_atomic(pt->v);
++		spin_unlock(&pd->driver->lock);
++		psb_mmu_free_pt(pt);
++		return;
++	}
++	spin_unlock(&pd->driver->lock);
++}
++
++static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
++				   unsigned long addr, uint32_t pte)
++{
++	pt->v[psb_mmu_pt_index(addr)] = pte;
++}
++
++static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
++					  unsigned long addr)
++{
++	pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
++}
++
++
++void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
++			uint32_t mmu_offset, uint32_t gtt_start,
++			uint32_t gtt_pages)
++{
++	uint32_t *v;
++	uint32_t start = psb_mmu_pd_index(mmu_offset);
++	struct psb_mmu_driver *driver = pd->driver;
++	int num_pages = gtt_pages;
++
++	down_read(&driver->sem);
++	spin_lock(&driver->lock);
++
++	v = kmap_atomic(pd->p);
++	v += start;
++
++	while (gtt_pages--) {
++		*v++ = gtt_start | pd->pd_mask;
++		gtt_start += PAGE_SIZE;
++	}
++
++	/*ttm_tt_cache_flush(&pd->p, num_pages);*/
++	psb_pages_clflush(pd->driver, &pd->p, num_pages);
++	kunmap_atomic(v);
++	spin_unlock(&driver->lock);
++
++	if (pd->hw_context != -1)
++		atomic_set(&pd->driver->needs_tlbflush, 1);
++
++	up_read(&pd->driver->sem);
++	psb_mmu_flush_pd(pd->driver, 0);
++}
++
++struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
++{
++	struct psb_mmu_pd *pd;
++
++	/* down_read(&driver->sem); */
++	pd = driver->default_pd;
++	/* up_read(&driver->sem); */
++
++	return pd;
++}
++
++void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
++{
++	psb_mmu_free_pagedir(driver->default_pd);
++	kfree(driver);
++}
++
++struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
++					int trap_pagefaults,
++					int invalid_type,
++					struct drm_psb_private *dev_priv)
++{
++	struct psb_mmu_driver *driver;
++
++	driver = kmalloc(sizeof(*driver), GFP_KERNEL);
++
++	if (!driver)
++		return NULL;
++	driver->dev_priv = dev_priv;
++
++	driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
++					      invalid_type);
++	if (!driver->default_pd)
++		goto out_err1;
++
++	spin_lock_init(&driver->lock);
++	init_rwsem(&driver->sem);
++	down_write(&driver->sem);
++	driver->register_map = registers;
++	atomic_set(&driver->needs_tlbflush, 1);
++
++	driver->has_clflush = 0;
++
++	if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
++		uint32_t tfms, misc, cap0, cap4, clflush_size;
++
++		/*
++		 * clflush size is determined at kernel setup for x86_64
++		 *  but not for i386. We have to do it here.
++		 */
++
++		cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
++		clflush_size = ((misc >> 8) & 0xff) * 8;
++		driver->has_clflush = 1;
++		driver->clflush_add =
++		    PAGE_SIZE * clflush_size / sizeof(uint32_t);
++		driver->clflush_mask = driver->clflush_add - 1;
++		driver->clflush_mask = ~driver->clflush_mask;
++	}
++
++	up_write(&driver->sem);
++	return driver;
++
++out_err1:
++	kfree(driver);
++	return NULL;
++}
++
++static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
++			       unsigned long address, uint32_t num_pages,
++			       uint32_t desired_tile_stride,
++			       uint32_t hw_tile_stride)
++{
++	struct psb_mmu_pt *pt;
++	uint32_t rows = 1;
++	uint32_t i;
++	unsigned long addr;
++	unsigned long end;
++	unsigned long next;
++	unsigned long add;
++	unsigned long row_add;
++	unsigned long clflush_add = pd->driver->clflush_add;
++	unsigned long clflush_mask = pd->driver->clflush_mask;
++
++	if (!pd->driver->has_clflush) {
++		/*ttm_tt_cache_flush(&pd->p, num_pages);*/
++		psb_pages_clflush(pd->driver, &pd->p, num_pages);
++		return;
++	}
++
++	if (hw_tile_stride)
++		rows = num_pages / desired_tile_stride;
++	else
++		desired_tile_stride = num_pages;
++
++	add = desired_tile_stride << PAGE_SHIFT;
++	row_add = hw_tile_stride << PAGE_SHIFT;
++	mb();
++	for (i = 0; i < rows; ++i) {
++
++		addr = address;
++		end = addr + add;
++
++		do {
++			next = psb_pd_addr_end(addr, end);
++			pt = psb_mmu_pt_map_lock(pd, addr);
++			if (!pt)
++				continue;
++			do {
++				psb_clflush(&pt->v
++					    [psb_mmu_pt_index(addr)]);
++			} while (addr +=
++				 clflush_add,
++				 (addr & clflush_mask) < next);
++
++			psb_mmu_pt_unmap_unlock(pt);
++		} while (addr = next, next != end);
++		address += row_add;
++	}
++	mb();
++}
++
++void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
++				 unsigned long address, uint32_t num_pages)
++{
++	struct psb_mmu_pt *pt;
++	unsigned long addr;
++	unsigned long end;
++	unsigned long next;
++	unsigned long f_address = address;
++
++	down_read(&pd->driver->sem);
++
++	addr = address;
++	end = addr + (num_pages << PAGE_SHIFT);
++
++	do {
++		next = psb_pd_addr_end(addr, end);
++		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++		if (!pt)
++			goto out;
++		do {
++			psb_mmu_invalidate_pte(pt, addr);
++			--pt->count;
++		} while (addr += PAGE_SIZE, addr < next);
++		psb_mmu_pt_unmap_unlock(pt);
++
++	} while (addr = next, next != end);
++
++out:
++	if (pd->hw_context != -1)
++		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
++
++	up_read(&pd->driver->sem);
++
++	if (pd->hw_context != -1)
++		psb_mmu_flush(pd->driver, 0);
++
++	return;
++}
++
++void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
++			  uint32_t num_pages, uint32_t desired_tile_stride,
++			  uint32_t hw_tile_stride)
++{
++	struct psb_mmu_pt *pt;
++	uint32_t rows = 1;
++	uint32_t i;
++	unsigned long addr;
++	unsigned long end;
++	unsigned long next;
++	unsigned long add;
++	unsigned long row_add;
++	unsigned long f_address = address;
++
++	if (hw_tile_stride)
++		rows = num_pages / desired_tile_stride;
++	else
++		desired_tile_stride = num_pages;
++
++	add = desired_tile_stride << PAGE_SHIFT;
++	row_add = hw_tile_stride << PAGE_SHIFT;
++
++	/* down_read(&pd->driver->sem); */
++
++	/* Make sure we only need to flush this processor's cache */
++
++	for (i = 0; i < rows; ++i) {
++
++		addr = address;
++		end = addr + add;
++
++		do {
++			next = psb_pd_addr_end(addr, end);
++			pt = psb_mmu_pt_map_lock(pd, addr);
++			if (!pt)
++				continue;
++			do {
++				psb_mmu_invalidate_pte(pt, addr);
++				--pt->count;
++
++			} while (addr += PAGE_SIZE, addr < next);
++			psb_mmu_pt_unmap_unlock(pt);
++
++		} while (addr = next, next != end);
++		address += row_add;
++	}
++	if (pd->hw_context != -1)
++		psb_mmu_flush_ptes(pd, f_address, num_pages,
++				   desired_tile_stride, hw_tile_stride);
++
++	/* up_read(&pd->driver->sem); */
++
++	if (pd->hw_context != -1)
++		psb_mmu_flush(pd->driver, 0);
++}
++
++int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
++				unsigned long address, uint32_t num_pages,
++				int type)
++{
++	struct psb_mmu_pt *pt;
++	uint32_t pte;
++	unsigned long addr;
++	unsigned long end;
++	unsigned long next;
++	unsigned long f_address = address;
++	int ret = 0;
++
++	down_read(&pd->driver->sem);
++
++	addr = address;
++	end = addr + (num_pages << PAGE_SHIFT);
++
++	do {
++		next = psb_pd_addr_end(addr, end);
++		pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++		if (!pt) {
++			ret = -ENOMEM;
++			goto out;
++		}
++		do {
++			pte = psb_mmu_mask_pte(start_pfn++, type);
++			psb_mmu_set_pte(pt, addr, pte);
++			pt->count++;
++		} while (addr += PAGE_SIZE, addr < next);
++		psb_mmu_pt_unmap_unlock(pt);
++
++	} while (addr = next, next != end);
++
++out:
++	if (pd->hw_context != -1)
++		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
++
++	up_read(&pd->driver->sem);
++
++	if (pd->hw_context != -1)
++		psb_mmu_flush(pd->driver, 1);
++
++	return ret;
++}
++
++int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
++			 unsigned long address, uint32_t num_pages,
++			 uint32_t desired_tile_stride,
++			 uint32_t hw_tile_stride, int type)
++{
++	struct psb_mmu_pt *pt;
++	uint32_t rows = 1;
++	uint32_t i;
++	uint32_t pte;
++	unsigned long addr;
++	unsigned long end;
++	unsigned long next;
++	unsigned long add;
++	unsigned long row_add;
++	unsigned long f_address = address;
++	int ret = 0;
++
++	if (hw_tile_stride) {
++		if (num_pages % desired_tile_stride != 0)
++			return -EINVAL;
++		rows = num_pages / desired_tile_stride;
++	} else {
++		desired_tile_stride = num_pages;
++	}
++
++	add = desired_tile_stride << PAGE_SHIFT;
++	row_add = hw_tile_stride << PAGE_SHIFT;
++
++	down_read(&pd->driver->sem);
++
++	for (i = 0; i < rows; ++i) {
++
++		addr = address;
++		end = addr + add;
++
++		do {
++			next = psb_pd_addr_end(addr, end);
++			pt = psb_mmu_pt_alloc_map_lock(pd, addr);
++			if (!pt) {
++				ret = -ENOMEM;
++				goto out;
++			}
++			do {
++				pte =
++				    psb_mmu_mask_pte(page_to_pfn(*pages++),
++						     type);
++				psb_mmu_set_pte(pt, addr, pte);
++				pt->count++;
++			} while (addr += PAGE_SIZE, addr < next);
++			psb_mmu_pt_unmap_unlock(pt);
++
++		} while (addr = next, next != end);
++
++		address += row_add;
++	}
++out:
++	if (pd->hw_context != -1)
++		psb_mmu_flush_ptes(pd, f_address, num_pages,
++				   desired_tile_stride, hw_tile_stride);
++
++	up_read(&pd->driver->sem);
++
++	if (pd->hw_context != -1)
++		psb_mmu_flush(pd->driver, 1);
++
++	return ret;
++}
++
++int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
++			   unsigned long *pfn)
++{
++	int ret;
++	struct psb_mmu_pt *pt;
++	uint32_t tmp;
++	spinlock_t *lock = &pd->driver->lock;
++
++	down_read(&pd->driver->sem);
++	pt = psb_mmu_pt_map_lock(pd, virtual);
++	if (!pt) {
++		uint32_t *v;
++
++		spin_lock(lock);
++		v = kmap_atomic(pd->p);
++		tmp = v[psb_mmu_pd_index(virtual)];
++		kunmap_atomic(v);
++		spin_unlock(lock);
++
++		if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
++		    !(pd->invalid_pte & PSB_PTE_VALID)) {
++			ret = -EINVAL;
++			goto out;
++		}
++		ret = 0;
++		*pfn = pd->invalid_pte >> PAGE_SHIFT;
++		goto out;
++	}
++	tmp = pt->v[psb_mmu_pt_index(virtual)];
++	if (!(tmp & PSB_PTE_VALID)) {
++		ret = -EINVAL;
++	} else {
++		ret = 0;
++		*pfn = tmp >> PAGE_SHIFT;
++	}
++	psb_mmu_pt_unmap_unlock(pt);
++out:
++	up_read(&pd->driver->sem);
++	return ret;
++}
+diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
+new file mode 100644
+index 0000000..2da1f36
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/oaktrail.h
+@@ -0,0 +1,252 @@
++/**************************************************************************
++ * Copyright (c) 2007-2011, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++/* MID device specific descriptors */
++
++struct oaktrail_vbt {
++	s8 signature[4];	/*4 bytes,"$GCT" */
++	u8 revision;
++	u8 size;
++	u8 checksum;
++	void *oaktrail_gct;
++} __packed;
++
++struct oaktrail_timing_info {
++	u16 pixel_clock;
++	u8 hactive_lo;
++	u8 hblank_lo;
++	u8 hblank_hi:4;
++	u8 hactive_hi:4;
++	u8 vactive_lo;
++	u8 vblank_lo;
++	u8 vblank_hi:4;
++	u8 vactive_hi:4;
++	u8 hsync_offset_lo;
++	u8 hsync_pulse_width_lo;
++	u8 vsync_pulse_width_lo:4;
++	u8 vsync_offset_lo:4;
++	u8 vsync_pulse_width_hi:2;
++	u8 vsync_offset_hi:2;
++	u8 hsync_pulse_width_hi:2;
++	u8 hsync_offset_hi:2;
++	u8 width_mm_lo;
++	u8 height_mm_lo;
++	u8 height_mm_hi:4;
++	u8 width_mm_hi:4;
++	u8 hborder;
++	u8 vborder;
++	u8 unknown0:1;
++	u8 hsync_positive:1;
++	u8 vsync_positive:1;
++	u8 separate_sync:2;
++	u8 stereo:1;
++	u8 unknown6:1;
++	u8 interlaced:1;
++} __packed;
++
++struct gct_r10_timing_info {
++	u16 pixel_clock;
++	u32 hactive_lo:8;
++	u32 hactive_hi:4;
++	u32 hblank_lo:8;
++	u32 hblank_hi:4;
++	u32 hsync_offset_lo:8;
++	u16 hsync_offset_hi:2;
++	u16 hsync_pulse_width_lo:8;
++	u16 hsync_pulse_width_hi:2;
++	u16 hsync_positive:1;
++	u16 rsvd_1:3;
++	u8  vactive_lo:8;
++	u16 vactive_hi:4;
++	u16 vblank_lo:8;
++	u16 vblank_hi:4;
++	u16 vsync_offset_lo:4;
++	u16 vsync_offset_hi:2;
++	u16 vsync_pulse_width_lo:4;
++	u16 vsync_pulse_width_hi:2;
++	u16 vsync_positive:1;
++	u16 rsvd_2:3;
++} __packed;
++
++struct oaktrail_panel_descriptor_v1 {
++	u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
++				/* 0x61190 if MIPI */
++	u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
++	u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
++	u32 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
++						/* Register 0x61210 */
++	struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
++	u16 Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
++				/* Bit 0, Frequency, 15 bits,0 - 32767Hz */
++			/* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
++	u16 Panel_MIPI_Display_Descriptor;
++			/*16 bits, Defined as follows: */
++			/* if MIPI, 0x0000 if LVDS */
++			/* Bit 0, Type, 2 bits, */
++			/* 0: Type-1, */
++			/* 1: Type-2, */
++			/* 2: Type-3, */
++			/* 3: Type-4 */
++			/* Bit 2, Pixel Format, 4 bits */
++			/* Bit0: 16bpp (not supported in LNC), */
++			/* Bit1: 18bpp loosely packed, */
++			/* Bit2: 18bpp packed, */
++			/* Bit3: 24bpp */
++			/* Bit 6, Reserved, 2 bits, 00b */
++			/* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
++			/* Bit 14, Reserved, 2 bits, 00b */
++} __packed;
++
++struct oaktrail_panel_descriptor_v2 {
++	u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
++				/* 0x61190 if MIPI */
++	u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
++	u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
++	u8 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
++						/* Register 0x61210 */
++	struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
++	u16 Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
++				/*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
++	u8 Panel_Initial_Brightness;/* [7:0] 0 - 100% */
++			/*Bit 7, Polarity, 1 bit,0: Normal, 1: Inverted*/
++	u16 Panel_MIPI_Display_Descriptor;
++			/*16 bits, Defined as follows: */
++			/* if MIPI, 0x0000 if LVDS */
++			/* Bit 0, Type, 2 bits, */
++			/* 0: Type-1, */
++			/* 1: Type-2, */
++			/* 2: Type-3, */
++			/* 3: Type-4 */
++			/* Bit 2, Pixel Format, 4 bits */
++			/* Bit0: 16bpp (not supported in LNC), */
++			/* Bit1: 18bpp loosely packed, */
++			/* Bit2: 18bpp packed, */
++			/* Bit3: 24bpp */
++			/* Bit 6, Reserved, 2 bits, 00b */
++			/* Bit 8, Minimum Supported Frame Rate, 6 bits, 0 - 63Hz */
++			/* Bit 14, Reserved, 2 bits, 00b */
++} __packed;
++
++union oaktrail_panel_rx {
++	struct {
++		u16 NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
++			/* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
++		u16 MaxLaneFreq:3; /* 0: 100MHz, 1: 200MHz, 2: 300MHz, */
++		/*3: 400MHz, 4: 500MHz, 5: 600MHz, 6: 700MHz, 7: 800MHz.*/
++		u16 SupportedVideoTransferMode:2; /*0: Non-burst only */
++					/* 1: Burst and non-burst */
++					/* 2/3: Reserved */
++		u16 HSClkBehavior:1; /*0: Continuous, 1: Non-continuous*/
++		u16 DuoDisplaySupport:1; /*1 bit,0: No, 1: Yes*/
++		u16 ECC_ChecksumCapabilities:1;/*1 bit,0: No, 1: Yes*/
++		u16 BidirectionalCommunication:1;/*1 bit,0: No, 1: Yes */
++		u16 Rsvd:5;/*5 bits,00000b */
++	} panelrx;
++	u16 panel_receiver;
++} __packed;
++
++struct oaktrail_gct_v1 {
++	union { /*8 bits,Defined as follows: */
++		struct {
++			u8 PanelType:4; /*4 bits, Bit field for panels*/
++					/* 0 - 3: 0 = LVDS, 1 = MIPI*/
++					/*2 bits,Specifies which of the*/
++			u8 BootPanelIndex:2;
++					/* 4 panels to use by default*/
++			u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
++					/* the 4 MIPI DSI receivers to use*/
++		} PD;
++		u8 PanelDescriptor;
++	};
++	struct oaktrail_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
++	union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
++} __packed;
++
++struct oaktrail_gct_v2 {
++	union { /*8 bits,Defined as follows: */
++		struct {
++			u8 PanelType:4; /*4 bits, Bit field for panels*/
++					/* 0 - 3: 0 = LVDS, 1 = MIPI*/
++					/*2 bits,Specifies which of the*/
++			u8 BootPanelIndex:2;
++					/* 4 panels to use by default*/
++			u8 BootMIPI_DSI_RxIndex:2;/*Specifies which of*/
++					/* the 4 MIPI DSI receivers to use*/
++		} PD;
++		u8 PanelDescriptor;
++	};
++	struct oaktrail_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
++	union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
++} __packed;
++
++struct oaktrail_gct_data {
++	u8 bpi; /* boot panel index, number of panel used during boot */
++	u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
++	struct oaktrail_timing_info DTD; /* timing info for the selected panel */
++	u32 Panel_Port_Control;
++	u32 PP_On_Sequencing;/*1 dword,Register 0x61208,*/
++	u32 PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
++	u32 PP_Cycle_Delay;
++	u16 Panel_Backlight_Inverter_Descriptor;
++	u16 Panel_MIPI_Display_Descriptor;
++} __packed;
++
++#define MODE_SETTING_IN_CRTC		0x1
++#define MODE_SETTING_IN_ENCODER		0x2
++#define MODE_SETTING_ON_GOING		0x3
++#define MODE_SETTING_IN_DSR		0x4
++#define MODE_SETTING_ENCODER_DONE	0x8
++
++#define GCT_R10_HEADER_SIZE		16
++#define GCT_R10_DISPLAY_DESC_SIZE	28
++
++/*
++ *	Moorestown HDMI interfaces
++ */
++
++struct oaktrail_hdmi_dev {
++	struct pci_dev *dev;
++	void __iomem *regs;
++	unsigned int mmio, mmio_len;
++	int dpms_mode;
++	struct hdmi_i2c_dev *i2c_dev;
++
++	/* register state */
++	u32 saveDPLL_CTRL;
++	u32 saveDPLL_DIV_CTRL;
++	u32 saveDPLL_ADJUST;
++	u32 saveDPLL_UPDATE;
++	u32 saveDPLL_CLK_ENABLE;
++	u32 savePCH_HTOTAL_B;
++	u32 savePCH_HBLANK_B;
++	u32 savePCH_HSYNC_B;
++	u32 savePCH_VTOTAL_B;
++	u32 savePCH_VBLANK_B;
++	u32 savePCH_VSYNC_B;
++	u32 savePCH_PIPEBCONF;
++	u32 savePCH_PIPEBSRC;
++};
++
++extern void oaktrail_hdmi_setup(struct drm_device *dev);
++extern void oaktrail_hdmi_teardown(struct drm_device *dev);
++extern int  oaktrail_hdmi_i2c_init(struct pci_dev *dev);
++extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
++extern void oaktrail_hdmi_save(struct drm_device *dev);
++extern void oaktrail_hdmi_restore(struct drm_device *dev);
++extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
+diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
+new file mode 100644
+index 0000000..a39b0d0
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
+@@ -0,0 +1,592 @@
++/*
++ * Copyright © 2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/i2c.h>
++#include <linux/pm_runtime.h>
++
++#include <drm/drmP.h>
++#include "framebuffer.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_display.h"
++#include "power.h"
++
++struct psb_intel_range_t {
++	int min, max;
++};
++
++struct oaktrail_limit_t {
++	struct psb_intel_range_t dot, m, p1;
++};
++
++struct oaktrail_clock_t {
++	/* derived values */
++	int dot;
++	int m;
++	int p1;
++};
++
++#define MRST_LIMIT_LVDS_100L	    0
++#define MRST_LIMIT_LVDS_83	    1
++#define MRST_LIMIT_LVDS_100	    2
++
++#define MRST_DOT_MIN		  19750
++#define MRST_DOT_MAX		  120000
++#define MRST_M_MIN_100L		    20
++#define MRST_M_MIN_100		    10
++#define MRST_M_MIN_83		    12
++#define MRST_M_MAX_100L		    34
++#define MRST_M_MAX_100		    17
++#define MRST_M_MAX_83		    20
++#define MRST_P1_MIN		    2
++#define MRST_P1_MAX_0		    7
++#define MRST_P1_MAX_1		    8
++
++static const struct oaktrail_limit_t oaktrail_limits[] = {
++	{			/* MRST_LIMIT_LVDS_100L */
++	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++	 .m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
++	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
++	 },
++	{			/* MRST_LIMIT_LVDS_83 */
++	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++	 .m = {.min = MRST_M_MIN_83, .max = MRST_M_MAX_83},
++	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_0},
++	 },
++	{			/* MRST_LIMIT_LVDS_100 */
++	 .dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
++	 .m = {.min = MRST_M_MIN_100, .max = MRST_M_MAX_100},
++	 .p1 = {.min = MRST_P1_MIN, .max = MRST_P1_MAX_1},
++	 },
++};
++
++#define MRST_M_MIN	    10
++static const u32 oaktrail_m_converts[] = {
++	0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
++	0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
++	0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
++};
++
++static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
++{
++	const struct oaktrail_limit_t *limit = NULL;
++	struct drm_device *dev = crtc->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
++	    || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
++		switch (dev_priv->core_freq) {
++		case 100:
++			limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
++			break;
++		case 166:
++			limit = &oaktrail_limits[MRST_LIMIT_LVDS_83];
++			break;
++		case 200:
++			limit = &oaktrail_limits[MRST_LIMIT_LVDS_100];
++			break;
++		}
++	} else {
++		limit = NULL;
++		dev_err(dev->dev, "oaktrail_limit Wrong display type.\n");
++	}
++
++	return limit;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
++static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
++{
++	clock->dot = (refclk * clock->m) / (14 * clock->p1);
++}
++
++static void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
++{
++	pr_debug("%s: dotclock = %d,  m = %d, p1 = %d.\n",
++	     prefix, clock->dot, clock->m, clock->p1);
++}
++
++/**
++ * Returns a set of divisors for the desired target clock with the given refclk,
++ * or FALSE.  Divisor values are the actual divisors for
++ */
++static bool
++mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
++		struct oaktrail_clock_t *best_clock)
++{
++	struct oaktrail_clock_t clock;
++	const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
++	int err = target;
++
++	memset(best_clock, 0, sizeof(*best_clock));
++
++	for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
++		for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
++		     clock.p1++) {
++			int this_err;
++
++			oaktrail_clock(refclk, &clock);
++
++			this_err = abs(clock.dot - target);
++			if (this_err < err) {
++				*best_clock = clock;
++				err = this_err;
++			}
++		}
++	}
++	dev_dbg(crtc->dev->dev, "mrstFindBestPLL err = %d.\n", err);
++	return err != target;
++}
++
++/**
++ * Sets the power management mode of the pipe and plane.
++ *
++ * This code should probably grow support for turning the cursor off and back
++ * on appropriately at the same time as we're turning the pipe off/on.
++ */
++static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++	struct drm_device *dev = crtc->dev;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int pipe = psb_intel_crtc->pipe;
++	int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
++	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++	int dspbase_reg = (pipe == 0) ? MRST_DSPABASE : DSPBBASE;
++	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++	u32 temp;
++
++	if (!gma_power_begin(dev, true))
++		return;
++
++	/* XXX: When our outputs are all unaware of DPMS modes other than off
++	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++	 */
++	switch (mode) {
++	case DRM_MODE_DPMS_ON:
++	case DRM_MODE_DPMS_STANDBY:
++	case DRM_MODE_DPMS_SUSPEND:
++		/* Enable the DPLL */
++		temp = REG_READ(dpll_reg);
++		if ((temp & DPLL_VCO_ENABLE) == 0) {
++			REG_WRITE(dpll_reg, temp);
++			REG_READ(dpll_reg);
++			/* Wait for the clocks to stabilize. */
++			udelay(150);
++			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++			REG_READ(dpll_reg);
++			/* Wait for the clocks to stabilize. */
++			udelay(150);
++			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++			REG_READ(dpll_reg);
++			/* Wait for the clocks to stabilize. */
++			udelay(150);
++		}
++		/* Enable the pipe */
++		temp = REG_READ(pipeconf_reg);
++		if ((temp & PIPEACONF_ENABLE) == 0)
++			REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
++		/* Enable the plane */
++		temp = REG_READ(dspcntr_reg);
++		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++			REG_WRITE(dspcntr_reg,
++				  temp | DISPLAY_PLANE_ENABLE);
++			/* Flush the plane changes */
++			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++		}
++
++		psb_intel_crtc_load_lut(crtc);
++
++		/* Give the overlay scaler a chance to enable
++		   if it's on this pipe */
++		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
++		break;
++	case DRM_MODE_DPMS_OFF:
++		/* Give the overlay scaler a chance to disable
++		 * if it's on this pipe */
++		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
++
++		/* Disable the VGA plane that we never use */
++		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++		/* Disable display plane */
++		temp = REG_READ(dspcntr_reg);
++		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++			REG_WRITE(dspcntr_reg,
++				  temp & ~DISPLAY_PLANE_ENABLE);
++			/* Flush the plane changes */
++			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++			REG_READ(dspbase_reg);
++		}
++
++		/* Next, disable display pipes */
++		temp = REG_READ(pipeconf_reg);
++		if ((temp & PIPEACONF_ENABLE) != 0) {
++			REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
++			REG_READ(pipeconf_reg);
++		}
++		/* Wait for the pipe disable to take effect. */
++		psb_intel_wait_for_vblank(dev);
++
++		temp = REG_READ(dpll_reg);
++		if ((temp & DPLL_VCO_ENABLE) != 0) {
++			REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
++			REG_READ(dpll_reg);
++		}
++
++		/* Wait for the clocks to turn off. */
++		udelay(150);
++		break;
++	}
++
++	/*Set FIFO Watermarks*/
++	REG_WRITE(DSPARB, 0x3FFF);
++	REG_WRITE(DSPFW1, 0x3F88080A);
++	REG_WRITE(DSPFW2, 0x0b060808);
++	REG_WRITE(DSPFW3, 0x0);
++	REG_WRITE(DSPFW4, 0x08030404);
++	REG_WRITE(DSPFW5, 0x04040404);
++	REG_WRITE(DSPFW6, 0x78);
++	REG_WRITE(0x70400, REG_READ(0x70400) | 0x4000);
++	/* Must write Bit 14 of the Chicken Bit Register */
++
++	gma_power_end(dev);
++}
++
++/**
++ * Return the pipe currently connected to the panel fitter,
++ * or -1 if the panel fitter is not present or not in use
++ */
++static int oaktrail_panel_fitter_pipe(struct drm_device *dev)
++{
++	u32 pfit_control;
++
++	pfit_control = REG_READ(PFIT_CONTROL);
++
++	/* See if the panel fitter is in use */
++	if ((pfit_control & PFIT_ENABLE) == 0)
++		return -1;
++	return (pfit_control >> 29) & 3;
++}
++
++static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
++			      struct drm_display_mode *mode,
++			      struct drm_display_mode *adjusted_mode,
++			      int x, int y,
++			      struct drm_framebuffer *old_fb)
++{
++	struct drm_device *dev = crtc->dev;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	int pipe = psb_intel_crtc->pipe;
++	int fp_reg = (pipe == 0) ? MRST_FPA0 : FPB0;
++	int dpll_reg = (pipe == 0) ? MRST_DPLL_A : DPLL_B;
++	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
++	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
++	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
++	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
++	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
++	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
++	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
++	int refclk = 0;
++	struct oaktrail_clock_t clock;
++	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
++	bool ok, is_sdvo = false;
++	bool is_lvds = false;
++	bool is_mipi = false;
++	struct drm_mode_config *mode_config = &dev->mode_config;
++	struct psb_intel_encoder *psb_intel_encoder = NULL;
++	uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
++	struct drm_connector *connector;
++
++	if (!gma_power_begin(dev, true))
++		return 0;
++
++	memcpy(&psb_intel_crtc->saved_mode,
++		mode,
++		sizeof(struct drm_display_mode));
++	memcpy(&psb_intel_crtc->saved_adjusted_mode,
++		adjusted_mode,
++		sizeof(struct drm_display_mode));
++
++	list_for_each_entry(connector, &mode_config->connector_list, head) {
++		if (!connector->encoder || connector->encoder->crtc != crtc)
++			continue;
++
++		psb_intel_encoder = psb_intel_attached_encoder(connector);
++
++		switch (psb_intel_encoder->type) {
++		case INTEL_OUTPUT_LVDS:
++			is_lvds = true;
++			break;
++		case INTEL_OUTPUT_SDVO:
++			is_sdvo = true;
++			break;
++		case INTEL_OUTPUT_MIPI:
++			is_mipi = true;
++			break;
++		}
++	}
++
++	/* Disable the VGA plane that we never use */
++	REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++	/* Disable the panel fitter if it was on our pipe */
++	if (oaktrail_panel_fitter_pipe(dev) == pipe)
++		REG_WRITE(PFIT_CONTROL, 0);
++
++	REG_WRITE(pipesrc_reg,
++		  ((mode->crtc_hdisplay - 1) << 16) |
++		  (mode->crtc_vdisplay - 1));
++
++	if (psb_intel_encoder)
++		drm_connector_property_get_value(connector,
++			dev->mode_config.scaling_mode_property, &scalingType);
++
++	if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
++		/* Moorestown doesn't have register support for centering so
++		 * we need to mess with the h/vblank and h/vsync start and
++		 * ends to get centering */
++		int offsetX = 0, offsetY = 0;
++
++		offsetX = (adjusted_mode->crtc_hdisplay -
++			   mode->crtc_hdisplay) / 2;
++		offsetY = (adjusted_mode->crtc_vdisplay -
++			   mode->crtc_vdisplay) / 2;
++
++		REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
++			((adjusted_mode->crtc_htotal - 1) << 16));
++		REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
++			((adjusted_mode->crtc_vtotal - 1) << 16));
++		REG_WRITE(hblank_reg,
++			(adjusted_mode->crtc_hblank_start - offsetX - 1) |
++			((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
++		REG_WRITE(hsync_reg,
++			(adjusted_mode->crtc_hsync_start - offsetX - 1) |
++			((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
++		REG_WRITE(vblank_reg,
++			(adjusted_mode->crtc_vblank_start - offsetY - 1) |
++			((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
++		REG_WRITE(vsync_reg,
++			(adjusted_mode->crtc_vsync_start - offsetY - 1) |
++			((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
++	} else {
++		REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++			((adjusted_mode->crtc_htotal - 1) << 16));
++		REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++			((adjusted_mode->crtc_vtotal - 1) << 16));
++		REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++			((adjusted_mode->crtc_hblank_end - 1) << 16));
++		REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++			((adjusted_mode->crtc_hsync_end - 1) << 16));
++		REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++			((adjusted_mode->crtc_vblank_end - 1) << 16));
++		REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++			((adjusted_mode->crtc_vsync_end - 1) << 16));
++	}
++
++	/* Flush the plane changes */
++	{
++		struct drm_crtc_helper_funcs *crtc_funcs =
++		    crtc->helper_private;
++		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
++	}
++
++	/* setup pipeconf */
++	pipeconf = REG_READ(pipeconf_reg);
++
++	/* Set up the display plane register */
++	dspcntr = REG_READ(dspcntr_reg);
++	dspcntr |= DISPPLANE_GAMMA_ENABLE;
++
++	if (pipe == 0)
++		dspcntr |= DISPPLANE_SEL_PIPE_A;
++	else
++		dspcntr |= DISPPLANE_SEL_PIPE_B;
++
++	if (is_mipi)
++		goto oaktrail_crtc_mode_set_exit;
++
++	refclk = dev_priv->core_freq * 1000;
++
++	dpll = 0;		/*BIT16 = 0 for 100MHz reference */
++
++	ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
++
++	if (!ok) {
++		dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n");
++	} else {
++		dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d,"
++			 "m = %x, p1 = %x.\n", clock.dot, clock.m,
++			 clock.p1);
++	}
++
++	fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
++
++	dpll |= DPLL_VGA_MODE_DIS;
++
++
++	dpll |= DPLL_VCO_ENABLE;
++
++	if (is_lvds)
++		dpll |= DPLLA_MODE_LVDS;
++	else
++		dpll |= DPLLB_MODE_DAC_SERIAL;
++
++	if (is_sdvo) {
++		int sdvo_pixel_multiply =
++		    adjusted_mode->clock / mode->clock;
++
++		dpll |= DPLL_DVO_HIGH_SPEED;
++		dpll |=
++		    (sdvo_pixel_multiply -
++		     1) << SDVO_MULTIPLIER_SHIFT_HIRES;
++	}
++
++
++	/* compute bitmask from p1 value */
++	dpll |= (1 << (clock.p1 - 2)) << 17;
++
++	dpll |= DPLL_VCO_ENABLE;
++
++	mrstPrintPll("chosen", &clock);
++
++	if (dpll & DPLL_VCO_ENABLE) {
++		REG_WRITE(fp_reg, fp);
++		REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
++		REG_READ(dpll_reg);
++		/* Check the DPLLA lock bit PIPEACONF[29] */
++		udelay(150);
++	}
++
++	REG_WRITE(fp_reg, fp);
++	REG_WRITE(dpll_reg, dpll);
++	REG_READ(dpll_reg);
++	/* Wait for the clocks to stabilize. */
++	udelay(150);
++
++	/* write it again -- the BIOS does, after all */
++	REG_WRITE(dpll_reg, dpll);
++	REG_READ(dpll_reg);
++	/* Wait for the clocks to stabilize. */
++	udelay(150);
++
++	REG_WRITE(pipeconf_reg, pipeconf);
++	REG_READ(pipeconf_reg);
++	psb_intel_wait_for_vblank(dev);
++
++	REG_WRITE(dspcntr_reg, dspcntr);
++	psb_intel_wait_for_vblank(dev);
++
++oaktrail_crtc_mode_set_exit:
++	gma_power_end(dev);
++	return 0;
++}
++
++static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
++				  struct drm_display_mode *mode,
++				  struct drm_display_mode *adjusted_mode)
++{
++	return true;
++}
++
++static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
++			    int x, int y, struct drm_framebuffer *old_fb)
++{
++	struct drm_device *dev = crtc->dev;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
++	int pipe = psb_intel_crtc->pipe;
++	unsigned long start, offset;
++
++	int dspbase = (pipe == 0 ? DSPALINOFF : DSPBBASE);
++	int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
++	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
++	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++	u32 dspcntr;
++	int ret = 0;
++
++	/* no fb bound */
++	if (!crtc->fb) {
++		dev_dbg(dev->dev, "No FB bound\n");
++		return 0;
++	}
++
++	if (!gma_power_begin(dev, true))
++		return 0;
++
++	start = psbfb->gtt->offset;
++	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
++
++	REG_WRITE(dspstride, crtc->fb->pitches[0]);
++
++	dspcntr = REG_READ(dspcntr_reg);
++	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
++
++	switch (crtc->fb->bits_per_pixel) {
++	case 8:
++		dspcntr |= DISPPLANE_8BPP;
++		break;
++	case 16:
++		if (crtc->fb->depth == 15)
++			dspcntr |= DISPPLANE_15_16BPP;
++		else
++			dspcntr |= DISPPLANE_16BPP;
++		break;
++	case 24:
++	case 32:
++		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
++		break;
++	default:
++		dev_err(dev->dev, "Unknown color depth\n");
++		ret = -EINVAL;
++		goto pipe_set_base_exit;
++	}
++	REG_WRITE(dspcntr_reg, dspcntr);
++
++	REG_WRITE(dspbase, offset);
++	REG_READ(dspbase);
++	REG_WRITE(dspsurf, start);
++	REG_READ(dspsurf);
++
++pipe_set_base_exit:
++	gma_power_end(dev);
++	return ret;
++}
++
++static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
++{
++	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++}
++
++static void oaktrail_crtc_commit(struct drm_crtc *crtc)
++{
++	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
++}
++
++const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
++	.dpms = oaktrail_crtc_dpms,
++	.mode_fixup = oaktrail_crtc_mode_fixup,
++	.mode_set = oaktrail_crtc_mode_set,
++	.mode_set_base = oaktrail_pipe_set_base,
++	.prepare = oaktrail_crtc_prepare,
++	.commit = oaktrail_crtc_commit,
++};
++
+diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
+new file mode 100644
+index 0000000..41d1924
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/oaktrail_device.c
+@@ -0,0 +1,509 @@
++/**************************************************************************
++ * Copyright (c) 2011, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <linux/backlight.h>
++#include <linux/module.h>
++#include <linux/dmi.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "gma_drm.h"
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "psb_intel_reg.h"
++#include <asm/mrst.h>
++#include <asm/intel_scu_ipc.h>
++#include "mid_bios.h"
++#include "intel_bios.h"
++
++static int oaktrail_output_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	if (dev_priv->iLVDS_enable)
++		oaktrail_lvds_init(dev, &dev_priv->mode_dev);
++	else
++		dev_err(dev->dev, "DSI is not supported\n");
++	if (dev_priv->hdmi_priv)
++		oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
++	return 0;
++}
++
++/*
++ *	Provide the low level interfaces for the Moorestown backlight
++ */
++
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++
++#define MRST_BLC_MAX_PWM_REG_FREQ	    0xFFFF
++#define BLC_PWM_PRECISION_FACTOR 100	/* 10000000 */
++#define BLC_PWM_FREQ_CALC_CONSTANT 32
++#define MHz 1000000
++#define BLC_ADJUSTMENT_MAX 100
++
++static struct backlight_device *oaktrail_backlight_device;
++static int oaktrail_brightness;
++
++static int oaktrail_set_brightness(struct backlight_device *bd)
++{
++	struct drm_device *dev = bl_get_data(oaktrail_backlight_device);
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	int level = bd->props.brightness;
++	u32 blc_pwm_ctl;
++	u32 max_pwm_blc;
++
++	/* Percentage 1-100% being valid */
++	if (level < 1)
++		level = 1;
++
++	if (gma_power_begin(dev, 0)) {
++		/* Calculate and set the brightness value */
++		max_pwm_blc = REG_READ(BLC_PWM_CTL) >> 16;
++		blc_pwm_ctl = level * max_pwm_blc / 100;
++
++		/* Adjust the backlight level with the percent in
++		 * dev_priv->blc_adj1;
++		 */
++		blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj1;
++		blc_pwm_ctl = blc_pwm_ctl / 100;
++
++		/* Adjust the backlight level with the percent in
++		 * dev_priv->blc_adj2;
++		 */
++		blc_pwm_ctl = blc_pwm_ctl * dev_priv->blc_adj2;
++		blc_pwm_ctl = blc_pwm_ctl / 100;
++
++		/* force PWM bit on */
++		REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
++		REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
++		gma_power_end(dev);
++	}
++	oaktrail_brightness = level;
++	return 0;
++}
++
++static int oaktrail_get_brightness(struct backlight_device *bd)
++{
++	/* return locally cached var instead of HW read (due to DPST etc.) */
++	/* FIXME: ideally return actual value in case firmware fiddled with
++	   it */
++	return oaktrail_brightness;
++}
++
++static int device_backlight_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	unsigned long core_clock;
++	u16 bl_max_freq;
++	uint32_t value;
++	uint32_t blc_pwm_precision_factor;
++
++	dev_priv->blc_adj1 = BLC_ADJUSTMENT_MAX;
++	dev_priv->blc_adj2 = BLC_ADJUSTMENT_MAX;
++	bl_max_freq = 256;
++	/* this needs to be set elsewhere */
++	blc_pwm_precision_factor = BLC_PWM_PRECISION_FACTOR;
++
++	core_clock = dev_priv->core_freq;
++
++	value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
++	value *= blc_pwm_precision_factor;
++	value /= bl_max_freq;
++	value /= blc_pwm_precision_factor;
++
++	if (value > (unsigned long long)MRST_BLC_MAX_PWM_REG_FREQ)
++			return -ERANGE;
++
++	if (gma_power_begin(dev, false)) {
++		REG_WRITE(BLC_PWM_CTL2, (0x80000000 | REG_READ(BLC_PWM_CTL2)));
++		REG_WRITE(BLC_PWM_CTL, value | (value << 16));
++		gma_power_end(dev);
++	}
++	return 0;
++}
++
++static const struct backlight_ops oaktrail_ops = {
++	.get_brightness = oaktrail_get_brightness,
++	.update_status  = oaktrail_set_brightness,
++};
++
++static int oaktrail_backlight_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	int ret;
++	struct backlight_properties props;
++
++	memset(&props, 0, sizeof(struct backlight_properties));
++	props.max_brightness = 100;
++	props.type = BACKLIGHT_PLATFORM;
++
++	oaktrail_backlight_device = backlight_device_register("oaktrail-bl",
++				NULL, (void *)dev, &oaktrail_ops, &props);
++
++	if (IS_ERR(oaktrail_backlight_device))
++		return PTR_ERR(oaktrail_backlight_device);
++
++	ret = device_backlight_init(dev);
++	if (ret < 0) {
++		backlight_device_unregister(oaktrail_backlight_device);
++		return ret;
++	}
++	oaktrail_backlight_device->props.brightness = 100;
++	oaktrail_backlight_device->props.max_brightness = 100;
++	backlight_update_status(oaktrail_backlight_device);
++	dev_priv->backlight_device = oaktrail_backlight_device;
++	return 0;
++}
++
++#endif
++
++/*
++ *	Provide the Moorestown specific chip logic and low level methods
++ *	for power management
++ */
++
++/**
++ *	oaktrail_save_display_registers	-	save registers lost on suspend
++ *	@dev: our DRM device
++ *
++ *	Save the state we need in order to be able to restore the interface
++ *	upon resume from suspend
++ */
++static int oaktrail_save_display_registers(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_save_area *regs = &dev_priv->regs;
++	int i;
++	u32 pp_stat;
++
++	/* Display arbitration control + watermarks */
++	regs->psb.saveDSPARB = PSB_RVDC32(DSPARB);
++	regs->psb.saveDSPFW1 = PSB_RVDC32(DSPFW1);
++	regs->psb.saveDSPFW2 = PSB_RVDC32(DSPFW2);
++	regs->psb.saveDSPFW3 = PSB_RVDC32(DSPFW3);
++	regs->psb.saveDSPFW4 = PSB_RVDC32(DSPFW4);
++	regs->psb.saveDSPFW5 = PSB_RVDC32(DSPFW5);
++	regs->psb.saveDSPFW6 = PSB_RVDC32(DSPFW6);
++	regs->psb.saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
++
++	/* Pipe & plane A info */
++	regs->psb.savePIPEACONF = PSB_RVDC32(PIPEACONF);
++	regs->psb.savePIPEASRC = PSB_RVDC32(PIPEASRC);
++	regs->psb.saveFPA0 = PSB_RVDC32(MRST_FPA0);
++	regs->psb.saveFPA1 = PSB_RVDC32(MRST_FPA1);
++	regs->psb.saveDPLL_A = PSB_RVDC32(MRST_DPLL_A);
++	regs->psb.saveHTOTAL_A = PSB_RVDC32(HTOTAL_A);
++	regs->psb.saveHBLANK_A = PSB_RVDC32(HBLANK_A);
++	regs->psb.saveHSYNC_A = PSB_RVDC32(HSYNC_A);
++	regs->psb.saveVTOTAL_A = PSB_RVDC32(VTOTAL_A);
++	regs->psb.saveVBLANK_A = PSB_RVDC32(VBLANK_A);
++	regs->psb.saveVSYNC_A = PSB_RVDC32(VSYNC_A);
++	regs->psb.saveBCLRPAT_A = PSB_RVDC32(BCLRPAT_A);
++	regs->psb.saveDSPACNTR = PSB_RVDC32(DSPACNTR);
++	regs->psb.saveDSPASTRIDE = PSB_RVDC32(DSPASTRIDE);
++	regs->psb.saveDSPAADDR = PSB_RVDC32(DSPABASE);
++	regs->psb.saveDSPASURF = PSB_RVDC32(DSPASURF);
++	regs->psb.saveDSPALINOFF = PSB_RVDC32(DSPALINOFF);
++	regs->psb.saveDSPATILEOFF = PSB_RVDC32(DSPATILEOFF);
++
++	/* Save cursor regs */
++	regs->psb.saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
++	regs->psb.saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
++	regs->psb.saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
++
++	/* Save palette (gamma) */
++	for (i = 0; i < 256; i++)
++		regs->psb.save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
++
++	if (dev_priv->hdmi_priv)
++		oaktrail_hdmi_save(dev);
++
++	/* Save performance state */
++	regs->psb.savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
++
++	/* LVDS state */
++	regs->psb.savePP_CONTROL = PSB_RVDC32(PP_CONTROL);
++	regs->psb.savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
++	regs->psb.savePFIT_AUTO_RATIOS = PSB_RVDC32(PFIT_AUTO_RATIOS);
++	regs->saveBLC_PWM_CTL = PSB_RVDC32(BLC_PWM_CTL);
++	regs->saveBLC_PWM_CTL2 = PSB_RVDC32(BLC_PWM_CTL2);
++	regs->psb.saveLVDS = PSB_RVDC32(LVDS);
++	regs->psb.savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
++	regs->psb.savePP_ON_DELAYS = PSB_RVDC32(LVDSPP_ON);
++	regs->psb.savePP_OFF_DELAYS = PSB_RVDC32(LVDSPP_OFF);
++	regs->psb.savePP_DIVISOR = PSB_RVDC32(PP_CYCLE);
++
++	/* HW overlay */
++	regs->psb.saveOV_OVADD = PSB_RVDC32(OV_OVADD);
++	regs->psb.saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
++	regs->psb.saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
++	regs->psb.saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
++	regs->psb.saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
++	regs->psb.saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
++	regs->psb.saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
++
++	/* DPST registers */
++	regs->psb.saveHISTOGRAM_INT_CONTROL_REG =
++					PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++	regs->psb.saveHISTOGRAM_LOGIC_CONTROL_REG =
++					PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++	regs->psb.savePWM_CONTROL_LOGIC = PSB_RVDC32(PWM_CONTROL_LOGIC);
++
++	if (dev_priv->iLVDS_enable) {
++		/* Shut down the panel */
++		PSB_WVDC32(0, PP_CONTROL);
++
++		do {
++			pp_stat = PSB_RVDC32(PP_STATUS);
++		} while (pp_stat & 0x80000000);
++
++		/* Turn off the plane */
++		PSB_WVDC32(0x58000000, DSPACNTR);
++		/* Trigger the plane disable */
++		PSB_WVDC32(0, DSPASURF);
++
++		/* Wait ~4 ticks */
++		msleep(4);
++
++		/* Turn off pipe */
++		PSB_WVDC32(0x0, PIPEACONF);
++		/* Wait ~8 ticks */
++		msleep(8);
++
++		/* Turn off PLLs */
++		PSB_WVDC32(0, MRST_DPLL_A);
++	}
++	return 0;
++}
++
++/**
++ *	oaktrail_restore_display_registers	-	restore lost register state
++ *	@dev: our DRM device
++ *
++ *	Restore register state that was lost during suspend and resume.
++ */
++static int oaktrail_restore_display_registers(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_save_area *regs = &dev_priv->regs;
++	u32 pp_stat;
++	int i;
++
++	/* Display arbitration + watermarks */
++	PSB_WVDC32(regs->psb.saveDSPARB, DSPARB);
++	PSB_WVDC32(regs->psb.saveDSPFW1, DSPFW1);
++	PSB_WVDC32(regs->psb.saveDSPFW2, DSPFW2);
++	PSB_WVDC32(regs->psb.saveDSPFW3, DSPFW3);
++	PSB_WVDC32(regs->psb.saveDSPFW4, DSPFW4);
++	PSB_WVDC32(regs->psb.saveDSPFW5, DSPFW5);
++	PSB_WVDC32(regs->psb.saveDSPFW6, DSPFW6);
++	PSB_WVDC32(regs->psb.saveCHICKENBIT, DSPCHICKENBIT);
++
++	/* Make sure VGA plane is off. it initializes to on after reset!*/
++	PSB_WVDC32(0x80000000, VGACNTRL);
++
++	/* set the plls */
++	PSB_WVDC32(regs->psb.saveFPA0, MRST_FPA0);
++	PSB_WVDC32(regs->psb.saveFPA1, MRST_FPA1);
++
++	/* Actually enable it */
++	PSB_WVDC32(regs->psb.saveDPLL_A, MRST_DPLL_A);
++	DRM_UDELAY(150);
++
++	/* Restore mode */
++	PSB_WVDC32(regs->psb.saveHTOTAL_A, HTOTAL_A);
++	PSB_WVDC32(regs->psb.saveHBLANK_A, HBLANK_A);
++	PSB_WVDC32(regs->psb.saveHSYNC_A, HSYNC_A);
++	PSB_WVDC32(regs->psb.saveVTOTAL_A, VTOTAL_A);
++	PSB_WVDC32(regs->psb.saveVBLANK_A, VBLANK_A);
++	PSB_WVDC32(regs->psb.saveVSYNC_A, VSYNC_A);
++	PSB_WVDC32(regs->psb.savePIPEASRC, PIPEASRC);
++	PSB_WVDC32(regs->psb.saveBCLRPAT_A, BCLRPAT_A);
++
++	/* Restore performance mode*/
++	PSB_WVDC32(regs->psb.savePERF_MODE, MRST_PERF_MODE);
++
++	/* Enable the pipe*/
++	if (dev_priv->iLVDS_enable)
++		PSB_WVDC32(regs->psb.savePIPEACONF, PIPEACONF);
++
++	/* Set up the plane*/
++	PSB_WVDC32(regs->psb.saveDSPALINOFF, DSPALINOFF);
++	PSB_WVDC32(regs->psb.saveDSPASTRIDE, DSPASTRIDE);
++	PSB_WVDC32(regs->psb.saveDSPATILEOFF, DSPATILEOFF);
++
++	/* Enable the plane */
++	PSB_WVDC32(regs->psb.saveDSPACNTR, DSPACNTR);
++	PSB_WVDC32(regs->psb.saveDSPASURF, DSPASURF);
++
++	/* Enable Cursor A */
++	PSB_WVDC32(regs->psb.saveDSPACURSOR_CTRL, CURACNTR);
++	PSB_WVDC32(regs->psb.saveDSPACURSOR_POS, CURAPOS);
++	PSB_WVDC32(regs->psb.saveDSPACURSOR_BASE, CURABASE);
++
++	/* Restore palette (gamma) */
++	for (i = 0; i < 256; i++)
++		PSB_WVDC32(regs->psb.save_palette_a[i], PALETTE_A + (i << 2));
++
++	if (dev_priv->hdmi_priv)
++		oaktrail_hdmi_restore(dev);
++
++	if (dev_priv->iLVDS_enable) {
++		PSB_WVDC32(regs->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
++		PSB_WVDC32(regs->psb.saveLVDS, LVDS); /*port 61180h*/
++		PSB_WVDC32(regs->psb.savePFIT_CONTROL, PFIT_CONTROL);
++		PSB_WVDC32(regs->psb.savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
++		PSB_WVDC32(regs->psb.savePFIT_AUTO_RATIOS, PFIT_AUTO_RATIOS);
++		PSB_WVDC32(regs->saveBLC_PWM_CTL, BLC_PWM_CTL);
++		PSB_WVDC32(regs->psb.savePP_ON_DELAYS, LVDSPP_ON);
++		PSB_WVDC32(regs->psb.savePP_OFF_DELAYS, LVDSPP_OFF);
++		PSB_WVDC32(regs->psb.savePP_DIVISOR, PP_CYCLE);
++		PSB_WVDC32(regs->psb.savePP_CONTROL, PP_CONTROL);
++	}
++
++	/* Wait for cycle delay */
++	do {
++		pp_stat = PSB_RVDC32(PP_STATUS);
++	} while (pp_stat & 0x08000000);
++
++	/* Wait for panel power up */
++	do {
++		pp_stat = PSB_RVDC32(PP_STATUS);
++	} while (pp_stat & 0x10000000);
++
++	/* Restore HW overlay */
++	PSB_WVDC32(regs->psb.saveOV_OVADD, OV_OVADD);
++	PSB_WVDC32(regs->psb.saveOV_OGAMC0, OV_OGAMC0);
++	PSB_WVDC32(regs->psb.saveOV_OGAMC1, OV_OGAMC1);
++	PSB_WVDC32(regs->psb.saveOV_OGAMC2, OV_OGAMC2);
++	PSB_WVDC32(regs->psb.saveOV_OGAMC3, OV_OGAMC3);
++	PSB_WVDC32(regs->psb.saveOV_OGAMC4, OV_OGAMC4);
++	PSB_WVDC32(regs->psb.saveOV_OGAMC5, OV_OGAMC5);
++
++	/* DPST registers */
++	PSB_WVDC32(regs->psb.saveHISTOGRAM_INT_CONTROL_REG,
++						HISTOGRAM_INT_CONTROL);
++	PSB_WVDC32(regs->psb.saveHISTOGRAM_LOGIC_CONTROL_REG,
++						HISTOGRAM_LOGIC_CONTROL);
++	PSB_WVDC32(regs->psb.savePWM_CONTROL_LOGIC, PWM_CONTROL_LOGIC);
++
++	return 0;
++}
++
++/**
++ *	oaktrail_power_down	-	power down the display island
++ *	@dev: our DRM device
++ *
++ *	Power down the display interface of our device
++ */
++static int oaktrail_power_down(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 pwr_mask ;
++	u32 pwr_sts;
++
++	pwr_mask = PSB_PWRGT_DISPLAY_MASK;
++	outl(pwr_mask, dev_priv->ospm_base + PSB_PM_SSC);
++
++	while (true) {
++		pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
++		if ((pwr_sts & pwr_mask) == pwr_mask)
++			break;
++		else
++			udelay(10);
++	}
++	return 0;
++}
++
++/*
++ * oaktrail_power_up
++ *
++ * Restore power to the specified island(s) (powergating)
++ */
++static int oaktrail_power_up(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
++	u32 pwr_sts, pwr_cnt;
++
++	pwr_cnt = inl(dev_priv->ospm_base + PSB_PM_SSC);
++	pwr_cnt &= ~pwr_mask;
++	outl(pwr_cnt, (dev_priv->ospm_base + PSB_PM_SSC));
++
++	while (true) {
++		pwr_sts = inl(dev_priv->ospm_base + PSB_PM_SSS);
++		if ((pwr_sts & pwr_mask) == 0)
++			break;
++		else
++			udelay(10);
++	}
++	return 0;
++}
++
++
++static int oaktrail_chip_setup(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
++	int ret;
++	
++	ret = mid_chip_setup(dev);
++	if (ret < 0)
++		return ret;
++	if (vbt->size == 0) {
++		/* Now pull the BIOS data */
++		gma_intel_opregion_init(dev);
++		psb_intel_init_bios(dev);
++	}
++	return 0;
++}
++
++static void oaktrail_teardown(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
++
++	oaktrail_hdmi_teardown(dev);
++	if (vbt->size == 0)
++		psb_intel_destroy_bios(dev);
++}
++
++const struct psb_ops oaktrail_chip_ops = {
++	.name = "Oaktrail",
++	.accel_2d = 1,
++	.pipes = 2,
++	.crtcs = 2,
++	.sgx_offset = MRST_SGX_OFFSET,
++
++	.chip_setup = oaktrail_chip_setup,
++	.chip_teardown = oaktrail_teardown,
++	.crtc_helper = &oaktrail_helper_funcs,
++	.crtc_funcs = &psb_intel_crtc_funcs,
++
++	.output_init = oaktrail_output_init,
++
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++	.backlight_init = oaktrail_backlight_init,
++#endif
++
++	.save_regs = oaktrail_save_display_registers,
++	.restore_regs = oaktrail_restore_display_registers,
++	.power_down = oaktrail_power_down,
++	.power_up = oaktrail_power_up,
++
++	.i2c_bus = 1,
++};
+diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+new file mode 100644
+index 0000000..f8b367b
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+@@ -0,0 +1,540 @@
++/*
++ * Copyright © 2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *	Li Peng <peng.li at intel.com>
++ */
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_drv.h"
++
++#define HDMI_READ(reg)		readl(hdmi_dev->regs + (reg))
++#define HDMI_WRITE(reg, val)	writel(val, hdmi_dev->regs + (reg))
++
++#define HDMI_HCR	0x1000
++#define HCR_ENABLE_HDCP		(1 << 5)
++#define HCR_ENABLE_AUDIO	(1 << 2)
++#define HCR_ENABLE_PIXEL	(1 << 1)
++#define HCR_ENABLE_TMDS		(1 << 0)
++
++#define HDMI_HICR	0x1004
++#define HDMI_HSR	0x1008
++#define HDMI_HISR	0x100C
++#define HDMI_DETECT_HDP		(1 << 0)
++
++#define HDMI_VIDEO_REG	0x3000
++#define HDMI_UNIT_EN		(1 << 7)
++#define HDMI_MODE_OUTPUT	(1 << 0)
++#define HDMI_HBLANK_A	0x3100
++
++#define HDMI_AUDIO_CTRL	0x4000
++#define HDMI_ENABLE_AUDIO	(1 << 0)
++
++#define PCH_HTOTAL_B	0x3100
++#define PCH_HBLANK_B	0x3104
++#define PCH_HSYNC_B	0x3108
++#define PCH_VTOTAL_B	0x310C
++#define PCH_VBLANK_B	0x3110
++#define PCH_VSYNC_B	0x3114
++#define PCH_PIPEBSRC	0x311C
++
++#define PCH_PIPEB_DSL	0x3800
++#define PCH_PIPEB_SLC	0x3804
++#define PCH_PIPEBCONF	0x3808
++#define PCH_PIPEBSTAT	0x3824
++
++#define CDVO_DFT	0x5000
++#define CDVO_SLEWRATE	0x5004
++#define CDVO_STRENGTH	0x5008
++#define CDVO_RCOMP	0x500C
++
++#define DPLL_CTRL       0x6000
++#define DPLL_PDIV_SHIFT		16
++#define DPLL_PDIV_MASK		(0xf << 16)
++#define DPLL_PWRDN		(1 << 4)
++#define DPLL_RESET		(1 << 3)
++#define DPLL_FASTEN		(1 << 2)
++#define DPLL_ENSTAT		(1 << 1)
++#define DPLL_DITHEN		(1 << 0)
++
++#define DPLL_DIV_CTRL   0x6004
++#define DPLL_CLKF_MASK		0xffffffc0
++#define DPLL_CLKR_MASK		(0x3f)
++
++#define DPLL_CLK_ENABLE 0x6008
++#define DPLL_EN_DISP		(1 << 31)
++#define DPLL_SEL_HDMI		(1 << 8)
++#define DPLL_EN_HDMI		(1 << 1)
++#define DPLL_EN_VGA		(1 << 0)
++
++#define DPLL_ADJUST     0x600C
++#define DPLL_STATUS     0x6010
++#define DPLL_UPDATE     0x6014
++#define DPLL_DFT        0x6020
++
++struct intel_range {
++	int	min, max;
++};
++
++struct oaktrail_hdmi_limit {
++	struct intel_range vco, np, nr, nf;
++};
++
++struct oaktrail_hdmi_clock {
++	int np;
++	int nr;
++	int nf;
++	int dot;
++};
++
++#define VCO_MIN		320000
++#define VCO_MAX		1650000
++#define	NP_MIN		1
++#define	NP_MAX		15
++#define	NR_MIN		1
++#define	NR_MAX		64
++#define NF_MIN		2
++#define NF_MAX		4095
++
++static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = {
++	.vco = { .min = VCO_MIN,		.max = VCO_MAX },
++	.np  = { .min = NP_MIN,			.max = NP_MAX  },
++	.nr  = { .min = NR_MIN,			.max = NR_MAX  },
++	.nf  = { .min = NF_MIN,			.max = NF_MAX  },
++};
++
++static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
++
++	HDMI_WRITE(HDMI_HCR, 0x67);
++	HDMI_READ(HDMI_HCR);
++
++	HDMI_WRITE(0x51a8, 0x10);
++	HDMI_READ(0x51a8);
++
++	HDMI_WRITE(HDMI_AUDIO_CTRL, 0x1);
++	HDMI_READ(HDMI_AUDIO_CTRL);
++}
++
++static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
++
++	HDMI_WRITE(0x51a8, 0x0);
++	HDMI_READ(0x51a8);
++
++	HDMI_WRITE(HDMI_AUDIO_CTRL, 0x0);
++	HDMI_READ(HDMI_AUDIO_CTRL);
++
++	HDMI_WRITE(HDMI_HCR, 0x47);
++	HDMI_READ(HDMI_HCR);
++}
++
++static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
++{
++	static int dpms_mode = -1;
++
++	struct drm_device *dev = encoder->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
++	u32 temp;
++
++	if (dpms_mode == mode)
++		return;
++
++	if (mode != DRM_MODE_DPMS_ON)
++		temp = 0x0;
++	else
++		temp = 0x99;
++
++	dpms_mode = mode;
++	HDMI_WRITE(HDMI_VIDEO_REG, temp);
++}
++
++static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
++				struct drm_display_mode *mode)
++{
++	struct drm_psb_private *dev_priv = connector->dev->dev_private;
++	if (mode->clock > 165000)
++		return MODE_CLOCK_HIGH;
++	if (mode->clock < 20000)
++		return MODE_CLOCK_LOW;
++
++	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++		return MODE_NO_DBLESCAN;
++
++	/* We assume worst case scenario of 32 bpp here, since we don't know */
++	if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
++	    dev_priv->vram_stolen_size)
++		return MODE_MEM;
++
++	return MODE_OK;
++}
++
++static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
++				 struct drm_display_mode *mode,
++				 struct drm_display_mode *adjusted_mode)
++{
++	return true;
++}
++
++static enum drm_connector_status
++oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
++{
++	enum drm_connector_status status;
++	struct drm_device *dev = connector->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
++	u32 temp;
++
++	temp = HDMI_READ(HDMI_HSR);
++	DRM_DEBUG_KMS("HDMI_HSR %x\n", temp);
++
++	if ((temp & HDMI_DETECT_HDP) != 0)
++		status = connector_status_connected;
++	else
++		status = connector_status_disconnected;
++
++	return status;
++}
++
++static const unsigned char raw_edid[] = {
++	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x10, 0xac, 0x2f, 0xa0,
++	0x53, 0x55, 0x33, 0x30, 0x16, 0x13, 0x01, 0x03, 0x0e, 0x3a, 0x24, 0x78,
++	0xea, 0xe9, 0xf5, 0xac, 0x51, 0x30, 0xb4, 0x25, 0x11, 0x50, 0x54, 0xa5,
++	0x4b, 0x00, 0x81, 0x80, 0xa9, 0x40, 0x71, 0x4f, 0xb3, 0x00, 0x01, 0x01,
++	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
++	0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x46, 0x6c, 0x21, 0x00, 0x00, 0x1a,
++	0x00, 0x00, 0x00, 0xff, 0x00, 0x47, 0x4e, 0x37, 0x32, 0x31, 0x39, 0x35,
++	0x52, 0x30, 0x33, 0x55, 0x53, 0x0a, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x44,
++	0x45, 0x4c, 0x4c, 0x20, 0x32, 0x37, 0x30, 0x39, 0x57, 0x0a, 0x20, 0x20,
++	0x00, 0x00, 0x00, 0xfd, 0x00, 0x38, 0x4c, 0x1e, 0x53, 0x11, 0x00, 0x0a,
++	0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x8d
++};
++
++static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct i2c_adapter *i2c_adap;
++	struct edid *edid;
++	struct drm_display_mode *mode, *t;
++	int i = 0, ret = 0;
++
++	i2c_adap = i2c_get_adapter(3);
++	if (i2c_adap == NULL) {
++		DRM_ERROR("No ddc adapter available!\n");
++		edid = (struct edid *)raw_edid;
++	} else {
++		edid = (struct edid *)raw_edid;
++		/* FIXME ? edid = drm_get_edid(connector, i2c_adap); */
++	}
++
++	if (edid) {
++		drm_mode_connector_update_edid_property(connector, edid);
++		ret = drm_add_edid_modes(connector, edid);
++		connector->display_info.raw_edid = NULL;
++	}
++
++	/*
++	 * prune modes that require frame buffer bigger than stolen mem
++	 */
++	list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
++		if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
++			i++;
++			drm_mode_remove(connector, mode);
++		}
++	}
++	return ret - i;
++}
++
++static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
++			       struct drm_display_mode *mode,
++			       struct drm_display_mode *adjusted_mode)
++{
++	struct drm_device *dev = encoder->dev;
++
++	oaktrail_hdmi_audio_enable(dev);
++	return;
++}
++
++static void oaktrail_hdmi_destroy(struct drm_connector *connector)
++{
++	return;
++}
++
++static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
++	.dpms = oaktrail_hdmi_dpms,
++	.mode_fixup = oaktrail_hdmi_mode_fixup,
++	.prepare = psb_intel_encoder_prepare,
++	.mode_set = oaktrail_hdmi_mode_set,
++	.commit = psb_intel_encoder_commit,
++};
++
++static const struct drm_connector_helper_funcs
++					oaktrail_hdmi_connector_helper_funcs = {
++	.get_modes = oaktrail_hdmi_get_modes,
++	.mode_valid = oaktrail_hdmi_mode_valid,
++	.best_encoder = psb_intel_best_encoder,
++};
++
++static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
++	.dpms = drm_helper_connector_dpms,
++	.detect = oaktrail_hdmi_detect,
++	.fill_modes = drm_helper_probe_single_connector_modes,
++	.destroy = oaktrail_hdmi_destroy,
++};
++
++static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder)
++{
++	drm_encoder_cleanup(encoder);
++}
++
++static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
++	.destroy = oaktrail_hdmi_enc_destroy,
++};
++
++void oaktrail_hdmi_init(struct drm_device *dev,
++					struct psb_intel_mode_device *mode_dev)
++{
++	struct psb_intel_encoder *psb_intel_encoder;
++	struct psb_intel_connector *psb_intel_connector;
++	struct drm_connector *connector;
++	struct drm_encoder *encoder;
++
++	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
++	if (!psb_intel_encoder)
++		return;
++
++	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
++	if (!psb_intel_connector)
++		goto failed_connector;
++
++	connector = &psb_intel_connector->base;
++	encoder = &psb_intel_encoder->base;
++	drm_connector_init(dev, connector,
++			   &oaktrail_hdmi_connector_funcs,
++			   DRM_MODE_CONNECTOR_DVID);
++
++	drm_encoder_init(dev, encoder,
++			 &oaktrail_hdmi_enc_funcs,
++			 DRM_MODE_ENCODER_TMDS);
++
++	psb_intel_connector_attach_encoder(psb_intel_connector,
++					   psb_intel_encoder);
++
++	psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
++	drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
++	drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
++
++	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++	connector->interlace_allowed = false;
++	connector->doublescan_allowed = false;
++	drm_sysfs_connector_add(connector);
++
++	return;
++
++failed_connector:
++	kfree(psb_intel_encoder);
++}
++
++static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
++	{ 0 }
++};
++
++void oaktrail_hdmi_setup(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct pci_dev *pdev;
++	struct oaktrail_hdmi_dev *hdmi_dev;
++	int ret;
++
++	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x080d, NULL);
++	if (!pdev)
++		return;
++
++	hdmi_dev = kzalloc(sizeof(struct oaktrail_hdmi_dev), GFP_KERNEL);
++	if (!hdmi_dev) {
++		dev_err(dev->dev, "failed to allocate memory\n");
++		goto out;
++	}
++
++
++	ret = pci_enable_device(pdev);
++	if (ret) {
++		dev_err(dev->dev, "failed to enable hdmi controller\n");
++		goto free;
++	}
++
++	hdmi_dev->mmio = pci_resource_start(pdev, 0);
++	hdmi_dev->mmio_len = pci_resource_len(pdev, 0);
++	hdmi_dev->regs = ioremap(hdmi_dev->mmio, hdmi_dev->mmio_len);
++	if (!hdmi_dev->regs) {
++		dev_err(dev->dev, "failed to map hdmi mmio\n");
++		goto free;
++	}
++
++	hdmi_dev->dev = pdev;
++	pci_set_drvdata(pdev, hdmi_dev);
++
++	/* Initialize i2c controller */
++	ret = oaktrail_hdmi_i2c_init(hdmi_dev->dev);
++	if (ret)
++		dev_err(dev->dev, "HDMI I2C initialization failed\n");
++
++	dev_priv->hdmi_priv = hdmi_dev;
++	oaktrail_hdmi_audio_disable(dev);
++	return;
++
++free:
++	kfree(hdmi_dev);
++out:
++	return;
++}
++
++void oaktrail_hdmi_teardown(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
++	struct pci_dev *pdev;
++
++	if (hdmi_dev) {
++		pdev = hdmi_dev->dev;
++		pci_set_drvdata(pdev, NULL);
++		oaktrail_hdmi_i2c_exit(pdev);
++		iounmap(hdmi_dev->regs);
++		kfree(hdmi_dev);
++		pci_dev_put(pdev);
++	}
++}
++
++/* save HDMI register state */
++void oaktrail_hdmi_save(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
++	struct psb_state *regs = &dev_priv->regs.psb;
++	int i;
++
++	/* dpll */
++	hdmi_dev->saveDPLL_CTRL = PSB_RVDC32(DPLL_CTRL);
++	hdmi_dev->saveDPLL_DIV_CTRL = PSB_RVDC32(DPLL_DIV_CTRL);
++	hdmi_dev->saveDPLL_ADJUST = PSB_RVDC32(DPLL_ADJUST);
++	hdmi_dev->saveDPLL_UPDATE = PSB_RVDC32(DPLL_UPDATE);
++	hdmi_dev->saveDPLL_CLK_ENABLE = PSB_RVDC32(DPLL_CLK_ENABLE);
++
++	/* pipe B */
++	regs->savePIPEBCONF = PSB_RVDC32(PIPEBCONF);
++	regs->savePIPEBSRC  = PSB_RVDC32(PIPEBSRC);
++	regs->saveHTOTAL_B  = PSB_RVDC32(HTOTAL_B);
++	regs->saveHBLANK_B  = PSB_RVDC32(HBLANK_B);
++	regs->saveHSYNC_B   = PSB_RVDC32(HSYNC_B);
++	regs->saveVTOTAL_B  = PSB_RVDC32(VTOTAL_B);
++	regs->saveVBLANK_B  = PSB_RVDC32(VBLANK_B);
++	regs->saveVSYNC_B   = PSB_RVDC32(VSYNC_B);
++
++	hdmi_dev->savePCH_PIPEBCONF = PSB_RVDC32(PCH_PIPEBCONF);
++	hdmi_dev->savePCH_PIPEBSRC = PSB_RVDC32(PCH_PIPEBSRC);
++	hdmi_dev->savePCH_HTOTAL_B = PSB_RVDC32(PCH_HTOTAL_B);
++	hdmi_dev->savePCH_HBLANK_B = PSB_RVDC32(PCH_HBLANK_B);
++	hdmi_dev->savePCH_HSYNC_B  = PSB_RVDC32(PCH_HSYNC_B);
++	hdmi_dev->savePCH_VTOTAL_B = PSB_RVDC32(PCH_VTOTAL_B);
++	hdmi_dev->savePCH_VBLANK_B = PSB_RVDC32(PCH_VBLANK_B);
++	hdmi_dev->savePCH_VSYNC_B  = PSB_RVDC32(PCH_VSYNC_B);
++
++	/* plane */
++	regs->saveDSPBCNTR = PSB_RVDC32(DSPBCNTR);
++	regs->saveDSPBSTRIDE = PSB_RVDC32(DSPBSTRIDE);
++	regs->saveDSPBADDR = PSB_RVDC32(DSPBBASE);
++	regs->saveDSPBSURF = PSB_RVDC32(DSPBSURF);
++	regs->saveDSPBLINOFF = PSB_RVDC32(DSPBLINOFF);
++	regs->saveDSPBTILEOFF = PSB_RVDC32(DSPBTILEOFF);
++
++	/* cursor B */
++	regs->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
++	regs->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
++	regs->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
++
++	/* save palette */
++	for (i = 0; i < 256; i++)
++		regs->save_palette_b[i] = PSB_RVDC32(PALETTE_B + (i << 2));
++}
++
++/* restore HDMI register state */
++void oaktrail_hdmi_restore(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
++	struct psb_state *regs = &dev_priv->regs.psb;
++	int i;
++
++	/* dpll */
++	PSB_WVDC32(hdmi_dev->saveDPLL_CTRL, DPLL_CTRL);
++	PSB_WVDC32(hdmi_dev->saveDPLL_DIV_CTRL, DPLL_DIV_CTRL);
++	PSB_WVDC32(hdmi_dev->saveDPLL_ADJUST, DPLL_ADJUST);
++	PSB_WVDC32(hdmi_dev->saveDPLL_UPDATE, DPLL_UPDATE);
++	PSB_WVDC32(hdmi_dev->saveDPLL_CLK_ENABLE, DPLL_CLK_ENABLE);
++	DRM_UDELAY(150);
++
++	/* pipe */
++	PSB_WVDC32(regs->savePIPEBSRC, PIPEBSRC);
++	PSB_WVDC32(regs->saveHTOTAL_B, HTOTAL_B);
++	PSB_WVDC32(regs->saveHBLANK_B, HBLANK_B);
++	PSB_WVDC32(regs->saveHSYNC_B,  HSYNC_B);
++	PSB_WVDC32(regs->saveVTOTAL_B, VTOTAL_B);
++	PSB_WVDC32(regs->saveVBLANK_B, VBLANK_B);
++	PSB_WVDC32(regs->saveVSYNC_B,  VSYNC_B);
++
++	PSB_WVDC32(hdmi_dev->savePCH_PIPEBSRC, PCH_PIPEBSRC);
++	PSB_WVDC32(hdmi_dev->savePCH_HTOTAL_B, PCH_HTOTAL_B);
++	PSB_WVDC32(hdmi_dev->savePCH_HBLANK_B, PCH_HBLANK_B);
++	PSB_WVDC32(hdmi_dev->savePCH_HSYNC_B,  PCH_HSYNC_B);
++	PSB_WVDC32(hdmi_dev->savePCH_VTOTAL_B, PCH_VTOTAL_B);
++	PSB_WVDC32(hdmi_dev->savePCH_VBLANK_B, PCH_VBLANK_B);
++	PSB_WVDC32(hdmi_dev->savePCH_VSYNC_B,  PCH_VSYNC_B);
++
++	PSB_WVDC32(regs->savePIPEBCONF, PIPEBCONF);
++	PSB_WVDC32(hdmi_dev->savePCH_PIPEBCONF, PCH_PIPEBCONF);
++
++	/* plane */
++	PSB_WVDC32(regs->saveDSPBLINOFF, DSPBLINOFF);
++	PSB_WVDC32(regs->saveDSPBSTRIDE, DSPBSTRIDE);
++	PSB_WVDC32(regs->saveDSPBTILEOFF, DSPBTILEOFF);
++	PSB_WVDC32(regs->saveDSPBCNTR, DSPBCNTR);
++	PSB_WVDC32(regs->saveDSPBSURF, DSPBSURF);
++
++	/* cursor B */
++	PSB_WVDC32(regs->saveDSPBCURSOR_CTRL, CURBCNTR);
++	PSB_WVDC32(regs->saveDSPBCURSOR_POS, CURBPOS);
++	PSB_WVDC32(regs->saveDSPBCURSOR_BASE, CURBBASE);
++
++	/* restore palette */
++	for (i = 0; i < 256; i++)
++		PSB_WVDC32(regs->save_palette_b[i], PALETTE_B + (i << 2));
++}
+diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+new file mode 100644
+index 0000000..5e84fbd
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+@@ -0,0 +1,328 @@
++/*
++ * Copyright © 2010 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *	Li Peng <peng.li at intel.com>
++ */
++
++#include <linux/export.h>
++#include <linux/mutex.h>
++#include <linux/pci.h>
++#include <linux/i2c.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include "psb_drv.h"
++
++#define HDMI_READ(reg)		readl(hdmi_dev->regs + (reg))
++#define HDMI_WRITE(reg, val)	writel(val, hdmi_dev->regs + (reg))
++
++#define HDMI_HCR	0x1000
++#define HCR_DETECT_HDP		(1 << 6)
++#define HCR_ENABLE_HDCP		(1 << 5)
++#define HCR_ENABLE_AUDIO	(1 << 2)
++#define HCR_ENABLE_PIXEL	(1 << 1)
++#define HCR_ENABLE_TMDS		(1 << 0)
++#define HDMI_HICR	0x1004
++#define HDMI_INTR_I2C_ERROR	(1 << 4)
++#define HDMI_INTR_I2C_FULL	(1 << 3)
++#define HDMI_INTR_I2C_DONE	(1 << 2)
++#define HDMI_INTR_HPD		(1 << 0)
++#define HDMI_HSR	0x1008
++#define HDMI_HISR	0x100C
++#define HDMI_HI2CRDB0	0x1200
++#define HDMI_HI2CHCR	0x1240
++#define HI2C_HDCP_WRITE		(0 << 2)
++#define HI2C_HDCP_RI_READ	(1 << 2)
++#define HI2C_HDCP_READ		(2 << 2)
++#define HI2C_EDID_READ		(3 << 2)
++#define HI2C_READ_CONTINUE	(1 << 1)
++#define HI2C_ENABLE_TRANSACTION	(1 << 0)
++
++#define HDMI_ICRH	0x1100
++#define HDMI_HI2CTDR0	0x1244
++#define HDMI_HI2CTDR1	0x1248
++
++#define I2C_STAT_INIT		0
++#define I2C_READ_DONE		1
++#define I2C_TRANSACTION_DONE	2
++
++struct hdmi_i2c_dev {
++	struct i2c_adapter *adap;
++	struct mutex i2c_lock;
++	struct completion complete;
++	int status;
++	struct i2c_msg *msg;
++	int buf_offset;
++};
++
++static void hdmi_i2c_irq_enable(struct oaktrail_hdmi_dev *hdmi_dev)
++{
++	u32 temp;
++
++	temp = HDMI_READ(HDMI_HICR);
++	temp |= (HDMI_INTR_I2C_ERROR | HDMI_INTR_I2C_FULL | HDMI_INTR_I2C_DONE);
++	HDMI_WRITE(HDMI_HICR, temp);
++	HDMI_READ(HDMI_HICR);
++}
++
++static void hdmi_i2c_irq_disable(struct oaktrail_hdmi_dev *hdmi_dev)
++{
++	HDMI_WRITE(HDMI_HICR, 0x0);
++	HDMI_READ(HDMI_HICR);
++}
++
++static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
++{
++	struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
++	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
++	u32 temp;
++
++	i2c_dev->status = I2C_STAT_INIT;
++	i2c_dev->msg = pmsg;
++	i2c_dev->buf_offset = 0;
++	INIT_COMPLETION(i2c_dev->complete);
++
++	/* Enable I2C transaction */
++	temp = ((pmsg->len) << 20) | HI2C_EDID_READ | HI2C_ENABLE_TRANSACTION;
++	HDMI_WRITE(HDMI_HI2CHCR, temp);
++	HDMI_READ(HDMI_HI2CHCR);
++
++	while (i2c_dev->status != I2C_TRANSACTION_DONE)
++		wait_for_completion_interruptible_timeout(&i2c_dev->complete,
++								10 * HZ);
++
++	return 0;
++}
++
++static int xfer_write(struct i2c_adapter *adap, struct i2c_msg *pmsg)
++{
++	/*
++	 * XXX: i2c write doesn't seem useful for EDID probe, don't do anything
++	 */
++	return 0;
++}
++
++static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap,
++				struct i2c_msg *pmsg,
++				int num)
++{
++	struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
++	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
++	int i;
++
++	mutex_lock(&i2c_dev->i2c_lock);
++
++	/* Enable i2c unit */
++	HDMI_WRITE(HDMI_ICRH, 0x00008760);
++
++	/* Enable irq */
++	hdmi_i2c_irq_enable(hdmi_dev);
++	for (i = 0; i < num; i++) {
++		if (pmsg->len && pmsg->buf) {
++			if (pmsg->flags & I2C_M_RD)
++				xfer_read(adap, pmsg);
++			else
++				xfer_write(adap, pmsg);
++		}
++		pmsg++;         /* next message */
++	}
++
++	/* Disable irq */
++	hdmi_i2c_irq_disable(hdmi_dev);
++
++	mutex_unlock(&i2c_dev->i2c_lock);
++
++	return i;
++}
++
++static u32 oaktrail_hdmi_i2c_func(struct i2c_adapter *adapter)
++{
++	return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
++}
++
++static const struct i2c_algorithm oaktrail_hdmi_i2c_algorithm = {
++	.master_xfer	= oaktrail_hdmi_i2c_access,
++	.functionality  = oaktrail_hdmi_i2c_func,
++};
++
++static struct i2c_adapter oaktrail_hdmi_i2c_adapter = {
++	.name		= "oaktrail_hdmi_i2c",
++	.nr		= 3,
++	.owner		= THIS_MODULE,
++	.class		= I2C_CLASS_DDC,
++	.algo		= &oaktrail_hdmi_i2c_algorithm,
++};
++
++static void hdmi_i2c_read(struct oaktrail_hdmi_dev *hdmi_dev)
++{
++	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
++	struct i2c_msg *msg = i2c_dev->msg;
++	u8 *buf = msg->buf;
++	u32 temp;
++	int i, offset;
++
++	offset = i2c_dev->buf_offset;
++	for (i = 0; i < 0x10; i++) {
++		temp = HDMI_READ(HDMI_HI2CRDB0 + (i * 4));
++		memcpy(buf + (offset + i * 4), &temp, 4);
++	}
++	i2c_dev->buf_offset += (0x10 * 4);
++
++	/* clearing read buffer full intr */
++	temp = HDMI_READ(HDMI_HISR);
++	HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_FULL);
++	HDMI_READ(HDMI_HISR);
++
++	/* continue read transaction */
++	temp = HDMI_READ(HDMI_HI2CHCR);
++	HDMI_WRITE(HDMI_HI2CHCR, temp | HI2C_READ_CONTINUE);
++	HDMI_READ(HDMI_HI2CHCR);
++
++	i2c_dev->status = I2C_READ_DONE;
++	return;
++}
++
++static void hdmi_i2c_transaction_done(struct oaktrail_hdmi_dev *hdmi_dev)
++{
++	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
++	u32 temp;
++
++	/* clear transaction done intr */
++	temp = HDMI_READ(HDMI_HISR);
++	HDMI_WRITE(HDMI_HISR, temp | HDMI_INTR_I2C_DONE);
++	HDMI_READ(HDMI_HISR);
++
++
++	temp = HDMI_READ(HDMI_HI2CHCR);
++	HDMI_WRITE(HDMI_HI2CHCR, temp & ~HI2C_ENABLE_TRANSACTION);
++	HDMI_READ(HDMI_HI2CHCR);
++
++	i2c_dev->status = I2C_TRANSACTION_DONE;
++	return;
++}
++
++static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
++{
++	struct oaktrail_hdmi_dev *hdmi_dev = dev;
++	struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
++	u32 stat;
++
++	stat = HDMI_READ(HDMI_HISR);
++
++	if (stat & HDMI_INTR_HPD) {
++		HDMI_WRITE(HDMI_HISR, stat | HDMI_INTR_HPD);
++		HDMI_READ(HDMI_HISR);
++	}
++
++	if (stat & HDMI_INTR_I2C_FULL)
++		hdmi_i2c_read(hdmi_dev);
++
++	if (stat & HDMI_INTR_I2C_DONE)
++		hdmi_i2c_transaction_done(hdmi_dev);
++
++	complete(&i2c_dev->complete);
++
++	return IRQ_HANDLED;
++}
++
++/*
++ * choose alternate function 2 of GPIO pin 52, 53,
++ * which is used by HDMI I2C logic
++ */
++static void oaktrail_hdmi_i2c_gpio_fix(void)
++{
++	void *base;
++	unsigned int gpio_base = 0xff12c000;
++	int gpio_len = 0x1000;
++	u32 temp;
++
++	base = ioremap((resource_size_t)gpio_base, gpio_len);
++	if (base == NULL) {
++		DRM_ERROR("gpio ioremap fail\n");
++		return;
++	}
++
++	temp = readl(base + 0x44);
++	DRM_DEBUG_DRIVER("old gpio val %x\n", temp);
++	writel((temp | 0x00000a00), (base +  0x44));
++	temp = readl(base + 0x44);
++	DRM_DEBUG_DRIVER("new gpio val %x\n", temp);
++
++	iounmap(base);
++}
++
++int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
++{
++	struct oaktrail_hdmi_dev *hdmi_dev;
++	struct hdmi_i2c_dev *i2c_dev;
++	int ret;
++
++	hdmi_dev = pci_get_drvdata(dev);
++
++	i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
++	if (i2c_dev == NULL) {
++		DRM_ERROR("Can't allocate interface\n");
++		ret = -ENOMEM;
++		goto exit;
++	}
++
++	i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
++	i2c_dev->status = I2C_STAT_INIT;
++	init_completion(&i2c_dev->complete);
++	mutex_init(&i2c_dev->i2c_lock);
++	i2c_set_adapdata(&oaktrail_hdmi_i2c_adapter, hdmi_dev);
++	hdmi_dev->i2c_dev = i2c_dev;
++
++	/* Enable HDMI I2C function on gpio */
++	oaktrail_hdmi_i2c_gpio_fix();
++
++	/* request irq */
++	ret = request_irq(dev->irq, oaktrail_hdmi_i2c_handler, IRQF_SHARED,
++			  oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
++	if (ret) {
++		DRM_ERROR("Failed to request IRQ for I2C controller\n");
++		goto err;
++	}
++
++	/* Adapter registration */
++	ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
++	return ret;
++
++err:
++	kfree(i2c_dev);
++exit:
++	return ret;
++}
++
++void oaktrail_hdmi_i2c_exit(struct pci_dev *dev)
++{
++	struct oaktrail_hdmi_dev *hdmi_dev;
++	struct hdmi_i2c_dev *i2c_dev;
++
++	hdmi_dev = pci_get_drvdata(dev);
++	if (i2c_del_adapter(&oaktrail_hdmi_i2c_adapter))
++		DRM_DEBUG_DRIVER("Failed to delete hdmi-i2c adapter\n");
++
++	i2c_dev = hdmi_dev->i2c_dev;
++	kfree(i2c_dev);
++	free_irq(dev->irq, hdmi_dev);
++}
+diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
+new file mode 100644
+index 0000000..654f32b
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
+@@ -0,0 +1,448 @@
++/*
++ * Copyright © 2006-2009 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ *	Eric Anholt <eric at anholt.net>
++ *	Dave Airlie <airlied at linux.ie>
++ *	Jesse Barnes <jesse.barnes at intel.com>
++ */
++
++#include <linux/i2c.h>
++#include <drm/drmP.h>
++#include <asm/mrst.h>
++
++#include "intel_bios.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "power.h"
++#include <linux/pm_runtime.h>
++
++/* The max/min PWM frequency in BPCR[31:17] - */
++/* The smallest number is 1 (not 0) that can fit in the
++ * 15-bit field of BPCR, and it is then */
++/* shifted to the left by one bit to get the actual 16-bit
++ * value that the 15 bits correspond to. */
++#define MRST_BLC_MAX_PWM_REG_FREQ	    0xFFFF
++#define BRIGHTNESS_MAX_LEVEL 100
++
++/**
++ * Sets the power state for the panel.
++ */
++static void oaktrail_lvds_set_power(struct drm_device *dev,
++				struct psb_intel_encoder *psb_intel_encoder,
++				bool on)
++{
++	u32 pp_status;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (!gma_power_begin(dev, true))
++		return;
++
++	if (on) {
++		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++			  POWER_TARGET_ON);
++		do {
++			pp_status = REG_READ(PP_STATUS);
++		} while ((pp_status & (PP_ON | PP_READY)) == PP_READY);
++		dev_priv->is_lvds_on = true;
++		if (dev_priv->ops->lvds_bl_power)
++			dev_priv->ops->lvds_bl_power(dev, true);
++	} else {
++		if (dev_priv->ops->lvds_bl_power)
++			dev_priv->ops->lvds_bl_power(dev, false);
++		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++			  ~POWER_TARGET_ON);
++		do {
++			pp_status = REG_READ(PP_STATUS);
++		} while (pp_status & PP_ON);
++		dev_priv->is_lvds_on = false;
++		pm_request_idle(&dev->pdev->dev);
++	}
++	gma_power_end(dev);
++}
++
++static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct psb_intel_encoder *psb_intel_encoder =
++						to_psb_intel_encoder(encoder);
++
++	if (mode == DRM_MODE_DPMS_ON)
++		oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
++	else
++		oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
++
++	/* XXX: We never power down the LVDS pairs. */
++}
++
++static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
++			       struct drm_display_mode *mode,
++			       struct drm_display_mode *adjusted_mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++	struct drm_mode_config *mode_config = &dev->mode_config;
++	struct drm_connector *connector = NULL;
++	struct drm_crtc *crtc = encoder->crtc;
++	u32 lvds_port;
++	uint64_t v = DRM_MODE_SCALE_FULLSCREEN;
++
++	if (!gma_power_begin(dev, true))
++		return;
++
++	/*
++	 * The LVDS pin pair will already have been turned on in the
++	 * psb_intel_crtc_mode_set since it has a large impact on the DPLL
++	 * settings.
++	 */
++	lvds_port = (REG_READ(LVDS) &
++		    (~LVDS_PIPEB_SELECT)) |
++		    LVDS_PORT_EN |
++		    LVDS_BORDER_EN;
++
++	/* If the firmware says dither on Moorestown, or the BIOS does
++	   on Oaktrail then enable dithering */
++	if (mode_dev->panel_wants_dither || dev_priv->lvds_dither)
++		lvds_port |= MRST_PANEL_8TO6_DITHER_ENABLE;
++
++	REG_WRITE(LVDS, lvds_port);
++
++	/* Find the connector we're trying to set up */
++	list_for_each_entry(connector, &mode_config->connector_list, head) {
++		if (!connector->encoder || connector->encoder->crtc != crtc)
++			continue;
++	}
++
++	if (!connector) {
++		DRM_ERROR("Couldn't find connector when setting mode");
++		return;
++	}
++
++	drm_connector_property_get_value(
++		connector,
++		dev->mode_config.scaling_mode_property,
++		&v);
++
++	if (v == DRM_MODE_SCALE_NO_SCALE)
++		REG_WRITE(PFIT_CONTROL, 0);
++	else if (v == DRM_MODE_SCALE_ASPECT) {
++		if ((mode->vdisplay != adjusted_mode->crtc_vdisplay) ||
++		    (mode->hdisplay != adjusted_mode->crtc_hdisplay)) {
++			if ((adjusted_mode->crtc_hdisplay * mode->vdisplay) ==
++			    (mode->hdisplay * adjusted_mode->crtc_vdisplay))
++				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++			else if ((adjusted_mode->crtc_hdisplay *
++				mode->vdisplay) > (mode->hdisplay *
++				adjusted_mode->crtc_vdisplay))
++				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
++					  PFIT_SCALING_MODE_PILLARBOX);
++			else
++				REG_WRITE(PFIT_CONTROL, PFIT_ENABLE |
++					  PFIT_SCALING_MODE_LETTERBOX);
++		} else
++			REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++	} else /*(v == DRM_MODE_SCALE_FULLSCREEN)*/
++		REG_WRITE(PFIT_CONTROL, PFIT_ENABLE);
++
++	gma_power_end(dev);
++}
++
++static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_encoder *psb_intel_encoder =
++						to_psb_intel_encoder(encoder);
++	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++
++	if (!gma_power_begin(dev, true))
++		return;
++
++	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
++					  BACKLIGHT_DUTY_CYCLE_MASK);
++	oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
++	gma_power_end(dev);
++}
++
++static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 ret;
++
++	if (gma_power_begin(dev, false)) {
++		ret = ((REG_READ(BLC_PWM_CTL) &
++			  BACKLIGHT_MODULATION_FREQ_MASK) >>
++			  BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
++
++		gma_power_end(dev);
++	} else
++		ret = ((dev_priv->regs.saveBLC_PWM_CTL &
++			  BACKLIGHT_MODULATION_FREQ_MASK) >>
++			  BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
++
++	return ret;
++}
++
++static void oaktrail_lvds_commit(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_encoder *psb_intel_encoder =
++						to_psb_intel_encoder(encoder);
++	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++
++	if (mode_dev->backlight_duty_cycle == 0)
++		mode_dev->backlight_duty_cycle =
++					oaktrail_lvds_get_max_backlight(dev);
++	oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
++}
++
++static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
++	.dpms = oaktrail_lvds_dpms,
++	.mode_fixup = psb_intel_lvds_mode_fixup,
++	.prepare = oaktrail_lvds_prepare,
++	.mode_set = oaktrail_lvds_mode_set,
++	.commit = oaktrail_lvds_commit,
++};
++
++static struct drm_display_mode lvds_configuration_modes[] = {
++	/* hard coded fixed mode for TPO LTPS LPJ040K001A */
++	{ DRM_MODE("800x480",  DRM_MODE_TYPE_DRIVER, 33264, 800, 836,
++		   846, 1056, 0, 480, 489, 491, 525, 0, 0) },
++	/* hard coded fixed mode for LVDS 800x480 */
++	{ DRM_MODE("800x480",  DRM_MODE_TYPE_DRIVER, 30994, 800, 801,
++		   802, 1024, 0, 480, 481, 482, 525, 0, 0) },
++	/* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1072,
++		   1104, 1184, 0, 600, 603, 604, 608, 0, 0) },
++	/* hard coded fixed mode for Samsung 480wsvga LVDS 1024x600@75 */
++	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 53990, 1024, 1104,
++		   1136, 1184, 0, 600, 603, 604, 608, 0, 0) },
++	/* hard coded fixed mode for Sharp wsvga LVDS 1024x600 */
++	{ DRM_MODE("1024x600", DRM_MODE_TYPE_DRIVER, 48885, 1024, 1124,
++		   1204, 1312, 0, 600, 607, 610, 621, 0, 0) },
++	/* hard coded fixed mode for LVDS 1024x768 */
++	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
++		   1184, 1344, 0, 768, 771, 777, 806, 0, 0) },
++	/* hard coded fixed mode for LVDS 1366x768 */
++	{ DRM_MODE("1366x768", DRM_MODE_TYPE_DRIVER, 77500, 1366, 1430,
++		   1558, 1664, 0, 768, 769, 770, 776, 0, 0) },
++};
++
++/* Returns the panel fixed mode from configuration. */
++
++static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
++					struct psb_intel_mode_device *mode_dev)
++{
++	struct drm_display_mode *mode = NULL;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
++
++	mode_dev->panel_fixed_mode = NULL;
++
++	/* Use the firmware provided data on Moorestown */
++	if (dev_priv->vbt_data.size != 0x00) { /*if non-zero, then use vbt*/
++		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++		if (!mode)
++			return;
++
++		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
++		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
++		mode->hsync_start = mode->hdisplay + \
++				((ti->hsync_offset_hi << 8) | \
++				ti->hsync_offset_lo);
++		mode->hsync_end = mode->hsync_start + \
++				((ti->hsync_pulse_width_hi << 8) | \
++				ti->hsync_pulse_width_lo);
++		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
++							ti->hblank_lo);
++		mode->vsync_start = \
++			mode->vdisplay + ((ti->vsync_offset_hi << 4) | \
++						ti->vsync_offset_lo);
++		mode->vsync_end = \
++			mode->vsync_start + ((ti->vsync_pulse_width_hi << 4) | \
++						ti->vsync_pulse_width_lo);
++		mode->vtotal = mode->vdisplay + \
++				((ti->vblank_hi << 8) | ti->vblank_lo);
++		mode->clock = ti->pixel_clock * 10;
++#if 0
++		printk(KERN_INFO "hdisplay is %d\n", mode->hdisplay);
++		printk(KERN_INFO "vdisplay is %d\n", mode->vdisplay);
++		printk(KERN_INFO "HSS is %d\n", mode->hsync_start);
++		printk(KERN_INFO "HSE is %d\n", mode->hsync_end);
++		printk(KERN_INFO "htotal is %d\n", mode->htotal);
++		printk(KERN_INFO "VSS is %d\n", mode->vsync_start);
++		printk(KERN_INFO "VSE is %d\n", mode->vsync_end);
++		printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
++		printk(KERN_INFO "clock is %d\n", mode->clock);
++#endif
++		mode_dev->panel_fixed_mode = mode;
++	}
++
++	/* Use the BIOS VBT mode if available */
++	if (mode_dev->panel_fixed_mode == NULL && mode_dev->vbt_mode)
++		mode_dev->panel_fixed_mode = drm_mode_duplicate(dev,
++						mode_dev->vbt_mode);
++
++	/* Then try the LVDS VBT mode */
++	if (mode_dev->panel_fixed_mode == NULL)
++		if (dev_priv->lfp_lvds_vbt_mode)
++			mode_dev->panel_fixed_mode =
++				drm_mode_duplicate(dev,
++					dev_priv->lfp_lvds_vbt_mode);
++	/* Then guess */
++	if (mode_dev->panel_fixed_mode == NULL)
++		mode_dev->panel_fixed_mode
++			= drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
++
++	drm_mode_set_name(mode_dev->panel_fixed_mode);
++	drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
++}
++
++/**
++ * oaktrail_lvds_init - setup LVDS connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, register the LVDS DDC bus, and try to figure out what
++ * modes we can display on the LVDS panel (if present).
++ */
++void oaktrail_lvds_init(struct drm_device *dev,
++		    struct psb_intel_mode_device *mode_dev)
++{
++	struct psb_intel_encoder *psb_intel_encoder;
++	struct psb_intel_connector *psb_intel_connector;
++	struct drm_connector *connector;
++	struct drm_encoder *encoder;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct edid *edid;
++	struct i2c_adapter *i2c_adap;
++	struct drm_display_mode *scan;	/* *modes, *bios_mode; */
++
++	psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
++	if (!psb_intel_encoder)
++		return;
++
++	psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
++	if (!psb_intel_connector)
++		goto failed_connector;
++
++	connector = &psb_intel_connector->base;
++	encoder = &psb_intel_encoder->base;
++	dev_priv->is_lvds_on = true;
++	drm_connector_init(dev, connector,
++			   &psb_intel_lvds_connector_funcs,
++			   DRM_MODE_CONNECTOR_LVDS);
++
++	drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
++			 DRM_MODE_ENCODER_LVDS);
++
++	psb_intel_connector_attach_encoder(psb_intel_connector,
++					   psb_intel_encoder);
++	psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
++
++	drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
++	drm_connector_helper_add(connector,
++				 &psb_intel_lvds_connector_helper_funcs);
++	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++	connector->interlace_allowed = false;
++	connector->doublescan_allowed = false;
++
++	drm_connector_attach_property(connector,
++					dev->mode_config.scaling_mode_property,
++					DRM_MODE_SCALE_FULLSCREEN);
++	drm_connector_attach_property(connector,
++					dev_priv->backlight_property,
++					BRIGHTNESS_MAX_LEVEL);
++
++	mode_dev->panel_wants_dither = false;
++	if (dev_priv->vbt_data.size != 0x00)
++		mode_dev->panel_wants_dither = (dev_priv->gct_data.
++			Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
++        if (dev_priv->lvds_dither)
++                mode_dev->panel_wants_dither = 1;
++
++	/*
++	 * LVDS discovery:
++	 * 1) check for EDID on DDC
++	 * 2) check for VBT data
++	 * 3) check to see if LVDS is already on
++	 *    if none of the above, no panel
++	 * 4) make sure lid is open
++	 *    if closed, act like it's not there for now
++	 */
++
++	i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
++	if (i2c_adap == NULL)
++		dev_err(dev->dev, "No ddc adapter available!\n");
++	/*
++	 * Attempt to get the fixed panel mode from DDC.  Assume that the
++	 * preferred mode is the right one.
++	 */
++	if (i2c_adap) {
++		edid = drm_get_edid(connector, i2c_adap);
++		if (edid) {
++			drm_mode_connector_update_edid_property(connector,
++									edid);
++			drm_add_edid_modes(connector, edid);
++			kfree(edid);
++		}
++
++		list_for_each_entry(scan, &connector->probed_modes, head) {
++			if (scan->type & DRM_MODE_TYPE_PREFERRED) {
++				mode_dev->panel_fixed_mode =
++				    drm_mode_duplicate(dev, scan);
++				goto out;	/* FIXME: check for quirks */
++			}
++		}
++	}
++	/*
++	 * If we didn't get EDID, try getting panel timing
++	 * from configuration data
++	 */
++	oaktrail_lvds_get_configuration_mode(dev, mode_dev);
++
++	if (mode_dev->panel_fixed_mode) {
++		mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
++		goto out;	/* FIXME: check for quirks */
++	}
++
++	/* If we still don't have a mode after all that, give up. */
++	if (!mode_dev->panel_fixed_mode) {
++		dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
++		goto failed_find;
++	}
++
++out:
++	drm_sysfs_connector_add(connector);
++	return;
++
++failed_find:
++	dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
++	if (psb_intel_encoder->ddc_bus)
++		psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
++
++/* failed_ddc: */
++
++	drm_encoder_cleanup(encoder);
++	drm_connector_cleanup(connector);
++	kfree(psb_intel_connector);
++failed_connector:
++	kfree(psb_intel_encoder);
++}
++
+diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
+new file mode 100644
+index 0000000..889b854
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/power.c
+@@ -0,0 +1,315 @@
++/**************************************************************************
++ * Copyright (c) 2009-2011, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ * Authors:
++ *    Benjamin Defnet <benjamin.r.defnet at intel.com>
++ *    Rajesh Poornachandran <rajesh.poornachandran at intel.com>
++ * Massively reworked
++ *    Alan Cox <alan at linux.intel.com>
++ */
++
++#include "power.h"
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "psb_intel_reg.h"
++#include <linux/mutex.h>
++#include <linux/pm_runtime.h>
++
++static struct mutex power_mutex;	/* Serialize power ops */
++static spinlock_t power_ctrl_lock;	/* Serialize power claim */
++
++/**
++ *	gma_power_init		-	initialise power manager
++ *	@dev: our device
++ *
++ *	Set up for power management tracking of our hardware.
++ */
++void gma_power_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	/* FIXME: Move APM/OSPM base into relevant device code */
++	dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
++	dev_priv->ospm_base &= 0xffff;
++
++	dev_priv->display_power = true;	/* We start active */
++	dev_priv->display_count = 0;	/* Currently no users */
++	dev_priv->suspended = false;	/* And not suspended */
++	spin_lock_init(&power_ctrl_lock);
++	mutex_init(&power_mutex);
++
++	if (dev_priv->ops->init_pm)
++		dev_priv->ops->init_pm(dev);
++}
++
++/**
++ *	gma_power_uninit	-	end power manager
++ *	@dev: device to end for
++ *
++ *	Undo the effects of gma_power_init
++ */
++void gma_power_uninit(struct drm_device *dev)
++{
++	pm_runtime_disable(&dev->pdev->dev);
++	pm_runtime_set_suspended(&dev->pdev->dev);
++}
++
++/**
++ *	gma_suspend_display	-	suspend the display logic
++ *	@dev: our DRM device
++ *
++ *	Suspend the display logic of the graphics interface
++ */
++static void gma_suspend_display(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (dev_priv->suspended)
++		return;
++	dev_priv->ops->save_regs(dev);
++	dev_priv->ops->power_down(dev);
++	dev_priv->display_power = false;
++}
++
++/**
++ *	gma_resume_display	-	resume display side logic
++ *
++ *	Resume the display hardware restoring state and enabling
++ *	as necessary.
++ */
++static void gma_resume_display(struct pci_dev *pdev)
++{
++	struct drm_device *dev = pci_get_drvdata(pdev);
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	/* turn on the display power island */
++	dev_priv->ops->power_up(dev);
++	dev_priv->suspended = false;
++	dev_priv->display_power = true;
++
++	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
++	pci_write_config_word(pdev, PSB_GMCH_CTRL,
++			dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);
++	dev_priv->ops->restore_regs(dev);
++}
++
++/**
++ *	gma_suspend_pci		-	suspend PCI side
++ *	@pdev: PCI device
++ *
++ *	Perform the suspend processing on our PCI device state
++ */
++static void gma_suspend_pci(struct pci_dev *pdev)
++{
++	struct drm_device *dev = pci_get_drvdata(pdev);
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	int bsm, vbt;
++
++	if (dev_priv->suspended)
++		return;
++
++	pci_save_state(pdev);
++	pci_read_config_dword(pdev, 0x5C, &bsm);
++	dev_priv->regs.saveBSM = bsm;
++	pci_read_config_dword(pdev, 0xFC, &vbt);
++	dev_priv->regs.saveVBT = vbt;
++	pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
++	pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
++
++	pci_disable_device(pdev);
++	pci_set_power_state(pdev, PCI_D3hot);
++
++	dev_priv->suspended = true;
++}
++
++/**
++ *	gma_resume_pci		-	resume helper
++ *	@dev: our PCI device
++ *
++ *	Perform the resume processing on our PCI device state - rewrite
++ *	register state and re-enable the PCI device
++ */
++static bool gma_resume_pci(struct pci_dev *pdev)
++{
++	struct drm_device *dev = pci_get_drvdata(pdev);
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	int ret;
++
++	if (!dev_priv->suspended)
++		return true;
++
++	pci_set_power_state(pdev, PCI_D0);
++	pci_restore_state(pdev);
++	pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM);
++	pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT);
++	/* restoring MSI address and data in PCIx space */
++	pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
++	pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
++	ret = pci_enable_device(pdev);
++
++	if (ret != 0)
++		dev_err(&pdev->dev, "pci_enable failed: %d\n", ret);
++	else
++		dev_priv->suspended = false;
++	return !dev_priv->suspended;
++}
++
++/**
++ *	gma_power_suspend		-	bus callback for suspend
++ *	@pdev: our PCI device
++ *	@state: suspend type
++ *
++ *	Called back by the PCI layer during a suspend of the system. We
++ *	perform the necessary shut down steps and save enough state that
++ *	we can undo this when resume is called.
++ */
++int gma_power_suspend(struct device *_dev)
++{
++	struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
++	struct drm_device *dev = pci_get_drvdata(pdev);
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	mutex_lock(&power_mutex);
++	if (!dev_priv->suspended) {
++		if (dev_priv->display_count) {
++			mutex_unlock(&power_mutex);
++			dev_err(dev->dev, "GPU hardware busy, cannot suspend\n");
++			return -EBUSY;
++		}
++		psb_irq_uninstall(dev);
++		gma_suspend_display(dev);
++		gma_suspend_pci(pdev);
++	}
++	mutex_unlock(&power_mutex);
++	return 0;
++}
++
++/**
++ *	gma_power_resume		-	resume power
++ *	@pdev: PCI device
++ *
++ *	Resume the PCI side of the graphics and then the displays
++ */
++int gma_power_resume(struct device *_dev)
++{
++	struct pci_dev *pdev = container_of(_dev, struct pci_dev, dev);
++	struct drm_device *dev = pci_get_drvdata(pdev);
++
++	mutex_lock(&power_mutex);
++	gma_resume_pci(pdev);
++	gma_resume_display(pdev);
++	psb_irq_preinstall(dev);
++	psb_irq_postinstall(dev);
++	mutex_unlock(&power_mutex);
++	return 0;
++}
++
++/**
++ *	gma_power_is_on		-	return true if power is on
++ *	@dev: our DRM device
++ *
++ *	Returns true if the display island power is on at this moment
++ */
++bool gma_power_is_on(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	return dev_priv->display_power;
++}
++
++/**
++ *	gma_power_begin		-	begin requiring power
++ *	@dev: our DRM device
++ *	@force_on: true to force power on
++ *
++ *	Begin an action that requires the display power island is enabled.
++ *	We refcount the islands.
++ */
++bool gma_power_begin(struct drm_device *dev, bool force_on)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	int ret;
++	unsigned long flags;
++
++	spin_lock_irqsave(&power_ctrl_lock, flags);
++	/* Power already on ? */
++	if (dev_priv->display_power) {
++		dev_priv->display_count++;
++		pm_runtime_get(&dev->pdev->dev);
++		spin_unlock_irqrestore(&power_ctrl_lock, flags);
++		return true;
++	}
++	if (force_on == false)
++		goto out_false;
++
++	/* Ok power up needed */
++	ret = gma_resume_pci(dev->pdev);
++	if (ret == 0) {
++		psb_irq_preinstall(dev);
++		psb_irq_postinstall(dev);
++		pm_runtime_get(&dev->pdev->dev);
++		dev_priv->display_count++;
++		spin_unlock_irqrestore(&power_ctrl_lock, flags);
++		return true;
++	}
++out_false:
++	spin_unlock_irqrestore(&power_ctrl_lock, flags);
++	return false;
++}
++
++/**
++ *	gma_power_end		-	end use of power
++ *	@dev: Our DRM device
++ *
++ *	Indicate that one of our gma_power_begin() requested periods when
++ *	the display island power is needed has completed.
++ */
++void gma_power_end(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	unsigned long flags;
++	spin_lock_irqsave(&power_ctrl_lock, flags);
++	dev_priv->display_count--;
++	WARN_ON(dev_priv->display_count < 0);
++	spin_unlock_irqrestore(&power_ctrl_lock, flags);
++	pm_runtime_put(&dev->pdev->dev);
++}
++
++int psb_runtime_suspend(struct device *dev)
++{
++	return gma_power_suspend(dev);
++}
++
++int psb_runtime_resume(struct device *dev)
++{
++	return gma_power_resume(dev);
++}
++
++int psb_runtime_idle(struct device *dev)
++{
++	struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
++	struct drm_psb_private *dev_priv = drmdev->dev_private;
++	if (dev_priv->display_count)
++		return 0;
++	else
++		return 1;
++}
+diff --git a/drivers/gpu/drm/gma500/power.h b/drivers/gpu/drm/gma500/power.h
+new file mode 100644
+index 0000000..1969d2e
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/power.h
+@@ -0,0 +1,67 @@
++/**************************************************************************
++ * Copyright (c) 2009-2011, Intel Corporation.
++ * All Rights Reserved.
++
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ * Authors:
++ *    Benjamin Defnet <benjamin.r.defnet at intel.com>
++ *    Rajesh Poornachandran <rajesh.poornachandran at intel.com>
++ * Massively reworked
++ *    Alan Cox <alan at linux.intel.com>
++ */
++#ifndef _PSB_POWERMGMT_H_
++#define _PSB_POWERMGMT_H_
++
++#include <linux/pci.h>
++#include <drm/drmP.h>
++
++void gma_power_init(struct drm_device *dev);
++void gma_power_uninit(struct drm_device *dev);
++
++/*
++ * The kernel bus power management  will call these functions
++ */
++int gma_power_suspend(struct device *dev);
++int gma_power_resume(struct device *dev);
++
++/*
++ * These are the functions the driver should use to wrap all hw access
++ * (i.e. register reads and writes)
++ */
++bool gma_power_begin(struct drm_device *dev, bool force);
++void gma_power_end(struct drm_device *dev);
++
++/*
++ * Use this function to do an instantaneous check for if the hw is on.
++ * Only use this in cases where you know the mutex is already held such
++ * as in irq install/uninstall and you need to
++ * prevent a deadlock situation.  Otherwise use gma_power_begin().
++ */
++bool gma_power_is_on(struct drm_device *dev);
++
++/*
++ * GFX-Runtime PM callbacks
++ */
++int psb_runtime_suspend(struct device *dev);
++int psb_runtime_resume(struct device *dev);
++int psb_runtime_idle(struct device *dev);
++
++#endif /*_PSB_POWERMGMT_H_*/
+diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
+new file mode 100644
+index 0000000..328a193
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_device.c
+@@ -0,0 +1,332 @@
++/**************************************************************************
++ * Copyright (c) 2011, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <linux/backlight.h>
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "gma_drm.h"
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "psb_intel_reg.h"
++#include "intel_bios.h"
++
++
++static int psb_output_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	psb_intel_lvds_init(dev, &dev_priv->mode_dev);
++	psb_intel_sdvo_init(dev, SDVOB);
++	return 0;
++}
++
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++
++/*
++ *	Poulsbo Backlight Interfaces
++ */
++
++#define BLC_PWM_PRECISION_FACTOR 100	/* 10000000 */
++#define BLC_PWM_FREQ_CALC_CONSTANT 32
++#define MHz 1000000
++
++#define PSB_BLC_PWM_PRECISION_FACTOR    10
++#define PSB_BLC_MAX_PWM_REG_FREQ        0xFFFE
++#define PSB_BLC_MIN_PWM_REG_FREQ        0x2
++
++#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
++#define PSB_BACKLIGHT_PWM_CTL_SHIFT	(16)
++
++static int psb_brightness;
++static struct backlight_device *psb_backlight_device;
++
++static int psb_get_brightness(struct backlight_device *bd)
++{
++	/* return locally cached var instead of HW read (due to DPST etc.) */
++	/* FIXME: ideally return actual value in case firmware fiddled with
++	   it */
++	return psb_brightness;
++}
++
++
++static int psb_backlight_setup(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	unsigned long core_clock;
++	/* u32 bl_max_freq; */
++	/* unsigned long value; */
++	u16 bl_max_freq;
++	uint32_t value;
++	uint32_t blc_pwm_precision_factor;
++
++	/* get bl_max_freq and pol from dev_priv*/
++	if (!dev_priv->lvds_bl) {
++		dev_err(dev->dev, "Has no valid LVDS backlight info\n");
++		return -ENOENT;
++	}
++	bl_max_freq = dev_priv->lvds_bl->freq;
++	blc_pwm_precision_factor = PSB_BLC_PWM_PRECISION_FACTOR;
++
++	core_clock = dev_priv->core_freq;
++
++	value = (core_clock * MHz) / BLC_PWM_FREQ_CALC_CONSTANT;
++	value *= blc_pwm_precision_factor;
++	value /= bl_max_freq;
++	value /= blc_pwm_precision_factor;
++
++	if (value > (unsigned long long)PSB_BLC_MAX_PWM_REG_FREQ ||
++		 value < (unsigned long long)PSB_BLC_MIN_PWM_REG_FREQ)
++				return -ERANGE;
++	else {
++		value &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
++		REG_WRITE(BLC_PWM_CTL,
++			(value << PSB_BACKLIGHT_PWM_CTL_SHIFT) | (value));
++	}
++	return 0;
++}
++
++static int psb_set_brightness(struct backlight_device *bd)
++{
++	struct drm_device *dev = bl_get_data(psb_backlight_device);
++	int level = bd->props.brightness;
++
++	/* Percentage 1-100% being valid */
++	if (level < 1)
++		level = 1;
++
++	psb_intel_lvds_set_brightness(dev, level);
++	psb_brightness = level;
++	return 0;
++}
++
++static const struct backlight_ops psb_ops = {
++	.get_brightness = psb_get_brightness,
++	.update_status  = psb_set_brightness,
++};
++
++static int psb_backlight_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	int ret;
++	struct backlight_properties props;
++
++	memset(&props, 0, sizeof(struct backlight_properties));
++	props.max_brightness = 100;
++	props.type = BACKLIGHT_PLATFORM;
++
++	psb_backlight_device = backlight_device_register("psb-bl",
++					NULL, (void *)dev, &psb_ops, &props);
++	if (IS_ERR(psb_backlight_device))
++		return PTR_ERR(psb_backlight_device);
++
++	ret = psb_backlight_setup(dev);
++	if (ret < 0) {
++		backlight_device_unregister(psb_backlight_device);
++		psb_backlight_device = NULL;
++		return ret;
++	}
++	psb_backlight_device->props.brightness = 100;
++	psb_backlight_device->props.max_brightness = 100;
++	backlight_update_status(psb_backlight_device);
++	dev_priv->backlight_device = psb_backlight_device;
++	return 0;
++}
++
++#endif
++
++/*
++ *	Provide the Poulsbo specific chip logic and low level methods
++ *	for power management
++ */
++
++static void psb_init_pm(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
++	gating &= ~3;	/* Disable 2D clock gating */
++	gating |= 1;
++	PSB_WSGX32(gating, PSB_CR_CLKGATECTL);
++	PSB_RSGX32(PSB_CR_CLKGATECTL);
++}
++
++/**
++ *	psb_save_display_registers	-	save registers lost on suspend
++ *	@dev: our DRM device
++ *
++ *	Save the state we need in order to be able to restore the interface
++ *	upon resume from suspend
++ */
++static int psb_save_display_registers(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct drm_crtc *crtc;
++	struct drm_connector *connector;
++	struct psb_state *regs = &dev_priv->regs.psb;
++
++	/* Display arbitration control + watermarks */
++	regs->saveDSPARB = PSB_RVDC32(DSPARB);
++	regs->saveDSPFW1 = PSB_RVDC32(DSPFW1);
++	regs->saveDSPFW2 = PSB_RVDC32(DSPFW2);
++	regs->saveDSPFW3 = PSB_RVDC32(DSPFW3);
++	regs->saveDSPFW4 = PSB_RVDC32(DSPFW4);
++	regs->saveDSPFW5 = PSB_RVDC32(DSPFW5);
++	regs->saveDSPFW6 = PSB_RVDC32(DSPFW6);
++	regs->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
++
++	/* Save crtc and output state */
++	mutex_lock(&dev->mode_config.mutex);
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		if (drm_helper_crtc_in_use(crtc))
++			crtc->funcs->save(crtc);
++	}
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
++		if (connector->funcs->save)
++			connector->funcs->save(connector);
++
++	mutex_unlock(&dev->mode_config.mutex);
++	return 0;
++}
++
++/**
++ *	psb_restore_display_registers	-	restore lost register state
++ *	@dev: our DRM device
++ *
++ *	Restore register state that was lost during suspend and resume.
++ */
++static int psb_restore_display_registers(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct drm_crtc *crtc;
++	struct drm_connector *connector;
++	struct psb_state *regs = &dev_priv->regs.psb;
++
++	/* Display arbitration + watermarks */
++	PSB_WVDC32(regs->saveDSPARB, DSPARB);
++	PSB_WVDC32(regs->saveDSPFW1, DSPFW1);
++	PSB_WVDC32(regs->saveDSPFW2, DSPFW2);
++	PSB_WVDC32(regs->saveDSPFW3, DSPFW3);
++	PSB_WVDC32(regs->saveDSPFW4, DSPFW4);
++	PSB_WVDC32(regs->saveDSPFW5, DSPFW5);
++	PSB_WVDC32(regs->saveDSPFW6, DSPFW6);
++	PSB_WVDC32(regs->saveCHICKENBIT, DSPCHICKENBIT);
++
++	/*make sure VGA plane is off. it initializes to on after reset!*/
++	PSB_WVDC32(0x80000000, VGACNTRL);
++
++	mutex_lock(&dev->mode_config.mutex);
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
++		if (drm_helper_crtc_in_use(crtc))
++			crtc->funcs->restore(crtc);
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
++		if (connector->funcs->restore)
++			connector->funcs->restore(connector);
++
++	mutex_unlock(&dev->mode_config.mutex);
++	return 0;
++}
++
++static int psb_power_down(struct drm_device *dev)
++{
++	return 0;
++}
++
++static int psb_power_up(struct drm_device *dev)
++{
++	return 0;
++}
++
++static void psb_get_core_freq(struct drm_device *dev)
++{
++	uint32_t clock;
++	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	/*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
++	/*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
++	pci_write_config_dword(pci_root, 0xD0, 0xD0050300);
++	pci_read_config_dword(pci_root, 0xD4, &clock);
++	pci_dev_put(pci_root);
++
++	switch (clock & 0x07) {
++	case 0:
++		dev_priv->core_freq = 100;
++		break;
++	case 1:
++		dev_priv->core_freq = 133;
++		break;
++	case 2:
++		dev_priv->core_freq = 150;
++		break;
++	case 3:
++		dev_priv->core_freq = 178;
++		break;
++	case 4:
++		dev_priv->core_freq = 200;
++		break;
++	case 5:
++	case 6:
++	case 7:
++		dev_priv->core_freq = 266;
++		break;	/* was missing: fell through and zeroed core_freq */
++	default:
++		dev_priv->core_freq = 0;
++	}
++}
++
++static int psb_chip_setup(struct drm_device *dev)
++{
++	psb_get_core_freq(dev);
++	gma_intel_setup_gmbus(dev);
++	gma_intel_opregion_init(dev);
++	psb_intel_init_bios(dev);
++	return 0;
++}
++
++static void psb_chip_teardown(struct drm_device *dev)
++{
++	gma_intel_teardown_gmbus(dev);
++}
++
++const struct psb_ops psb_chip_ops = {
++	.name = "Poulsbo",
++	.accel_2d = 1,
++	.pipes = 2,
++	.crtcs = 2,
++	.sgx_offset = PSB_SGX_OFFSET,
++	.chip_setup = psb_chip_setup,
++	.chip_teardown = psb_chip_teardown,
++
++	.crtc_helper = &psb_intel_helper_funcs,
++	.crtc_funcs = &psb_intel_crtc_funcs,
++
++	.output_init = psb_output_init,
++
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++	.backlight_init = psb_backlight_init,
++#endif
++
++	.init_pm = psb_init_pm,
++	.save_regs = psb_save_display_registers,
++	.restore_regs = psb_restore_display_registers,
++	.power_down = psb_power_down,
++	.power_up = psb_power_up,
++};
++
+diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
+new file mode 100644
+index 0000000..09af2ff
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_drv.c
+@@ -0,0 +1,706 @@
++/**************************************************************************
++ * Copyright (c) 2007-2011, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include <drm/drm.h>
++#include "gma_drm.h"
++#include "psb_drv.h"
++#include "framebuffer.h"
++#include "psb_reg.h"
++#include "psb_intel_reg.h"
++#include "intel_bios.h"
++#include "mid_bios.h"
++#include <drm/drm_pciids.h>
++#include "power.h"
++#include <linux/cpu.h>
++#include <linux/notifier.h>
++#include <linux/spinlock.h>
++#include <linux/pm_runtime.h>
++#include <acpi/video.h>
++#include <linux/module.h>
++
++static int drm_psb_trap_pagefaults;
++
++static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
++
++MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
++module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
++
++
++static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
++	{ 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
++	{ 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
++#if defined(CONFIG_DRM_GMA600)
++	{ 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
++	{ 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
++	{ 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
++	{ 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
++	{ 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
++	{ 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
++	{ 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
++	{ 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
++	/* Atom E620 */
++	{ 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
++#endif
++#if defined(CONFIG_DRM_MEDFIELD)
++	{0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
++	{0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
++	{0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
++	{0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
++	{0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
++	{0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
++	{0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
++	{0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
++#endif
++#if defined(CONFIG_DRM_GMA3600)
++	{ 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
++	{ 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
++	{ 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
++	{ 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
++	{ 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
++	{ 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
++	{ 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
++	{ 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
++#endif
++	{ 0, }
++};
++MODULE_DEVICE_TABLE(pci, pciidlist);
++
++/*
++ * Standard IOCTLs.
++ */
++
++#define DRM_IOCTL_GMA_ADB	\
++		DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t)
++#define DRM_IOCTL_GMA_MODE_OPERATION	\
++		DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \
++			 struct drm_psb_mode_operation_arg)
++#define DRM_IOCTL_GMA_STOLEN_MEMORY	\
++		DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \
++			 struct drm_psb_stolen_memory_arg)
++#define DRM_IOCTL_GMA_GAMMA	\
++		DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \
++			 struct drm_psb_dpst_lut_arg)
++#define DRM_IOCTL_GMA_DPST_BL	\
++		DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \
++			 uint32_t)
++#define DRM_IOCTL_GMA_GET_PIPE_FROM_CRTC_ID	\
++		DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
++			 struct drm_psb_get_pipe_from_crtc_id_arg)
++#define DRM_IOCTL_GMA_GEM_CREATE	\
++		DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \
++			 struct drm_psb_gem_create)
++#define DRM_IOCTL_GMA_GEM_MMAP	\
++		DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \
++			 struct drm_psb_gem_mmap)
++
++static int psb_adb_ioctl(struct drm_device *dev, void *data,
++			 struct drm_file *file_priv);
++static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
++				    struct drm_file *file_priv);
++static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
++				   struct drm_file *file_priv);
++static int psb_gamma_ioctl(struct drm_device *dev, void *data,
++			   struct drm_file *file_priv);
++static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
++			     struct drm_file *file_priv);
++
++static struct drm_ioctl_desc psb_ioctls[] = {
++	DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
++	DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
++		      DRM_AUTH),
++	DRM_IOCTL_DEF_DRV(GMA_STOLEN_MEMORY, psb_stolen_memory_ioctl,
++		      DRM_AUTH),
++	DRM_IOCTL_DEF_DRV(GMA_GAMMA, psb_gamma_ioctl, DRM_AUTH),
++	DRM_IOCTL_DEF_DRV(GMA_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
++	DRM_IOCTL_DEF_DRV(GMA_GET_PIPE_FROM_CRTC_ID,
++					psb_intel_get_pipe_from_crtc_id, 0),
++	DRM_IOCTL_DEF_DRV(GMA_GEM_CREATE, psb_gem_create_ioctl,
++						DRM_UNLOCKED | DRM_AUTH),
++	DRM_IOCTL_DEF_DRV(GMA_GEM_MMAP, psb_gem_mmap_ioctl,
++						DRM_UNLOCKED | DRM_AUTH),
++};
++
++static void psb_lastclose(struct drm_device *dev)
++{
++	return;
++}
++
++static void psb_do_takedown(struct drm_device *dev)
++{
++}
++
++static int psb_do_init(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_gtt *pg = &dev_priv->gtt;
++
++	uint32_t stolen_gtt;
++
++	int ret = -ENOMEM;
++
++	if (pg->mmu_gatt_start & 0x0FFFFFFF) {
++		dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
++		ret = -EINVAL;
++		goto out_err;
++	}
++
++
++	stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
++	stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
++	stolen_gtt =
++	    (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
++
++	dev_priv->gatt_free_offset = pg->mmu_gatt_start +
++	    (stolen_gtt << PAGE_SHIFT) * 1024;
++
++	if (1 || drm_debug) {
++		uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
++		uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
++		DRM_INFO("SGX core id = 0x%08x\n", core_id);
++		DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
++			 (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
++			 _PSB_CC_REVISION_MAJOR_SHIFT,
++			 (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
++			 _PSB_CC_REVISION_MINOR_SHIFT);
++		DRM_INFO
++		    ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
++		     (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
++		     _PSB_CC_REVISION_MAINTENANCE_SHIFT,
++		     (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
++		     _PSB_CC_REVISION_DESIGNER_SHIFT);
++	}
++
++
++	spin_lock_init(&dev_priv->irqmask_lock);
++	spin_lock_init(&dev_priv->lock_2d);
++
++	PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
++	PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
++	PSB_RSGX32(PSB_CR_BIF_BANK1);
++	PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
++							PSB_CR_BIF_CTRL);
++	psb_spank(dev_priv);
++
++	/* mmu_gatt ?? */
++	PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
++	return 0;
++out_err:
++	psb_do_takedown(dev);
++	return ret;
++}
++
++static int psb_driver_unload(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	/* Kill vblank etc here */
++
++	gma_backlight_exit(dev);
++
++	psb_modeset_cleanup(dev);
++
++	if (dev_priv) {
++		psb_lid_timer_takedown(dev_priv);
++		gma_intel_opregion_exit(dev);
++
++		if (dev_priv->ops->chip_teardown)
++			dev_priv->ops->chip_teardown(dev);
++		psb_do_takedown(dev);
++
++
++		if (dev_priv->pf_pd) {
++			psb_mmu_free_pagedir(dev_priv->pf_pd);
++			dev_priv->pf_pd = NULL;
++		}
++		if (dev_priv->mmu) {
++			struct psb_gtt *pg = &dev_priv->gtt;
++
++			down_read(&pg->sem);
++			psb_mmu_remove_pfn_sequence(
++				psb_mmu_get_default_pd
++				(dev_priv->mmu),
++				pg->mmu_gatt_start,
++				dev_priv->vram_stolen_size >> PAGE_SHIFT);
++			up_read(&pg->sem);
++			psb_mmu_driver_takedown(dev_priv->mmu);
++			dev_priv->mmu = NULL;
++		}
++		psb_gtt_takedown(dev);
++		if (dev_priv->scratch_page) {
++			__free_page(dev_priv->scratch_page);
++			dev_priv->scratch_page = NULL;
++		}
++		if (dev_priv->vdc_reg) {
++			iounmap(dev_priv->vdc_reg);
++			dev_priv->vdc_reg = NULL;
++		}
++		if (dev_priv->sgx_reg) {
++			iounmap(dev_priv->sgx_reg);
++			dev_priv->sgx_reg = NULL;
++		}
++
++		kfree(dev_priv);
++		dev->dev_private = NULL;
++
++		/*destroy VBT data*/
++		psb_intel_destroy_bios(dev);
++	}
++
++	gma_power_uninit(dev);
++
++	return 0;
++}
++
++
++static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
++{
++	struct drm_psb_private *dev_priv;
++	unsigned long resource_start;
++	unsigned long irqflags;
++	int ret = -ENOMEM;
++	struct drm_connector *connector;
++	struct psb_intel_encoder *psb_intel_encoder;
++
++	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
++	if (dev_priv == NULL)
++		return -ENOMEM;
++
++	dev_priv->ops = (struct psb_ops *)chipset;
++	dev_priv->dev = dev;
++	dev->dev_private = (void *) dev_priv;
++
++	pci_set_master(dev->pdev);
++
++	if (!IS_PSB(dev)) {
++		if (pci_enable_msi(dev->pdev))
++			dev_warn(dev->dev, "Enabling MSI failed!\n");
++	}
++
++	dev_priv->num_pipe = dev_priv->ops->pipes;
++
++	resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
++
++	dev_priv->vdc_reg =
++	    ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
++	if (!dev_priv->vdc_reg)
++		goto out_err;
++
++	dev_priv->sgx_reg = ioremap(resource_start + dev_priv->ops->sgx_offset,
++							PSB_SGX_SIZE);
++	if (!dev_priv->sgx_reg)
++		goto out_err;
++
++	ret = dev_priv->ops->chip_setup(dev);
++	if (ret)
++		goto out_err;
++
++	/* Init OSPM support */
++	gma_power_init(dev);
++
++	ret = -ENOMEM;
++
++	dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
++	if (!dev_priv->scratch_page)
++		goto out_err;
++
++	set_pages_uc(dev_priv->scratch_page, 1);
++
++	ret = psb_gtt_init(dev, 0);
++	if (ret)
++		goto out_err;
++
++	dev_priv->mmu = psb_mmu_driver_init((void *)0,
++					drm_psb_trap_pagefaults, 0,
++					dev_priv);
++	if (!dev_priv->mmu)
++		goto out_err;
++
++	dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
++	if (!dev_priv->pf_pd)
++		goto out_err;
++
++	psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
++	psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
++
++	ret = psb_do_init(dev);
++	if (ret)
++		goto out_err;
++
++	PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
++	PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
++
++/*	igd_opregion_init(&dev_priv->opregion_dev); */
++/*	acpi_video_register(); */
++	if (dev_priv->lid_state)
++		psb_lid_timer_init(dev_priv);
++
++	ret = drm_vblank_init(dev, dev_priv->num_pipe);
++	if (ret)
++		goto out_err;
++
++	/*
++	 * Install interrupt handlers prior to powering off SGX or else we will
++	 * crash.
++	 */
++	dev_priv->vdc_irq_mask = 0;
++	dev_priv->pipestat[0] = 0;
++	dev_priv->pipestat[1] = 0;
++	dev_priv->pipestat[2] = 0;
++	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
++	PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
++	PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
++	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++	if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
++		drm_irq_install(dev);
++
++	dev->vblank_disable_allowed = 1;
++
++	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
++
++	dev->driver->get_vblank_counter = psb_get_vblank_counter;
++
++	psb_modeset_init(dev);
++	psb_fbdev_init(dev);
++	drm_kms_helper_poll_init(dev);
++
++	/* Only add backlight support if we have LVDS output */
++	list_for_each_entry(connector, &dev->mode_config.connector_list,
++			    head) {
++		psb_intel_encoder = psb_intel_attached_encoder(connector);
++
++		switch (psb_intel_encoder->type) {
++		case INTEL_OUTPUT_LVDS:
++		case INTEL_OUTPUT_MIPI:
++			ret = gma_backlight_init(dev);
++			break;
++		}
++	}
++
++	if (ret)
++		goto out_err;
++#if 0
++	/*enable runtime pm at last*/
++	pm_runtime_enable(&dev->pdev->dev);
++	pm_runtime_set_active(&dev->pdev->dev);
++#endif
++	/*Intel drm driver load is done, continue doing pvr load*/
++	return 0;
++out_err:
++	psb_driver_unload(dev);
++	return ret;
++}
++
++static int psb_driver_device_is_agp(struct drm_device *dev)
++{
++	return 0;
++}
++
++static inline void get_brightness(struct backlight_device *bd)
++{
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++	if (bd) {
++		bd->props.brightness = bd->ops->get_brightness(bd);
++		backlight_update_status(bd);
++	}
++#endif
++}
++
++static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
++		       struct drm_file *file_priv)
++{
++	struct drm_psb_private *dev_priv = psb_priv(dev);
++	uint32_t *arg = data;
++
++	dev_priv->blc_adj2 = *arg;
++	get_brightness(dev_priv->backlight_device);
++	return 0;
++}
++
++static int psb_adb_ioctl(struct drm_device *dev, void *data,
++			struct drm_file *file_priv)
++{
++	struct drm_psb_private *dev_priv = psb_priv(dev);
++	uint32_t *arg = data;
++
++	dev_priv->blc_adj1 = *arg;
++	get_brightness(dev_priv->backlight_device);
++	return 0;
++}
++
++static int psb_gamma_ioctl(struct drm_device *dev, void *data,
++			   struct drm_file *file_priv)
++{
++	struct drm_psb_dpst_lut_arg *lut_arg = data;
++	struct drm_mode_object *obj;
++	struct drm_crtc *crtc;
++	struct drm_connector *connector;
++	struct psb_intel_crtc *psb_intel_crtc;
++	int i = 0;
++	int32_t obj_id;
++
++	obj_id = lut_arg->output_id;
++	obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
++	if (!obj) {
++		dev_dbg(dev->dev, "Invalid Connector object.\n");
++		return -EINVAL;
++	}
++
++	connector = obj_to_connector(obj);
++	crtc = connector->encoder->crtc;
++	psb_intel_crtc = to_psb_intel_crtc(crtc);
++
++	for (i = 0; i < 256; i++)
++		psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
++
++	psb_intel_crtc_load_lut(crtc);
++
++	return 0;
++}
++
++static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
++				struct drm_file *file_priv)
++{
++	uint32_t obj_id;
++	uint16_t op;
++	struct drm_mode_modeinfo *umode;
++	struct drm_display_mode *mode = NULL;
++	struct drm_psb_mode_operation_arg *arg;
++	struct drm_mode_object *obj;
++	struct drm_connector *connector;
++	struct drm_connector_helper_funcs *connector_funcs;
++	int ret = 0;
++	int resp = MODE_OK;
++
++	arg = (struct drm_psb_mode_operation_arg *)data;
++	obj_id = arg->obj_id;
++	op = arg->operation;
++
++	switch (op) {
++	case PSB_MODE_OPERATION_MODE_VALID:
++		umode = &arg->mode;
++
++		mutex_lock(&dev->mode_config.mutex);
++
++		obj = drm_mode_object_find(dev, obj_id,
++					DRM_MODE_OBJECT_CONNECTOR);
++		if (!obj) {
++			ret = -EINVAL;
++			goto mode_op_out;
++		}
++
++		connector = obj_to_connector(obj);
++
++		mode = drm_mode_create(dev);
++		if (!mode) {
++			ret = -ENOMEM;
++			goto mode_op_out;
++		}
++
++		/* drm_crtc_convert_umode(mode, umode); */
++		{
++			mode->clock = umode->clock;
++			mode->hdisplay = umode->hdisplay;
++			mode->hsync_start = umode->hsync_start;
++			mode->hsync_end = umode->hsync_end;
++			mode->htotal = umode->htotal;
++			mode->hskew = umode->hskew;
++			mode->vdisplay = umode->vdisplay;
++			mode->vsync_start = umode->vsync_start;
++			mode->vsync_end = umode->vsync_end;
++			mode->vtotal = umode->vtotal;
++			mode->vscan = umode->vscan;
++			mode->vrefresh = umode->vrefresh;
++			mode->flags = umode->flags;
++			mode->type = umode->type;
++			strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
++			mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
++		}
++
++		connector_funcs = (struct drm_connector_helper_funcs *)
++				   connector->helper_private;
++
++		if (connector_funcs->mode_valid) {
++			resp = connector_funcs->mode_valid(connector, mode);
++			arg->data = resp;
++		}
++
++		/*do some clean up work*/
++		if (mode)
++			drm_mode_destroy(dev, mode);
++mode_op_out:
++		mutex_unlock(&dev->mode_config.mutex);
++		return ret;
++
++	default:
++		dev_dbg(dev->dev, "Unsupported psb mode operation\n");
++		return -EOPNOTSUPP;
++	}
++
++	return 0;
++}
++
++static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
++				   struct drm_file *file_priv)
++{
++	struct drm_psb_private *dev_priv = psb_priv(dev);
++	struct drm_psb_stolen_memory_arg *arg = data;
++
++	arg->base = dev_priv->stolen_base;
++	arg->size = dev_priv->vram_stolen_size;
++
++	return 0;
++}
++
++static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
++{
++	return 0;
++}
++
++static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
++{
++}
++
++static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
++			       unsigned long arg)
++{
++	struct drm_file *file_priv = filp->private_data;
++	struct drm_device *dev = file_priv->minor->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	static unsigned int runtime_allowed;
++
++	if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
++		runtime_allowed++;
++		pm_runtime_allow(&dev->pdev->dev);
++		dev_priv->rpm_enabled = 1;
++	}
++	return drm_ioctl(filp, cmd, arg);
++	/* FIXME: do we need to wrap the other side of this */
++}
++
++
++/* When a client dies:
++ *    - Check for and clean up flipped page state
++ */
++static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
++{
++}
++
++static void psb_remove(struct pci_dev *pdev)
++{
++	struct drm_device *dev = pci_get_drvdata(pdev);
++	drm_put_dev(dev);
++}
++
++static const struct dev_pm_ops psb_pm_ops = {
++	.resume = gma_power_resume,
++	.suspend = gma_power_suspend,
++	.runtime_suspend = psb_runtime_suspend,
++	.runtime_resume = psb_runtime_resume,
++	.runtime_idle = psb_runtime_idle,
++};
++
++static struct vm_operations_struct psb_gem_vm_ops = {
++	.fault = psb_gem_fault,
++	.open = drm_gem_vm_open,
++	.close = drm_gem_vm_close,
++};
++
++static const struct file_operations psb_gem_fops = {
++	.owner = THIS_MODULE,
++	.open = drm_open,
++	.release = drm_release,
++	.unlocked_ioctl = psb_unlocked_ioctl,
++	.mmap = drm_gem_mmap,
++	.poll = drm_poll,
++	.fasync = drm_fasync,
++	.read = drm_read,
++};
++
++static struct drm_driver driver = {
++	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
++			   DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM ,
++	.load = psb_driver_load,
++	.unload = psb_driver_unload,
++
++	.ioctls = psb_ioctls,
++	.num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
++	.device_is_agp = psb_driver_device_is_agp,
++	.irq_preinstall = psb_irq_preinstall,
++	.irq_postinstall = psb_irq_postinstall,
++	.irq_uninstall = psb_irq_uninstall,
++	.irq_handler = psb_irq_handler,
++	.enable_vblank = psb_enable_vblank,
++	.disable_vblank = psb_disable_vblank,
++	.get_vblank_counter = psb_get_vblank_counter,
++	.lastclose = psb_lastclose,
++	.open = psb_driver_open,
++	.preclose = psb_driver_preclose,
++	.postclose = psb_driver_close,
++	.reclaim_buffers = drm_core_reclaim_buffers,
++
++	.gem_init_object = psb_gem_init_object,
++	.gem_free_object = psb_gem_free_object,
++	.gem_vm_ops = &psb_gem_vm_ops,
++	.dumb_create = psb_gem_dumb_create,
++	.dumb_map_offset = psb_gem_dumb_map_gtt,
++	.dumb_destroy = psb_gem_dumb_destroy,
++	.fops = &psb_gem_fops,
++	.name = DRIVER_NAME,
++	.desc = DRIVER_DESC,
++	.date = PSB_DRM_DRIVER_DATE,
++	.major = PSB_DRM_DRIVER_MAJOR,
++	.minor = PSB_DRM_DRIVER_MINOR,
++	.patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
++};
++
++static struct pci_driver psb_pci_driver = {
++	.name = DRIVER_NAME,
++	.id_table = pciidlist,
++	.probe = psb_probe,
++	.remove = psb_remove,
++	.driver = {
++		.pm = &psb_pm_ops,
++	}
++};
++
++static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++	return drm_get_pci_dev(pdev, ent, &driver);
++}
++
++static int __init psb_init(void)
++{
++	return drm_pci_init(&driver, &psb_pci_driver);
++}
++
++static void __exit psb_exit(void)
++{
++	drm_pci_exit(&driver, &psb_pci_driver);
++}
++
++late_initcall(psb_init);
++module_exit(psb_exit);
++
++MODULE_AUTHOR("Alan Cox <alan at linux.intel.com> and others");
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL");
+diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
+new file mode 100644
+index 0000000..40ce2c9
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_drv.h
+@@ -0,0 +1,993 @@
++/**************************************************************************
++ * Copyright (c) 2007-2011, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_DRV_H_
++#define _PSB_DRV_H_
++
++#include <linux/kref.h>
++
++#include <drm/drmP.h>
++#include "drm_global.h"
++#include "gem_glue.h"
++#include "gma_drm.h"
++#include "psb_reg.h"
++#include "psb_intel_drv.h"
++#include "gtt.h"
++#include "power.h"
++#include "oaktrail.h"
++
++/* Append new drm mode definition here, align with libdrm definition */
++#define DRM_MODE_SCALE_NO_SCALE   	2
++
++enum {
++	CHIP_PSB_8108 = 0,		/* Poulsbo */
++	CHIP_PSB_8109 = 1,		/* Poulsbo */
++	CHIP_MRST_4100 = 2,		/* Moorestown/Oaktrail */
++	CHIP_MFLD_0130 = 3,		/* Medfield */
++};
++
++#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
++#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
++#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
++
++/*
++ * Driver definitions
++ */
++
++#define DRIVER_NAME "gma500"
++#define DRIVER_DESC "DRM driver for the Intel GMA500"
++
++#define PSB_DRM_DRIVER_DATE "2011-06-06"
++#define PSB_DRM_DRIVER_MAJOR 1
++#define PSB_DRM_DRIVER_MINOR 0
++#define PSB_DRM_DRIVER_PATCHLEVEL 0
++
++/*
++ *	Hardware offsets
++ */
++#define PSB_VDC_OFFSET		 0x00000000
++#define PSB_VDC_SIZE		 0x000080000
++#define MRST_MMIO_SIZE		 0x0000C0000
++#define MDFLD_MMIO_SIZE          0x000100000
++#define PSB_SGX_SIZE		 0x8000
++#define PSB_SGX_OFFSET		 0x00040000
++#define MRST_SGX_OFFSET		 0x00080000
++/*
++ *	PCI resource identifiers
++ */
++#define PSB_MMIO_RESOURCE	 0
++#define PSB_GATT_RESOURCE	 2
++#define PSB_GTT_RESOURCE	 3
++/*
++ *	PCI configuration
++ */
++#define PSB_GMCH_CTRL		 0x52
++#define PSB_BSM			 0x5C
++#define _PSB_GMCH_ENABLED	 0x4
++#define PSB_PGETBL_CTL		 0x2020
++#define _PSB_PGETBL_ENABLED	 0x00000001
++#define PSB_SGX_2D_SLAVE_PORT	 0x4000
++
++/* To get rid of */
++#define PSB_TT_PRIV0_LIMIT	 (256*1024*1024)
++#define PSB_TT_PRIV0_PLIMIT	 (PSB_TT_PRIV0_LIMIT >> PAGE_SHIFT)
++
++/*
++ *	SGX side MMU definitions (these can probably go)
++ */
++
++/*
++ *	Flags for external memory type field.
++ */
++#define PSB_MMU_CACHED_MEMORY	  0x0001	/* Bind to MMU only */
++#define PSB_MMU_RO_MEMORY	  0x0002	/* MMU RO memory */
++#define PSB_MMU_WO_MEMORY	  0x0004	/* MMU WO memory */
++/*
++ *	PTE's and PDE's
++ */
++#define PSB_PDE_MASK		  0x003FFFFF
++#define PSB_PDE_SHIFT		  22
++#define PSB_PTE_SHIFT		  12
++/*
++ *	Cache control
++ */
++#define PSB_PTE_VALID		  0x0001	/* PTE / PDE valid */
++#define PSB_PTE_WO		  0x0002	/* Write only */
++#define PSB_PTE_RO		  0x0004	/* Read only */
++#define PSB_PTE_CACHED		  0x0008	/* CPU cache coherent */
++
++/*
++ *	VDC registers and bits
++ */
++#define PSB_MSVDX_CLOCKGATING	  0x2064
++#define PSB_TOPAZ_CLOCKGATING	  0x2068
++#define PSB_HWSTAM		  0x2098
++#define PSB_INSTPM		  0x20C0
++#define PSB_INT_IDENTITY_R        0x20A4
++#define _MDFLD_PIPEC_EVENT_FLAG   (1<<2)
++#define _MDFLD_PIPEC_VBLANK_FLAG  (1<<3)
++#define _PSB_DPST_PIPEB_FLAG      (1<<4)
++#define _MDFLD_PIPEB_EVENT_FLAG   (1<<4)
++#define _PSB_VSYNC_PIPEB_FLAG	  (1<<5)
++#define _PSB_DPST_PIPEA_FLAG      (1<<6)
++#define _PSB_PIPEA_EVENT_FLAG     (1<<6)
++#define _PSB_VSYNC_PIPEA_FLAG	  (1<<7)
++#define _MDFLD_MIPIA_FLAG	  (1<<16)
++#define _MDFLD_MIPIC_FLAG	  (1<<17)
++#define _PSB_IRQ_SGX_FLAG	  (1<<18)
++#define _PSB_IRQ_MSVDX_FLAG	  (1<<19)
++#define _LNC_IRQ_TOPAZ_FLAG	  (1<<20)
++
++#define _PSB_PIPE_EVENT_FLAG	(_PSB_VSYNC_PIPEA_FLAG | \
++				 _PSB_VSYNC_PIPEB_FLAG)
++
++/* This flag includes all the display IRQ bits excepts the vblank irqs. */
++#define _MDFLD_DISP_ALL_IRQ_FLAG (_MDFLD_PIPEC_EVENT_FLAG | \
++				  _MDFLD_PIPEB_EVENT_FLAG | \
++				  _PSB_PIPEA_EVENT_FLAG | \
++				  _PSB_VSYNC_PIPEA_FLAG | \
++				  _MDFLD_MIPIA_FLAG | \
++				  _MDFLD_MIPIC_FLAG)
++#define PSB_INT_IDENTITY_R	  0x20A4
++#define PSB_INT_MASK_R		  0x20A8
++#define PSB_INT_ENABLE_R	  0x20A0
++
++#define _PSB_MMU_ER_MASK      0x0001FF00
++#define _PSB_MMU_ER_HOST      (1 << 16)
++#define GPIOA			0x5010
++#define GPIOB			0x5014
++#define GPIOC			0x5018
++#define GPIOD			0x501c
++#define GPIOE			0x5020
++#define GPIOF			0x5024
++#define GPIOG			0x5028
++#define GPIOH			0x502c
++#define GPIO_CLOCK_DIR_MASK		(1 << 0)
++#define GPIO_CLOCK_DIR_IN		(0 << 1)
++#define GPIO_CLOCK_DIR_OUT		(1 << 1)
++#define GPIO_CLOCK_VAL_MASK		(1 << 2)
++#define GPIO_CLOCK_VAL_OUT		(1 << 3)
++#define GPIO_CLOCK_VAL_IN		(1 << 4)
++#define GPIO_CLOCK_PULLUP_DISABLE	(1 << 5)
++#define GPIO_DATA_DIR_MASK		(1 << 8)
++#define GPIO_DATA_DIR_IN		(0 << 9)
++#define GPIO_DATA_DIR_OUT		(1 << 9)
++#define GPIO_DATA_VAL_MASK		(1 << 10)
++#define GPIO_DATA_VAL_OUT		(1 << 11)
++#define GPIO_DATA_VAL_IN		(1 << 12)
++#define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
++
++#define VCLK_DIVISOR_VGA0   0x6000
++#define VCLK_DIVISOR_VGA1   0x6004
++#define VCLK_POST_DIV	    0x6010
++
++#define PSB_COMM_2D (PSB_ENGINE_2D << 4)
++#define PSB_COMM_3D (PSB_ENGINE_3D << 4)
++#define PSB_COMM_TA (PSB_ENGINE_TA << 4)
++#define PSB_COMM_HP (PSB_ENGINE_HP << 4)
++#define PSB_COMM_USER_IRQ (1024 >> 2)
++#define PSB_COMM_USER_IRQ_LOST (PSB_COMM_USER_IRQ + 1)
++#define PSB_COMM_FW (2048 >> 2)
++
++#define PSB_UIRQ_VISTEST	       1
++#define PSB_UIRQ_OOM_REPLY	       2
++#define PSB_UIRQ_FIRE_TA_REPLY	       3
++#define PSB_UIRQ_FIRE_RASTER_REPLY     4
++
++#define PSB_2D_SIZE (256*1024*1024)
++#define PSB_MAX_RELOC_PAGES 1024
++
++#define PSB_LOW_REG_OFFS 0x0204
++#define PSB_HIGH_REG_OFFS 0x0600
++
++#define PSB_NUM_VBLANKS 2
++
++
++#define PSB_2D_SIZE (256*1024*1024)
++#define PSB_MAX_RELOC_PAGES 1024
++
++#define PSB_LOW_REG_OFFS 0x0204
++#define PSB_HIGH_REG_OFFS 0x0600
++
++#define PSB_NUM_VBLANKS 2
++#define PSB_WATCHDOG_DELAY (DRM_HZ * 2)
++#define PSB_LID_DELAY (DRM_HZ / 10)
++
++#define MDFLD_PNW_B0 0x04
++#define MDFLD_PNW_C0 0x08
++
++#define MDFLD_DSR_2D_3D_0 	(1 << 0)
++#define MDFLD_DSR_2D_3D_2 	(1 << 1)
++#define MDFLD_DSR_CURSOR_0 	(1 << 2)
++#define MDFLD_DSR_CURSOR_2	(1 << 3)
++#define MDFLD_DSR_OVERLAY_0 	(1 << 4)
++#define MDFLD_DSR_OVERLAY_2 	(1 << 5)
++#define MDFLD_DSR_MIPI_CONTROL	(1 << 6)
++#define MDFLD_DSR_DAMAGE_MASK_0	((1 << 0) | (1 << 2) | (1 << 4))
++#define MDFLD_DSR_DAMAGE_MASK_2	((1 << 1) | (1 << 3) | (1 << 5))
++#define MDFLD_DSR_2D_3D 	(MDFLD_DSR_2D_3D_0 | MDFLD_DSR_2D_3D_2)
++
++#define MDFLD_DSR_RR		45
++#define MDFLD_DPU_ENABLE 	(1 << 31)
++#define MDFLD_DSR_FULLSCREEN 	(1 << 30)
++#define MDFLD_DSR_DELAY		(DRM_HZ / MDFLD_DSR_RR)
++
++#define PSB_PWR_STATE_ON		1
++#define PSB_PWR_STATE_OFF		2
++
++#define PSB_PMPOLICY_NOPM		0
++#define PSB_PMPOLICY_CLOCKGATING	1
++#define PSB_PMPOLICY_POWERDOWN		2
++
++#define PSB_PMSTATE_POWERUP		0
++#define PSB_PMSTATE_CLOCKGATED		1
++#define PSB_PMSTATE_POWERDOWN		2
++#define PSB_PCIx_MSI_ADDR_LOC		0x94
++#define PSB_PCIx_MSI_DATA_LOC		0x98
++
++/* Medfield crystal settings */
++#define KSEL_CRYSTAL_19 1
++#define KSEL_BYPASS_19 5
++#define KSEL_BYPASS_25 6
++#define KSEL_BYPASS_83_100 7
++
++struct opregion_header;
++struct opregion_acpi;
++struct opregion_swsci;
++struct opregion_asle;
++
++struct psb_intel_opregion {
++	struct opregion_header *header;
++	struct opregion_acpi *acpi;
++	struct opregion_swsci *swsci;
++	struct opregion_asle *asle;
++	int enabled;
++};
++
++struct sdvo_device_mapping {
++	u8 initialized;
++	u8 dvo_port;
++	u8 slave_addr;
++	u8 dvo_wiring;
++	u8 i2c_pin;
++	u8 i2c_speed;
++	u8 ddc_pin;
++};
++
++struct intel_gmbus {
++	struct i2c_adapter adapter;
++	struct i2c_adapter *force_bit;
++	u32 reg0;
++};
++
++/*
++ *	Register save state. This is used to hold the context when the
++ *	device is powered off. In the case of Oaktrail this can (but does not
++ *	yet) include screen blank. Operations occurring during the save
++ *	update the register cache instead.
++ */
++struct psb_state {
++	uint32_t saveDSPACNTR;
++	uint32_t saveDSPBCNTR;
++	uint32_t savePIPEACONF;
++	uint32_t savePIPEBCONF;
++	uint32_t savePIPEASRC;
++	uint32_t savePIPEBSRC;
++	uint32_t saveFPA0;
++	uint32_t saveFPA1;
++	uint32_t saveDPLL_A;
++	uint32_t saveDPLL_A_MD;
++	uint32_t saveHTOTAL_A;
++	uint32_t saveHBLANK_A;
++	uint32_t saveHSYNC_A;
++	uint32_t saveVTOTAL_A;
++	uint32_t saveVBLANK_A;
++	uint32_t saveVSYNC_A;
++	uint32_t saveDSPASTRIDE;
++	uint32_t saveDSPASIZE;
++	uint32_t saveDSPAPOS;
++	uint32_t saveDSPABASE;
++	uint32_t saveDSPASURF;
++	uint32_t saveDSPASTATUS;
++	uint32_t saveFPB0;
++	uint32_t saveFPB1;
++	uint32_t saveDPLL_B;
++	uint32_t saveDPLL_B_MD;
++	uint32_t saveHTOTAL_B;
++	uint32_t saveHBLANK_B;
++	uint32_t saveHSYNC_B;
++	uint32_t saveVTOTAL_B;
++	uint32_t saveVBLANK_B;
++	uint32_t saveVSYNC_B;
++	uint32_t saveDSPBSTRIDE;
++	uint32_t saveDSPBSIZE;
++	uint32_t saveDSPBPOS;
++	uint32_t saveDSPBBASE;
++	uint32_t saveDSPBSURF;
++	uint32_t saveDSPBSTATUS;
++	uint32_t saveVCLK_DIVISOR_VGA0;
++	uint32_t saveVCLK_DIVISOR_VGA1;
++	uint32_t saveVCLK_POST_DIV;
++	uint32_t saveVGACNTRL;
++	uint32_t saveADPA;
++	uint32_t saveLVDS;
++	uint32_t saveDVOA;
++	uint32_t saveDVOB;
++	uint32_t saveDVOC;
++	uint32_t savePP_ON;
++	uint32_t savePP_OFF;
++	uint32_t savePP_CONTROL;
++	uint32_t savePP_CYCLE;
++	uint32_t savePFIT_CONTROL;
++	uint32_t savePaletteA[256];
++	uint32_t savePaletteB[256];
++	uint32_t saveCLOCKGATING;
++	uint32_t saveDSPARB;
++	uint32_t saveDSPATILEOFF;
++	uint32_t saveDSPBTILEOFF;
++	uint32_t saveDSPAADDR;
++	uint32_t saveDSPBADDR;
++	uint32_t savePFIT_AUTO_RATIOS;
++	uint32_t savePFIT_PGM_RATIOS;
++	uint32_t savePP_ON_DELAYS;
++	uint32_t savePP_OFF_DELAYS;
++	uint32_t savePP_DIVISOR;
++	uint32_t saveBCLRPAT_A;
++	uint32_t saveBCLRPAT_B;
++	uint32_t saveDSPALINOFF;
++	uint32_t saveDSPBLINOFF;
++	uint32_t savePERF_MODE;
++	uint32_t saveDSPFW1;
++	uint32_t saveDSPFW2;
++	uint32_t saveDSPFW3;
++	uint32_t saveDSPFW4;
++	uint32_t saveDSPFW5;
++	uint32_t saveDSPFW6;
++	uint32_t saveCHICKENBIT;
++	uint32_t saveDSPACURSOR_CTRL;
++	uint32_t saveDSPBCURSOR_CTRL;
++	uint32_t saveDSPACURSOR_BASE;
++	uint32_t saveDSPBCURSOR_BASE;
++	uint32_t saveDSPACURSOR_POS;
++	uint32_t saveDSPBCURSOR_POS;
++	uint32_t save_palette_a[256];
++	uint32_t save_palette_b[256];
++	uint32_t saveOV_OVADD;
++	uint32_t saveOV_OGAMC0;
++	uint32_t saveOV_OGAMC1;
++	uint32_t saveOV_OGAMC2;
++	uint32_t saveOV_OGAMC3;
++	uint32_t saveOV_OGAMC4;
++	uint32_t saveOV_OGAMC5;
++	uint32_t saveOVC_OVADD;
++	uint32_t saveOVC_OGAMC0;
++	uint32_t saveOVC_OGAMC1;
++	uint32_t saveOVC_OGAMC2;
++	uint32_t saveOVC_OGAMC3;
++	uint32_t saveOVC_OGAMC4;
++	uint32_t saveOVC_OGAMC5;
++
++	/* DPST register save */
++	uint32_t saveHISTOGRAM_INT_CONTROL_REG;
++	uint32_t saveHISTOGRAM_LOGIC_CONTROL_REG;
++	uint32_t savePWM_CONTROL_LOGIC;
++};
++
++struct medfield_state {
++	uint32_t saveDPLL_A;
++	uint32_t saveFPA0;
++	uint32_t savePIPEACONF;
++	uint32_t saveHTOTAL_A;
++	uint32_t saveHBLANK_A;
++	uint32_t saveHSYNC_A;
++	uint32_t saveVTOTAL_A;
++	uint32_t saveVBLANK_A;
++	uint32_t saveVSYNC_A;
++	uint32_t savePIPEASRC;
++	uint32_t saveDSPASTRIDE;
++	uint32_t saveDSPALINOFF;
++	uint32_t saveDSPATILEOFF;
++	uint32_t saveDSPASIZE;
++	uint32_t saveDSPAPOS;
++	uint32_t saveDSPASURF;
++	uint32_t saveDSPACNTR;
++	uint32_t saveDSPASTATUS;
++	uint32_t save_palette_a[256];
++	uint32_t saveMIPI;
++
++	uint32_t saveDPLL_B;
++	uint32_t saveFPB0;
++	uint32_t savePIPEBCONF;
++	uint32_t saveHTOTAL_B;
++	uint32_t saveHBLANK_B;
++	uint32_t saveHSYNC_B;
++	uint32_t saveVTOTAL_B;
++	uint32_t saveVBLANK_B;
++	uint32_t saveVSYNC_B;
++	uint32_t savePIPEBSRC;
++	uint32_t saveDSPBSTRIDE;
++	uint32_t saveDSPBLINOFF;
++	uint32_t saveDSPBTILEOFF;
++	uint32_t saveDSPBSIZE;
++	uint32_t saveDSPBPOS;
++	uint32_t saveDSPBSURF;
++	uint32_t saveDSPBCNTR;
++	uint32_t saveDSPBSTATUS;
++	uint32_t save_palette_b[256];
++
++	uint32_t savePIPECCONF;
++	uint32_t saveHTOTAL_C;
++	uint32_t saveHBLANK_C;
++	uint32_t saveHSYNC_C;
++	uint32_t saveVTOTAL_C;
++	uint32_t saveVBLANK_C;
++	uint32_t saveVSYNC_C;
++	uint32_t savePIPECSRC;
++	uint32_t saveDSPCSTRIDE;
++	uint32_t saveDSPCLINOFF;
++	uint32_t saveDSPCTILEOFF;
++	uint32_t saveDSPCSIZE;
++	uint32_t saveDSPCPOS;
++	uint32_t saveDSPCSURF;
++	uint32_t saveDSPCCNTR;
++	uint32_t saveDSPCSTATUS;
++	uint32_t save_palette_c[256];
++	uint32_t saveMIPI_C;
++
++	uint32_t savePFIT_CONTROL;
++	uint32_t savePFIT_PGM_RATIOS;
++	uint32_t saveHDMIPHYMISCCTL;
++	uint32_t saveHDMIB_CONTROL;
++};
++
++struct cdv_state {
++	uint32_t saveDSPCLK_GATE_D;
++	uint32_t saveRAMCLK_GATE_D;
++	uint32_t saveDSPARB;
++	uint32_t saveDSPFW[6];
++	uint32_t saveADPA;
++	uint32_t savePP_CONTROL;
++	uint32_t savePFIT_PGM_RATIOS;
++	uint32_t saveLVDS;
++	uint32_t savePFIT_CONTROL;
++	uint32_t savePP_ON_DELAYS;
++	uint32_t savePP_OFF_DELAYS;
++	uint32_t savePP_CYCLE;
++	uint32_t saveVGACNTRL;
++	uint32_t saveIER;
++	uint32_t saveIMR;
++	u8	 saveLBB;
++};
++
++struct psb_save_area {
++	uint32_t saveBSM;
++	uint32_t saveVBT;
++	union {
++	        struct psb_state psb;
++		struct medfield_state mdfld;
++		struct cdv_state cdv;
++	};
++	uint32_t saveBLC_PWM_CTL2;
++	uint32_t saveBLC_PWM_CTL;
++};
++
++struct psb_ops;
++
++#define PSB_NUM_PIPE		3
++
++struct drm_psb_private {
++	struct drm_device *dev;
++	const struct psb_ops *ops;
++
++	struct psb_gtt gtt;
++
++	/* GTT Memory manager */
++	struct psb_gtt_mm *gtt_mm;
++	struct page *scratch_page;
++	u32 *gtt_map;
++	uint32_t stolen_base;
++	void *vram_addr;
++	unsigned long vram_stolen_size;
++	int gtt_initialized;
++	u16 gmch_ctrl;		/* Saved GTT setup */
++	u32 pge_ctl;
++
++	struct mutex gtt_mutex;
++	struct resource *gtt_mem;	/* Our PCI resource */
++
++	struct psb_mmu_driver *mmu;
++	struct psb_mmu_pd *pf_pd;
++
++	/*
++	 * Register base
++	 */
++
++	uint8_t *sgx_reg;
++	uint8_t *vdc_reg;
++	uint32_t gatt_free_offset;
++
++	/*
++	 * Fencing / irq.
++	 */
++
++	uint32_t vdc_irq_mask;
++	uint32_t pipestat[PSB_NUM_PIPE];
++
++	spinlock_t irqmask_lock;
++
++	/*
++	 * Power
++	 */
++
++	bool suspended;
++	bool display_power;
++	int display_count;
++
++	/*
++	 * Modesetting
++	 */
++	struct psb_intel_mode_device mode_dev;
++
++	struct drm_crtc *plane_to_crtc_mapping[PSB_NUM_PIPE];
++	struct drm_crtc *pipe_to_crtc_mapping[PSB_NUM_PIPE];
++	uint32_t num_pipe;
++
++	/*
++	 * OSPM info (Power management base) (can go ?)
++	 */
++	uint32_t ospm_base;
++
++	/*
++	 * Sizes info
++	 */
++
++	u32 fuse_reg_value;
++	u32 video_device_fuse;
++
++	/* PCI revision ID for B0:D2:F0 */
++	uint8_t platform_rev_id;
++
++	/* gmbus */
++	struct intel_gmbus *gmbus;
++
++	/* Used by SDVO */
++	int crt_ddc_pin;
++	/* FIXME: The mappings should be parsed from bios but for now we can
++		  pretend there are no mappings available */
++	struct sdvo_device_mapping sdvo_mappings[2];
++	u32 hotplug_supported_mask;
++	struct drm_property *broadcast_rgb_property;
++	struct drm_property *force_audio_property;
++
++	/*
++	 * LVDS info
++	 */
++	int backlight_duty_cycle;	/* restore backlight to this value */
++	bool panel_wants_dither;
++	struct drm_display_mode *panel_fixed_mode;
++	struct drm_display_mode *lfp_lvds_vbt_mode;
++	struct drm_display_mode *sdvo_lvds_vbt_mode;
++
++	struct bdb_lvds_backlight *lvds_bl; /* LVDS backlight info from VBT */
++	struct psb_intel_i2c_chan *lvds_i2c_bus; /* FIXME: Remove this? */
++
++	/* Feature bits from the VBIOS */
++	unsigned int int_tv_support:1;
++	unsigned int lvds_dither:1;
++	unsigned int lvds_vbt:1;
++	unsigned int int_crt_support:1;
++	unsigned int lvds_use_ssc:1;
++	int lvds_ssc_freq;
++	bool is_lvds_on;
++	bool is_mipi_on;
++	u32 mipi_ctrl_display;
++
++	unsigned int core_freq;
++	uint32_t iLVDS_enable;
++
++	/* Runtime PM state */
++	int rpm_enabled;
++
++	/* MID specific */
++	struct oaktrail_vbt vbt_data;
++	struct oaktrail_gct_data gct_data;
++
++	/* Oaktrail HDMI state */
++	struct oaktrail_hdmi_dev *hdmi_priv;
++	
++	/*
++	 * Register state
++	 */
++
++	struct psb_save_area regs;
++
++	/* MSI reg save */
++	uint32_t msi_addr;
++	uint32_t msi_data;
++
++
++	/*
++	 * LID-Switch
++	 */
++	spinlock_t lid_lock;
++	struct timer_list lid_timer;
++	struct psb_intel_opregion opregion;
++	u32 *lid_state;
++	u32 lid_last_state;
++
++	/*
++	 * Watchdog
++	 */
++
++	uint32_t apm_reg;
++	uint16_t apm_base;
++
++	/*
++	 * Used for modifying backlight from
++	 * xrandr -- consider removing and using HAL instead
++	 */
++	struct backlight_device *backlight_device;
++	struct drm_property *backlight_property;
++	uint32_t blc_adj1;
++	uint32_t blc_adj2;
++
++	void *fbdev;
++
++	/* 2D acceleration */
++	spinlock_t lock_2d;
++
++	/*
++	 * Panel brightness
++	 */
++	int brightness;
++	int brightness_adjusted;
++
++	bool dsr_enable;
++	u32 dsr_fb_update;
++	bool dpi_panel_on[3];
++	void *dsi_configs[2];
++	u32 bpp;
++	u32 bpp2;
++
++	u32 pipeconf[3];
++	u32 dspcntr[3];
++
++	int mdfld_panel_id;
++};
++
++
++/*
++ *	Operations for each board type
++ */
++ 
++struct psb_ops {
++	const char *name;
++	unsigned int accel_2d:1;
++	int pipes;		/* Number of output pipes */
++	int crtcs;		/* Number of CRTCs */
++	int sgx_offset;		/* Base offset of SGX device */
++
++	/* Sub functions */
++	struct drm_crtc_helper_funcs const *crtc_helper;
++	struct drm_crtc_funcs const *crtc_funcs;
++
++	/* Setup hooks */
++	int (*chip_setup)(struct drm_device *dev);
++	void (*chip_teardown)(struct drm_device *dev);
++
++	/* Display management hooks */
++	int (*output_init)(struct drm_device *dev);
++	/* Power management hooks */
++	void (*init_pm)(struct drm_device *dev);
++	int (*save_regs)(struct drm_device *dev);
++	int (*restore_regs)(struct drm_device *dev);
++	int (*power_up)(struct drm_device *dev);
++	int (*power_down)(struct drm_device *dev);
++
++	void (*lvds_bl_power)(struct drm_device *dev, bool on);
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++	/* Backlight */
++	int (*backlight_init)(struct drm_device *dev);
++#endif
++	int i2c_bus;		/* I2C bus identifier for Moorestown */
++};
++
++
++
++struct psb_mmu_driver;
++
++extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
++extern int drm_pick_crtcs(struct drm_device *dev);
++
++static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
++{
++	return (struct drm_psb_private *) dev->dev_private;
++}
++
++/*
++ * MMU stuff.
++ */
++
++extern struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
++					int trap_pagefaults,
++					int invalid_type,
++					struct drm_psb_private *dev_priv);
++extern void psb_mmu_driver_takedown(struct psb_mmu_driver *driver);
++extern struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver
++						 *driver);
++extern void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset,
++			       uint32_t gtt_start, uint32_t gtt_pages);
++extern struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
++					   int trap_pagefaults,
++					   int invalid_type);
++extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
++extern void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot);
++extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
++					unsigned long address,
++					uint32_t num_pages);
++extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
++				       uint32_t start_pfn,
++				       unsigned long address,
++				       uint32_t num_pages, int type);
++extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
++				  unsigned long *pfn);
++
++/*
++ * Enable / disable MMU for different requestors.
++ */
++
++
++extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
++extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
++				unsigned long address, uint32_t num_pages,
++				uint32_t desired_tile_stride,
++				uint32_t hw_tile_stride, int type);
++extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
++				 unsigned long address, uint32_t num_pages,
++				 uint32_t desired_tile_stride,
++				 uint32_t hw_tile_stride);
++/*
++ *psb_irq.c
++ */
++
++extern irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
++extern int psb_irq_enable_dpst(struct drm_device *dev);
++extern int psb_irq_disable_dpst(struct drm_device *dev);
++extern void psb_irq_preinstall(struct drm_device *dev);
++extern int psb_irq_postinstall(struct drm_device *dev);
++extern void psb_irq_uninstall(struct drm_device *dev);
++extern void psb_irq_turn_on_dpst(struct drm_device *dev);
++extern void psb_irq_turn_off_dpst(struct drm_device *dev);
++
++extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
++extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
++extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
++extern int psb_enable_vblank(struct drm_device *dev, int crtc);
++extern void psb_disable_vblank(struct drm_device *dev, int crtc);
++void
++psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
++
++void
++psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
++
++extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
++
++/*
++ * intel_opregion.c
++ */
++extern int gma_intel_opregion_init(struct drm_device *dev);
++extern int gma_intel_opregion_exit(struct drm_device *dev);
++
++/*
++ * framebuffer.c
++ */
++extern int psbfb_probed(struct drm_device *dev);
++extern int psbfb_remove(struct drm_device *dev,
++			struct drm_framebuffer *fb);
++/*
++ * accel_2d.c
++ */
++extern void psbfb_copyarea(struct fb_info *info,
++					const struct fb_copyarea *region);
++extern int psbfb_sync(struct fb_info *info);
++extern void psb_spank(struct drm_psb_private *dev_priv);
++
++/*
++ * psb_reset.c
++ */
++
++extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
++extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
++extern void psb_print_pagefault(struct drm_psb_private *dev_priv);
++
++/* modesetting */
++extern void psb_modeset_init(struct drm_device *dev);
++extern void psb_modeset_cleanup(struct drm_device *dev);
++extern int psb_fbdev_init(struct drm_device *dev);
++
++/* backlight.c */
++int gma_backlight_init(struct drm_device *dev);
++void gma_backlight_exit(struct drm_device *dev);
++
++/* oaktrail_crtc.c */
++extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
++
++/* oaktrail_lvds.c */
++extern void oaktrail_lvds_init(struct drm_device *dev,
++		    struct psb_intel_mode_device *mode_dev);
++
++/* psb_intel_display.c */
++extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs;
++extern const struct drm_crtc_funcs psb_intel_crtc_funcs;
++
++/* psb_intel_lvds.c */
++extern const struct drm_connector_helper_funcs
++					psb_intel_lvds_connector_helper_funcs;
++extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
++
++/* gem.c */
++extern int psb_gem_init_object(struct drm_gem_object *obj);
++extern void psb_gem_free_object(struct drm_gem_object *obj);
++extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
++			struct drm_file *file);
++extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
++			struct drm_mode_create_dumb *args);
++extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
++			uint32_t handle);
++extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
++			uint32_t handle, uint64_t *offset);
++extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
++extern int psb_gem_create_ioctl(struct drm_device *dev, void *data,
++			struct drm_file *file);
++extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
++					struct drm_file *file);
++
++/* psb_device.c */
++extern const struct psb_ops psb_chip_ops;
++
++/* oaktrail_device.c */
++extern const struct psb_ops oaktrail_chip_ops;
++
++/* mdlfd_device.c */
++extern const struct psb_ops mdfld_chip_ops;
++
++/* cdv_device.c */
++extern const struct psb_ops cdv_chip_ops;
++
++/*
++ * Debug print bits setting
++ */
++#define PSB_D_GENERAL (1 << 0)
++#define PSB_D_INIT    (1 << 1)
++#define PSB_D_IRQ     (1 << 2)
++#define PSB_D_ENTRY   (1 << 3)
++/* debug the get H/V BP/FP count */
++#define PSB_D_HV      (1 << 4)
++#define PSB_D_DBI_BF  (1 << 5)
++#define PSB_D_PM      (1 << 6)
++#define PSB_D_RENDER  (1 << 7)
++#define PSB_D_REG     (1 << 8)
++#define PSB_D_MSVDX   (1 << 9)
++#define PSB_D_TOPAZ   (1 << 10)
++
++extern int drm_psb_no_fb;
++extern int drm_idle_check_interval;
++
++/*
++ *	Utilities
++ */
++
++static inline u32 MRST_MSG_READ32(uint port, uint offset)
++{
++	int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
++	uint32_t ret_val = 0;
++	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++	pci_write_config_dword(pci_root, 0xD0, mcr);
++	pci_read_config_dword(pci_root, 0xD4, &ret_val);
++	pci_dev_put(pci_root);
++	return ret_val;
++}
++static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value)
++{
++	int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
++	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++	pci_write_config_dword(pci_root, 0xD4, value);
++	pci_write_config_dword(pci_root, 0xD0, mcr);
++	pci_dev_put(pci_root);
++}
++static inline u32 MDFLD_MSG_READ32(uint port, uint offset)
++{
++	int mcr = (0x10<<24) | (port << 16) | (offset << 8);
++	uint32_t ret_val = 0;
++	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++	pci_write_config_dword(pci_root, 0xD0, mcr);
++	pci_read_config_dword(pci_root, 0xD4, &ret_val);
++	pci_dev_put(pci_root);
++	return ret_val;
++}
++static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value)
++{
++	int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
++	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
++	pci_write_config_dword(pci_root, 0xD4, value);
++	pci_write_config_dword(pci_root, 0xD0, mcr);
++	pci_dev_put(pci_root);
++}
++
++static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	return ioread32(dev_priv->vdc_reg + reg);
++}
++
++#define REG_READ(reg)	       REGISTER_READ(dev, (reg))
++
++static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
++				      uint32_t val)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	iowrite32((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE(reg, val)	REGISTER_WRITE(dev, (reg), (val))
++
++static inline void REGISTER_WRITE16(struct drm_device *dev,
++					uint32_t reg, uint32_t val)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	iowrite16((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE16(reg, val)	  REGISTER_WRITE16(dev, (reg), (val))
++
++static inline void REGISTER_WRITE8(struct drm_device *dev,
++				       uint32_t reg, uint32_t val)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	iowrite8((val), dev_priv->vdc_reg + (reg));
++}
++
++#define REG_WRITE8(reg, val)		REGISTER_WRITE8(dev, (reg), (val))
++
++#define PSB_WVDC32(_val, _offs)		iowrite32(_val, dev_priv->vdc_reg + (_offs))
++#define PSB_RVDC32(_offs)		ioread32(dev_priv->vdc_reg + (_offs))
++
++/* #define TRAP_SGX_PM_FAULT 1 */
++#ifdef TRAP_SGX_PM_FAULT
++#define PSB_RSGX32(_offs)						\
++({									\
++	if (inl(dev_priv->apm_base + PSB_APM_STS) & 0x3) {		\
++		printk(KERN_ERR						\
++			"access sgx when it's off!! (READ) %s, %d\n",	\
++	       __FILE__, __LINE__);					\
++		mdelay(1000);						\
++	}								\
++	ioread32(dev_priv->sgx_reg + (_offs));				\
++})
++#else
++#define PSB_RSGX32(_offs)		ioread32(dev_priv->sgx_reg + (_offs))
++#endif
++#define PSB_WSGX32(_val, _offs)		iowrite32(_val, dev_priv->sgx_reg + (_offs))
++
++#define MSVDX_REG_DUMP 0
++
++#define PSB_WMSVDX32(_val, _offs)	iowrite32(_val, dev_priv->msvdx_reg + (_offs))
++#define PSB_RMSVDX32(_offs)		ioread32(dev_priv->msvdx_reg + (_offs))
++
++#endif
+diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
+new file mode 100644
+index 0000000..2616558
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_intel_display.c
+@@ -0,0 +1,1436 @@
++/*
++ * Copyright © 2006-2011 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ *	Eric Anholt <eric at anholt.net>
++ */
++
++#include <linux/i2c.h>
++#include <linux/pm_runtime.h>
++
++#include <drm/drmP.h>
++#include "framebuffer.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "psb_intel_display.h"
++#include "power.h"
++
++struct psb_intel_clock_t {
++	/* given values */
++	int n;
++	int m1, m2;
++	int p1, p2;
++	/* derived values */
++	int dot;
++	int vco;
++	int m;
++	int p;
++};
++
++struct psb_intel_range_t {
++	int min, max;
++};
++
++struct psb_intel_p2_t {
++	int dot_limit;
++	int p2_slow, p2_fast;
++};
++
++#define INTEL_P2_NUM		      2
++
++struct psb_intel_limit_t {
++	struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
++	struct psb_intel_p2_t p2;
++};
++
++#define I8XX_DOT_MIN		  25000
++#define I8XX_DOT_MAX		 350000
++#define I8XX_VCO_MIN		 930000
++#define I8XX_VCO_MAX		1400000
++#define I8XX_N_MIN		      3
++#define I8XX_N_MAX		     16
++#define I8XX_M_MIN		     96
++#define I8XX_M_MAX		    140
++#define I8XX_M1_MIN		     18
++#define I8XX_M1_MAX		     26
++#define I8XX_M2_MIN		      6
++#define I8XX_M2_MAX		     16
++#define I8XX_P_MIN		      4
++#define I8XX_P_MAX		    128
++#define I8XX_P1_MIN		      2
++#define I8XX_P1_MAX		     33
++#define I8XX_P1_LVDS_MIN	      1
++#define I8XX_P1_LVDS_MAX	      6
++#define I8XX_P2_SLOW		      4
++#define I8XX_P2_FAST		      2
++#define I8XX_P2_LVDS_SLOW	      14
++#define I8XX_P2_LVDS_FAST	      14	/* No fast option */
++#define I8XX_P2_SLOW_LIMIT	 165000
++
++#define I9XX_DOT_MIN		  20000
++#define I9XX_DOT_MAX		 400000
++#define I9XX_VCO_MIN		1400000
++#define I9XX_VCO_MAX		2800000
++#define I9XX_N_MIN		      3
++#define I9XX_N_MAX		      8
++#define I9XX_M_MIN		     70
++#define I9XX_M_MAX		    120
++#define I9XX_M1_MIN		     10
++#define I9XX_M1_MAX		     20
++#define I9XX_M2_MIN		      5
++#define I9XX_M2_MAX		      9
++#define I9XX_P_SDVO_DAC_MIN	      5
++#define I9XX_P_SDVO_DAC_MAX	     80
++#define I9XX_P_LVDS_MIN		      7
++#define I9XX_P_LVDS_MAX		     98
++#define I9XX_P1_MIN		      1
++#define I9XX_P1_MAX		      8
++#define I9XX_P2_SDVO_DAC_SLOW		     10
++#define I9XX_P2_SDVO_DAC_FAST		      5
++#define I9XX_P2_SDVO_DAC_SLOW_LIMIT	 200000
++#define I9XX_P2_LVDS_SLOW		     14
++#define I9XX_P2_LVDS_FAST		      7
++#define I9XX_P2_LVDS_SLOW_LIMIT		 112000
++
++#define INTEL_LIMIT_I8XX_DVO_DAC    0
++#define INTEL_LIMIT_I8XX_LVDS	    1
++#define INTEL_LIMIT_I9XX_SDVO_DAC   2
++#define INTEL_LIMIT_I9XX_LVDS	    3
++
++static const struct psb_intel_limit_t psb_intel_limits[] = {
++	{			/* INTEL_LIMIT_I8XX_DVO_DAC */
++	 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
++	 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
++	 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
++	 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
++	 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
++	 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
++	 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
++	 .p1 = {.min = I8XX_P1_MIN, .max = I8XX_P1_MAX},
++	 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
++		.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST},
++	 },
++	{			/* INTEL_LIMIT_I8XX_LVDS */
++	 .dot = {.min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX},
++	 .vco = {.min = I8XX_VCO_MIN, .max = I8XX_VCO_MAX},
++	 .n = {.min = I8XX_N_MIN, .max = I8XX_N_MAX},
++	 .m = {.min = I8XX_M_MIN, .max = I8XX_M_MAX},
++	 .m1 = {.min = I8XX_M1_MIN, .max = I8XX_M1_MAX},
++	 .m2 = {.min = I8XX_M2_MIN, .max = I8XX_M2_MAX},
++	 .p = {.min = I8XX_P_MIN, .max = I8XX_P_MAX},
++	 .p1 = {.min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX},
++	 .p2 = {.dot_limit = I8XX_P2_SLOW_LIMIT,
++		.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST},
++	 },
++	{			/* INTEL_LIMIT_I9XX_SDVO_DAC */
++	 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
++	 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
++	 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
++	 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
++	 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
++	 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
++	 .p = {.min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX},
++	 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
++	 .p2 = {.dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
++		.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast =
++		I9XX_P2_SDVO_DAC_FAST},
++	 },
++	{			/* INTEL_LIMIT_I9XX_LVDS */
++	 .dot = {.min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
++	 .vco = {.min = I9XX_VCO_MIN, .max = I9XX_VCO_MAX},
++	 .n = {.min = I9XX_N_MIN, .max = I9XX_N_MAX},
++	 .m = {.min = I9XX_M_MIN, .max = I9XX_M_MAX},
++	 .m1 = {.min = I9XX_M1_MIN, .max = I9XX_M1_MAX},
++	 .m2 = {.min = I9XX_M2_MIN, .max = I9XX_M2_MAX},
++	 .p = {.min = I9XX_P_LVDS_MIN, .max = I9XX_P_LVDS_MAX},
++	 .p1 = {.min = I9XX_P1_MIN, .max = I9XX_P1_MAX},
++	 /* The single-channel range is 25-112Mhz, and dual-channel
++	  * is 80-224Mhz.  Prefer single channel as much as possible.
++	  */
++	 .p2 = {.dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
++		.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST},
++	 },
++};
++
++static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
++{
++	const struct psb_intel_limit_t *limit;
++
++	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
++		limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
++	else
++		limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
++	return limit;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
++
++static void i8xx_clock(int refclk, struct psb_intel_clock_t *clock)
++{
++	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
++	clock->p = clock->p1 * clock->p2;
++	clock->vco = refclk * clock->m / (clock->n + 2);
++	clock->dot = clock->vco / clock->p;
++}
++
++/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
++
++static void i9xx_clock(int refclk, struct psb_intel_clock_t *clock)
++{
++	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
++	clock->p = clock->p1 * clock->p2;
++	clock->vco = refclk * clock->m / (clock->n + 2);
++	clock->dot = clock->vco / clock->p;
++}
++
++static void psb_intel_clock(struct drm_device *dev, int refclk,
++			struct psb_intel_clock_t *clock)
++{
++	return i9xx_clock(refclk, clock);
++}
++
++/**
++ * Returns whether any output on the specified pipe is of the specified type
++ */
++bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_mode_config *mode_config = &dev->mode_config;
++	struct drm_connector *l_entry;
++
++	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
++		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
++			struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(l_entry);
++			if (psb_intel_encoder->type == type)
++				return true;
++		}
++	}
++	return false;
++}
++
++#define INTELPllInvalid(s)   { /* ErrorF (s) */; return false; }
++/**
++ * Returns whether the given set of divisors are valid for a given refclk with
++ * the given connectors.
++ */
++
++static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
++			       struct psb_intel_clock_t *clock)
++{
++	const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
++
++	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
++		INTELPllInvalid("p1 out of range\n");
++	if (clock->p < limit->p.min || limit->p.max < clock->p)
++		INTELPllInvalid("p out of range\n");
++	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
++		INTELPllInvalid("m2 out of range\n");
++	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
++		INTELPllInvalid("m1 out of range\n");
++	if (clock->m1 <= clock->m2)
++		INTELPllInvalid("m1 <= m2\n");
++	if (clock->m < limit->m.min || limit->m.max < clock->m)
++		INTELPllInvalid("m out of range\n");
++	if (clock->n < limit->n.min || limit->n.max < clock->n)
++		INTELPllInvalid("n out of range\n");
++	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
++		INTELPllInvalid("vco out of range\n");
++	/* XXX: We may need to be checking "Dot clock"
++	 * depending on the multiplier, connector, etc.,
++	 * rather than just a single range.
++	 */
++	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
++		INTELPllInvalid("dot out of range\n");
++
++	return true;
++}
++
++/**
++ * Returns a set of divisors for the desired target clock with the given
++ * refclk, or FALSE.  The returned values represent the clock equation:
++ * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
++ */
++static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
++				int refclk,
++				struct psb_intel_clock_t *best_clock)
++{
++	struct drm_device *dev = crtc->dev;
++	struct psb_intel_clock_t clock;
++	const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
++	int err = target;
++
++	if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
++	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
++		/*
++		 * For LVDS, if the panel is on, just rely on its current
++		 * settings for dual-channel.  We haven't figured out how to
++		 * reliably set up different single/dual channel state, if we
++		 * even can.
++		 */
++		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
++		    LVDS_CLKB_POWER_UP)
++			clock.p2 = limit->p2.p2_fast;
++		else
++			clock.p2 = limit->p2.p2_slow;
++	} else {
++		if (target < limit->p2.dot_limit)
++			clock.p2 = limit->p2.p2_slow;
++		else
++			clock.p2 = limit->p2.p2_fast;
++	}
++
++	memset(best_clock, 0, sizeof(*best_clock));
++
++	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
++	     clock.m1++) {
++		for (clock.m2 = limit->m2.min;
++		     clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
++		     clock.m2++) {
++			for (clock.n = limit->n.min;
++			     clock.n <= limit->n.max; clock.n++) {
++				for (clock.p1 = limit->p1.min;
++				     clock.p1 <= limit->p1.max;
++				     clock.p1++) {
++					int this_err;
++
++					psb_intel_clock(dev, refclk, &clock);
++
++					if (!psb_intel_PLL_is_valid
++					    (crtc, &clock))
++						continue;
++
++					this_err = abs(clock.dot - target);
++					if (this_err < err) {
++						*best_clock = clock;
++						err = this_err;
++					}
++				}
++			}
++		}
++	}
++
++	return err != target;
++}
++
++void psb_intel_wait_for_vblank(struct drm_device *dev)
++{
++	/* Wait for 20ms, i.e. one cycle at 50hz. */
++	mdelay(20);
++}
++
++static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
++			    int x, int y, struct drm_framebuffer *old_fb)
++{
++	struct drm_device *dev = crtc->dev;
++	/* struct drm_i915_master_private *master_priv; */
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
++	int pipe = psb_intel_crtc->pipe;
++	unsigned long start, offset;
++	int dspbase = (pipe == 0 ? DSPABASE : DSPBBASE);
++	int dspsurf = (pipe == 0 ? DSPASURF : DSPBSURF);
++	int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
++	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++	u32 dspcntr;
++	int ret = 0;
++
++	if (!gma_power_begin(dev, true))
++		return 0;
++
++	/* no fb bound */
++	if (!crtc->fb) {
++		dev_dbg(dev->dev, "No FB bound\n");
++		goto psb_intel_pipe_cleaner;
++	}
++
++	/* We are displaying this buffer, make sure it is actually loaded
++	   into the GTT */
++	ret = psb_gtt_pin(psbfb->gtt);
++	if (ret < 0)
++		goto psb_intel_pipe_set_base_exit;
++	start = psbfb->gtt->offset;
++
++	offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
++
++	REG_WRITE(dspstride, crtc->fb->pitches[0]);
++
++	dspcntr = REG_READ(dspcntr_reg);
++	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
++
++	switch (crtc->fb->bits_per_pixel) {
++	case 8:
++		dspcntr |= DISPPLANE_8BPP;
++		break;
++	case 16:
++		if (crtc->fb->depth == 15)
++			dspcntr |= DISPPLANE_15_16BPP;
++		else
++			dspcntr |= DISPPLANE_16BPP;
++		break;
++	case 24:
++	case 32:
++		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
++		break;
++	default:
++		dev_err(dev->dev, "Unknown color depth\n");
++		ret = -EINVAL;
++		psb_gtt_unpin(psbfb->gtt);
++		goto psb_intel_pipe_set_base_exit;
++	}
++	REG_WRITE(dspcntr_reg, dspcntr);
++
++
++	if (0 /* FIXMEAC - check what PSB needs */) {
++		REG_WRITE(dspbase, offset);
++		REG_READ(dspbase);
++		REG_WRITE(dspsurf, start);
++		REG_READ(dspsurf);
++	} else {
++		REG_WRITE(dspbase, start + offset);
++		REG_READ(dspbase);
++	}
++
++psb_intel_pipe_cleaner:
++	/* If there was a previous display we can now unpin it */
++	if (old_fb)
++		psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
++
++psb_intel_pipe_set_base_exit:
++	gma_power_end(dev);
++	return ret;
++}
++
++/**
++ * Sets the power management mode of the pipe and plane.
++ *
++ * This code should probably grow support for turning the cursor off and back
++ * on appropriately at the same time as we're turning the pipe off/on.
++ */
++static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++	struct drm_device *dev = crtc->dev;
++	/* struct drm_i915_master_private *master_priv; */
++	/* struct drm_i915_private *dev_priv = dev->dev_private; */
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int pipe = psb_intel_crtc->pipe;
++	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
++	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++	int dspbase_reg = (pipe == 0) ? DSPABASE : DSPBBASE;
++	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++	u32 temp;
++
++	/* XXX: When our outputs are all unaware of DPMS modes other than off
++	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
++	 */
++	switch (mode) {
++	case DRM_MODE_DPMS_ON:
++	case DRM_MODE_DPMS_STANDBY:
++	case DRM_MODE_DPMS_SUSPEND:
++		/* Enable the DPLL */
++		temp = REG_READ(dpll_reg);
++		if ((temp & DPLL_VCO_ENABLE) == 0) {
++			REG_WRITE(dpll_reg, temp);
++			REG_READ(dpll_reg);
++			/* Wait for the clocks to stabilize. */
++			udelay(150);
++			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++			REG_READ(dpll_reg);
++			/* Wait for the clocks to stabilize. */
++			udelay(150);
++			REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
++			REG_READ(dpll_reg);
++			/* Wait for the clocks to stabilize. */
++			udelay(150);
++		}
++
++		/* Enable the pipe */
++		temp = REG_READ(pipeconf_reg);
++		if ((temp & PIPEACONF_ENABLE) == 0)
++			REG_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
++
++		/* Enable the plane */
++		temp = REG_READ(dspcntr_reg);
++		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
++			REG_WRITE(dspcntr_reg,
++				  temp | DISPLAY_PLANE_ENABLE);
++			/* Flush the plane changes */
++			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++		}
++
++		psb_intel_crtc_load_lut(crtc);
++
++		/* Give the overlay scaler a chance to enable
++		 * if it's on this pipe */
++		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
++		break;
++	case DRM_MODE_DPMS_OFF:
++		/* Give the overlay scaler a chance to disable
++		 * if it's on this pipe */
++		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
++
++		/* Disable the VGA plane that we never use */
++		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
++
++		/* Disable display plane */
++		temp = REG_READ(dspcntr_reg);
++		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
++			REG_WRITE(dspcntr_reg,
++				  temp & ~DISPLAY_PLANE_ENABLE);
++			/* Flush the plane changes */
++			REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
++			REG_READ(dspbase_reg);
++		}
++
++		/* Next, disable display pipes */
++		temp = REG_READ(pipeconf_reg);
++		if ((temp & PIPEACONF_ENABLE) != 0) {
++			REG_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
++			REG_READ(pipeconf_reg);
++		}
++
++		/* Wait for vblank for the disable to take effect. */
++		psb_intel_wait_for_vblank(dev);
++
++		temp = REG_READ(dpll_reg);
++		if ((temp & DPLL_VCO_ENABLE) != 0) {
++			REG_WRITE(dpll_reg, temp & ~DPLL_VCO_ENABLE);
++			REG_READ(dpll_reg);
++		}
++
++		/* Wait for the clocks to turn off. */
++		udelay(150);
++		break;
++	}
++
++	/*Set FIFO Watermarks*/
++	REG_WRITE(DSPARB, 0x3F3E);
++}
++
++static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
++{
++	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++}
++
++static void psb_intel_crtc_commit(struct drm_crtc *crtc)
++{
++	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
++}
++
++void psb_intel_encoder_prepare(struct drm_encoder *encoder)
++{
++	struct drm_encoder_helper_funcs *encoder_funcs =
++	    encoder->helper_private;
++	/* lvds has its own version of prepare see psb_intel_lvds_prepare */
++	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
++}
++
++void psb_intel_encoder_commit(struct drm_encoder *encoder)
++{
++	struct drm_encoder_helper_funcs *encoder_funcs =
++	    encoder->helper_private;
++	/* lvds has its own version of commit see psb_intel_lvds_commit */
++	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
++}
++
++void psb_intel_encoder_destroy(struct drm_encoder *encoder)
++{
++	struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
++
++	drm_encoder_cleanup(encoder);
++	kfree(intel_encoder);
++}
++
++static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
++				  struct drm_display_mode *mode,
++				  struct drm_display_mode *adjusted_mode)
++{
++	return true;
++}
++
++
++/**
++ * Return the pipe currently connected to the panel fitter,
++ * or -1 if the panel fitter is not present or not in use
++ */
++static int psb_intel_panel_fitter_pipe(struct drm_device *dev)
++{
++	u32 pfit_control;
++
++	pfit_control = REG_READ(PFIT_CONTROL);
++
++	/* See if the panel fitter is in use */
++	if ((pfit_control & PFIT_ENABLE) == 0)
++		return -1;
++	/* Must be on PIPE 1 for PSB */
++	return 1;
++}
++
++static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
++			       struct drm_display_mode *mode,
++			       struct drm_display_mode *adjusted_mode,
++			       int x, int y,
++			       struct drm_framebuffer *old_fb)
++{
++	struct drm_device *dev = crtc->dev;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
++	int pipe = psb_intel_crtc->pipe;
++	int fp_reg = (pipe == 0) ? FPA0 : FPB0;
++	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
++	int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
++	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
++	int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
++	int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
++	int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
++	int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
++	int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
++	int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
++	int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
++	int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
++	int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
++	int refclk;
++	struct psb_intel_clock_t clock;
++	u32 dpll = 0, fp = 0, dspcntr, pipeconf;
++	bool ok, is_sdvo = false;
++	bool is_lvds = false, is_tv = false;
++	struct drm_mode_config *mode_config = &dev->mode_config;
++	struct drm_connector *connector;
++
++	/* No scan out no play */
++	if (crtc->fb == NULL) {
++		crtc_funcs->mode_set_base(crtc, x, y, old_fb);
++		return 0;
++	}
++
++	list_for_each_entry(connector, &mode_config->connector_list, head) {
++		struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++
++		if (!connector->encoder
++		    || connector->encoder->crtc != crtc)
++			continue;
++
++		switch (psb_intel_encoder->type) {
++		case INTEL_OUTPUT_LVDS:
++			is_lvds = true;
++			break;
++		case INTEL_OUTPUT_SDVO:
++			is_sdvo = true;
++			break;
++		case INTEL_OUTPUT_TVOUT:
++			is_tv = true;
++			break;
++		}
++	}
++
++	refclk = 96000;
++
++	ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
++				 &clock);
++	if (!ok) {
++		dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
++		return 0;
++	}
++
++	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
++
++	dpll = DPLL_VGA_MODE_DIS;
++	if (is_lvds) {
++		dpll |= DPLLB_MODE_LVDS;
++		dpll |= DPLL_DVO_HIGH_SPEED;
++	} else
++		dpll |= DPLLB_MODE_DAC_SERIAL;
++	if (is_sdvo) {
++		int sdvo_pixel_multiply =
++			    adjusted_mode->clock / mode->clock;
++		dpll |= DPLL_DVO_HIGH_SPEED;
++		dpll |=
++		    (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
++	}
++
++	/* compute bitmask from p1 value */
++	dpll |= (1 << (clock.p1 - 1)) << 16;
++	switch (clock.p2) {
++	case 5:
++		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
++		break;
++	case 7:
++		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
++		break;
++	case 10:
++		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
++		break;
++	case 14:
++		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
++		break;
++	}
++
++	if (is_tv) {
++		/* XXX: just matching BIOS for now */
++/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
++		dpll |= 3;
++	}
++	dpll |= PLL_REF_INPUT_DREFCLK;
++
++	/* setup pipeconf */
++	pipeconf = REG_READ(pipeconf_reg);
++
++	/* Set up the display plane register */
++	dspcntr = DISPPLANE_GAMMA_ENABLE;
++
++	if (pipe == 0)
++		dspcntr |= DISPPLANE_SEL_PIPE_A;
++	else
++		dspcntr |= DISPPLANE_SEL_PIPE_B;
++
++	dspcntr |= DISPLAY_PLANE_ENABLE;
++	pipeconf |= PIPEACONF_ENABLE;
++	dpll |= DPLL_VCO_ENABLE;
++
++
++	/* Disable the panel fitter if it was on our pipe */
++	if (psb_intel_panel_fitter_pipe(dev) == pipe)
++		REG_WRITE(PFIT_CONTROL, 0);
++
++	drm_mode_debug_printmodeline(mode);
++
++	if (dpll & DPLL_VCO_ENABLE) {
++		REG_WRITE(fp_reg, fp);
++		REG_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
++		REG_READ(dpll_reg);
++		udelay(150);
++	}
++
++	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
++	 * This is an exception to the general rule that mode_set doesn't turn
++	 * things on.
++	 */
++	if (is_lvds) {
++		u32 lvds = REG_READ(LVDS);
++
++		lvds &= ~LVDS_PIPEB_SELECT;
++		if (pipe == 1)
++			lvds |= LVDS_PIPEB_SELECT;
++
++		lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
++		/* Set the B0-B3 data pairs corresponding to
++		 * whether we're going to
++		 * set the DPLLs for dual-channel mode or not.
++		 */
++		lvds &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
++		if (clock.p2 == 7)
++			lvds |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
++
++		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
++		 * appropriately here, but we need to look more
++		 * thoroughly into how panels behave in the two modes.
++		 */
++
++		REG_WRITE(LVDS, lvds);
++		REG_READ(LVDS);
++	}
++
++	REG_WRITE(fp_reg, fp);
++	REG_WRITE(dpll_reg, dpll);
++	REG_READ(dpll_reg);
++	/* Wait for the clocks to stabilize. */
++	udelay(150);
++
++	/* write it again -- the BIOS does, after all */
++	REG_WRITE(dpll_reg, dpll);
++
++	REG_READ(dpll_reg);
++	/* Wait for the clocks to stabilize. */
++	udelay(150);
++
++	REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
++		  ((adjusted_mode->crtc_htotal - 1) << 16));
++	REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
++		  ((adjusted_mode->crtc_hblank_end - 1) << 16));
++	REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
++		  ((adjusted_mode->crtc_hsync_end - 1) << 16));
++	REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
++		  ((adjusted_mode->crtc_vtotal - 1) << 16));
++	REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
++		  ((adjusted_mode->crtc_vblank_end - 1) << 16));
++	REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
++		  ((adjusted_mode->crtc_vsync_end - 1) << 16));
++	/* pipesrc and dspsize control the size that is scaled from,
++	 * which should always be the user's requested size.
++	 */
++	REG_WRITE(dspsize_reg,
++		  ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
++	REG_WRITE(dsppos_reg, 0);
++	REG_WRITE(pipesrc_reg,
++		  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
++	REG_WRITE(pipeconf_reg, pipeconf);
++	REG_READ(pipeconf_reg);
++
++	psb_intel_wait_for_vblank(dev);
++
++	REG_WRITE(dspcntr_reg, dspcntr);
++
++	/* Flush the plane changes */
++	crtc_funcs->mode_set_base(crtc, x, y, old_fb);
++
++	psb_intel_wait_for_vblank(dev);
++
++	return 0;
++}
++
++/** Loads the palette/gamma unit for the CRTC with the prepared values */
++void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_psb_private *dev_priv =
++				(struct drm_psb_private *)dev->dev_private;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int palreg = PALETTE_A;
++	int i;
++
++	/* The clocks have to be on to load the palette. */
++	if (!crtc->enabled)
++		return;
++
++	switch (psb_intel_crtc->pipe) {
++	case 0:
++		break;
++	case 1:
++		palreg = PALETTE_B;
++		break;
++	case 2:
++		palreg = PALETTE_C;
++		break;
++	default:
++		dev_err(dev->dev, "Illegal Pipe Number.\n");
++		return;
++	}
++
++	if (gma_power_begin(dev, false)) {
++		for (i = 0; i < 256; i++) {
++			REG_WRITE(palreg + 4 * i,
++				  ((psb_intel_crtc->lut_r[i] +
++				  psb_intel_crtc->lut_adj[i]) << 16) |
++				  ((psb_intel_crtc->lut_g[i] +
++				  psb_intel_crtc->lut_adj[i]) << 8) |
++				  (psb_intel_crtc->lut_b[i] +
++				  psb_intel_crtc->lut_adj[i]));
++		}
++		gma_power_end(dev);
++	} else {
++		for (i = 0; i < 256; i++) {
++			dev_priv->regs.psb.save_palette_a[i] =
++				  ((psb_intel_crtc->lut_r[i] +
++				  psb_intel_crtc->lut_adj[i]) << 16) |
++				  ((psb_intel_crtc->lut_g[i] +
++				  psb_intel_crtc->lut_adj[i]) << 8) |
++				  (psb_intel_crtc->lut_b[i] +
++				  psb_intel_crtc->lut_adj[i]);
++		}
++
++	}
++}
++
++/**
++ * Save HW states of giving crtc
++ */
++static void psb_intel_crtc_save(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	/* struct drm_psb_private *dev_priv =
++			(struct drm_psb_private *)dev->dev_private; */
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
++	int pipeA = (psb_intel_crtc->pipe == 0);
++	uint32_t paletteReg;
++	int i;
++
++	if (!crtc_state) {
++		dev_err(dev->dev, "No CRTC state found\n");
++		return;
++	}
++
++	crtc_state->saveDSPCNTR = REG_READ(pipeA ? DSPACNTR : DSPBCNTR);
++	crtc_state->savePIPECONF = REG_READ(pipeA ? PIPEACONF : PIPEBCONF);
++	crtc_state->savePIPESRC = REG_READ(pipeA ? PIPEASRC : PIPEBSRC);
++	crtc_state->saveFP0 = REG_READ(pipeA ? FPA0 : FPB0);
++	crtc_state->saveFP1 = REG_READ(pipeA ? FPA1 : FPB1);
++	crtc_state->saveDPLL = REG_READ(pipeA ? DPLL_A : DPLL_B);
++	crtc_state->saveHTOTAL = REG_READ(pipeA ? HTOTAL_A : HTOTAL_B);
++	crtc_state->saveHBLANK = REG_READ(pipeA ? HBLANK_A : HBLANK_B);
++	crtc_state->saveHSYNC = REG_READ(pipeA ? HSYNC_A : HSYNC_B);
++	crtc_state->saveVTOTAL = REG_READ(pipeA ? VTOTAL_A : VTOTAL_B);
++	crtc_state->saveVBLANK = REG_READ(pipeA ? VBLANK_A : VBLANK_B);
++	crtc_state->saveVSYNC = REG_READ(pipeA ? VSYNC_A : VSYNC_B);
++	crtc_state->saveDSPSTRIDE = REG_READ(pipeA ? DSPASTRIDE : DSPBSTRIDE);
++
++	/*NOTE: DSPSIZE DSPPOS only for psb*/
++	crtc_state->saveDSPSIZE = REG_READ(pipeA ? DSPASIZE : DSPBSIZE);
++	crtc_state->saveDSPPOS = REG_READ(pipeA ? DSPAPOS : DSPBPOS);
++
++	crtc_state->saveDSPBASE = REG_READ(pipeA ? DSPABASE : DSPBBASE);
++
++	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
++	for (i = 0; i < 256; ++i)
++		crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
++}
++
++/**
++ * Restore HW states of giving crtc
++ */
++static void psb_intel_crtc_restore(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	/* struct drm_psb_private * dev_priv =
++				(struct drm_psb_private *)dev->dev_private; */
++	struct psb_intel_crtc *psb_intel_crtc =  to_psb_intel_crtc(crtc);
++	struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
++	/* struct drm_crtc_helper_funcs * crtc_funcs = crtc->helper_private; */
++	int pipeA = (psb_intel_crtc->pipe == 0);
++	uint32_t paletteReg;
++	int i;
++
++	if (!crtc_state) {
++		dev_err(dev->dev, "No crtc state\n");
++		return;
++	}
++
++	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
++		REG_WRITE(pipeA ? DPLL_A : DPLL_B,
++			crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
++		REG_READ(pipeA ? DPLL_A : DPLL_B);
++		udelay(150);
++	}
++
++	REG_WRITE(pipeA ? FPA0 : FPB0, crtc_state->saveFP0);
++	REG_READ(pipeA ? FPA0 : FPB0);
++
++	REG_WRITE(pipeA ? FPA1 : FPB1, crtc_state->saveFP1);
++	REG_READ(pipeA ? FPA1 : FPB1);
++
++	REG_WRITE(pipeA ? DPLL_A : DPLL_B, crtc_state->saveDPLL);
++	REG_READ(pipeA ? DPLL_A : DPLL_B);
++	udelay(150);
++
++	REG_WRITE(pipeA ? HTOTAL_A : HTOTAL_B, crtc_state->saveHTOTAL);
++	REG_WRITE(pipeA ? HBLANK_A : HBLANK_B, crtc_state->saveHBLANK);
++	REG_WRITE(pipeA ? HSYNC_A : HSYNC_B, crtc_state->saveHSYNC);
++	REG_WRITE(pipeA ? VTOTAL_A : VTOTAL_B, crtc_state->saveVTOTAL);
++	REG_WRITE(pipeA ? VBLANK_A : VBLANK_B, crtc_state->saveVBLANK);
++	REG_WRITE(pipeA ? VSYNC_A : VSYNC_B, crtc_state->saveVSYNC);
++	REG_WRITE(pipeA ? DSPASTRIDE : DSPBSTRIDE, crtc_state->saveDSPSTRIDE);
++
++	REG_WRITE(pipeA ? DSPASIZE : DSPBSIZE, crtc_state->saveDSPSIZE);
++	REG_WRITE(pipeA ? DSPAPOS : DSPBPOS, crtc_state->saveDSPPOS);
++
++	REG_WRITE(pipeA ? PIPEASRC : PIPEBSRC, crtc_state->savePIPESRC);
++	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
++	REG_WRITE(pipeA ? PIPEACONF : PIPEBCONF, crtc_state->savePIPECONF);
++
++	psb_intel_wait_for_vblank(dev);
++
++	REG_WRITE(pipeA ? DSPACNTR : DSPBCNTR, crtc_state->saveDSPCNTR);
++	REG_WRITE(pipeA ? DSPABASE : DSPBBASE, crtc_state->saveDSPBASE);
++
++	psb_intel_wait_for_vblank(dev);
++
++	paletteReg = pipeA ? PALETTE_A : PALETTE_B;
++	for (i = 0; i < 256; ++i)
++		REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
++}
++
++static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
++				 struct drm_file *file_priv,
++				 uint32_t handle,
++				 uint32_t width, uint32_t height)
++{
++	struct drm_device *dev = crtc->dev;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int pipe = psb_intel_crtc->pipe;
++	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
++	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
++	uint32_t temp;
++	size_t addr = 0;
++	struct gtt_range *gt;
++	struct drm_gem_object *obj;
++	int ret;
++
++	/* if we want to turn of the cursor ignore width and height */
++	if (!handle) {
++		/* turn off the cursor */
++		temp = CURSOR_MODE_DISABLE;
++
++		if (gma_power_begin(dev, false)) {
++			REG_WRITE(control, temp);
++			REG_WRITE(base, 0);
++			gma_power_end(dev);
++		}
++
++		/* Unpin the old GEM object */
++		if (psb_intel_crtc->cursor_obj) {
++			gt = container_of(psb_intel_crtc->cursor_obj,
++							struct gtt_range, gem);
++			psb_gtt_unpin(gt);
++			drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
++			psb_intel_crtc->cursor_obj = NULL;
++		}
++
++		return 0;
++	}
++
++	/* Currently we only support 64x64 cursors */
++	if (width != 64 || height != 64) {
++		dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
++		return -EINVAL;
++	}
++
++	obj = drm_gem_object_lookup(dev, file_priv, handle);
++	if (!obj)
++		return -ENOENT;
++
++	if (obj->size < width * height * 4) {
++		dev_dbg(dev->dev, "buffer is to small\n");
++		return -ENOMEM;
++	}
++
++	gt = container_of(obj, struct gtt_range, gem);
++
++	/* Pin the memory into the GTT */
++	ret = psb_gtt_pin(gt);
++	if (ret) {
++		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
++		return ret;
++	}
++
++
++	addr = gt->offset;	/* Or resource.start ??? */
++
++	psb_intel_crtc->cursor_addr = addr;
++
++	temp = 0;
++	/* set the pipe for the cursor */
++	temp |= (pipe << 28);
++	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
++
++	if (gma_power_begin(dev, false)) {
++		REG_WRITE(control, temp);
++		REG_WRITE(base, addr);
++		gma_power_end(dev);
++	}
++
++	/* unpin the old bo */
++	if (psb_intel_crtc->cursor_obj) {
++		gt = container_of(psb_intel_crtc->cursor_obj,
++							struct gtt_range, gem);
++		psb_gtt_unpin(gt);
++		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
++		psb_intel_crtc->cursor_obj = obj;
++	}
++	return 0;
++}
++
++static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
++{
++	struct drm_device *dev = crtc->dev;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int pipe = psb_intel_crtc->pipe;
++	uint32_t temp = 0;
++	uint32_t addr;
++
++
++	if (x < 0) {
++		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
++		x = -x;
++	}
++	if (y < 0) {
++		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
++		y = -y;
++	}
++
++	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
++	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
++
++	addr = psb_intel_crtc->cursor_addr;
++
++	if (gma_power_begin(dev, false)) {
++		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
++		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
++		gma_power_end(dev);
++	}
++	return 0;
++}
++
++void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
++			 u16 *green, u16 *blue, uint32_t type, uint32_t size)
++{
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int i;
++
++	if (size != 256)
++		return;
++
++	for (i = 0; i < 256; i++) {
++		psb_intel_crtc->lut_r[i] = red[i] >> 8;
++		psb_intel_crtc->lut_g[i] = green[i] >> 8;
++		psb_intel_crtc->lut_b[i] = blue[i] >> 8;
++	}
++
++	psb_intel_crtc_load_lut(crtc);
++}
++
++static int psb_crtc_set_config(struct drm_mode_set *set)
++{
++	int ret;
++	struct drm_device *dev = set->crtc->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (!dev_priv->rpm_enabled)
++		return drm_crtc_helper_set_config(set);
++
++	pm_runtime_forbid(&dev->pdev->dev);
++	ret = drm_crtc_helper_set_config(set);
++	pm_runtime_allow(&dev->pdev->dev);
++	return ret;
++}
++
++/* Returns the clock of the currently programmed mode of the given pipe. */
++static int psb_intel_crtc_clock_get(struct drm_device *dev,
++				struct drm_crtc *crtc)
++{
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int pipe = psb_intel_crtc->pipe;
++	u32 dpll;
++	u32 fp;
++	struct psb_intel_clock_t clock;
++	bool is_lvds;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (gma_power_begin(dev, false)) {
++		dpll = REG_READ((pipe == 0) ? DPLL_A : DPLL_B);
++		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
++			fp = REG_READ((pipe == 0) ? FPA0 : FPB0);
++		else
++			fp = REG_READ((pipe == 0) ? FPA1 : FPB1);
++		is_lvds = (pipe == 1) && (REG_READ(LVDS) & LVDS_PORT_EN);
++		gma_power_end(dev);
++	} else {
++		dpll = (pipe == 0) ?
++			dev_priv->regs.psb.saveDPLL_A :
++			dev_priv->regs.psb.saveDPLL_B;
++
++		if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
++			fp = (pipe == 0) ?
++				dev_priv->regs.psb.saveFPA0 :
++				dev_priv->regs.psb.saveFPB0;
++		else
++			fp = (pipe == 0) ?
++				dev_priv->regs.psb.saveFPA1 :
++				dev_priv->regs.psb.saveFPB1;
++
++		is_lvds = (pipe == 1) && (dev_priv->regs.psb.saveLVDS &
++								LVDS_PORT_EN);
++	}
++
++	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
++	clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
++	clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
++
++	if (is_lvds) {
++		clock.p1 =
++		    ffs((dpll &
++			 DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
++			DPLL_FPA01_P1_POST_DIV_SHIFT);
++		clock.p2 = 14;
++
++		if ((dpll & PLL_REF_INPUT_MASK) ==
++		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
++			/* XXX: might not be 66MHz */
++			i8xx_clock(66000, &clock);
++		} else
++			i8xx_clock(48000, &clock);
++	} else {
++		if (dpll & PLL_P1_DIVIDE_BY_TWO)
++			clock.p1 = 2;
++		else {
++			clock.p1 =
++			    ((dpll &
++			      DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
++			     DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
++		}
++		if (dpll & PLL_P2_DIVIDE_BY_4)
++			clock.p2 = 4;
++		else
++			clock.p2 = 2;
++
++		i8xx_clock(48000, &clock);
++	}
++
++	/* XXX: It would be nice to validate the clocks, but we can't reuse
++	 * i830PllIsValid() because it relies on the xf86_config connector
++	 * configuration being accurate, which it isn't necessarily.
++	 */
++
++	return clock.dot;
++}
++
++/** Returns the currently programmed mode of the given pipe. */
++struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
++					     struct drm_crtc *crtc)
++{
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	int pipe = psb_intel_crtc->pipe;
++	struct drm_display_mode *mode;
++	int htot;
++	int hsync;
++	int vtot;
++	int vsync;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (gma_power_begin(dev, false)) {
++		htot = REG_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
++		hsync = REG_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
++		vtot = REG_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
++		vsync = REG_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
++		gma_power_end(dev);
++	} else {
++		htot = (pipe == 0) ?
++			dev_priv->regs.psb.saveHTOTAL_A :
++			dev_priv->regs.psb.saveHTOTAL_B;
++		hsync = (pipe == 0) ?
++			dev_priv->regs.psb.saveHSYNC_A :
++			dev_priv->regs.psb.saveHSYNC_B;
++		vtot = (pipe == 0) ?
++			dev_priv->regs.psb.saveVTOTAL_A :
++			dev_priv->regs.psb.saveVTOTAL_B;
++		vsync = (pipe == 0) ?
++			dev_priv->regs.psb.saveVSYNC_A :
++			dev_priv->regs.psb.saveVSYNC_B;
++	}
++
++	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++	if (!mode)
++		return NULL;
++
++	mode->clock = psb_intel_crtc_clock_get(dev, crtc);
++	mode->hdisplay = (htot & 0xffff) + 1;
++	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
++	mode->hsync_start = (hsync & 0xffff) + 1;
++	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
++	mode->vdisplay = (vtot & 0xffff) + 1;
++	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
++	mode->vsync_start = (vsync & 0xffff) + 1;
++	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
++
++	drm_mode_set_name(mode);
++	drm_mode_set_crtcinfo(mode, 0);
++
++	return mode;
++}
++
++void psb_intel_crtc_destroy(struct drm_crtc *crtc)
++{
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	struct gtt_range *gt;
++
++	/* Unpin the old GEM object */
++	if (psb_intel_crtc->cursor_obj) {
++		gt = container_of(psb_intel_crtc->cursor_obj,
++						struct gtt_range, gem);
++		psb_gtt_unpin(gt);
++		drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
++		psb_intel_crtc->cursor_obj = NULL;
++	}
++	kfree(psb_intel_crtc->crtc_state);
++	drm_crtc_cleanup(crtc);
++	kfree(psb_intel_crtc);
++}
++
++const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
++	.dpms = psb_intel_crtc_dpms,
++	.mode_fixup = psb_intel_crtc_mode_fixup,
++	.mode_set = psb_intel_crtc_mode_set,
++	.mode_set_base = psb_intel_pipe_set_base,
++	.prepare = psb_intel_crtc_prepare,
++	.commit = psb_intel_crtc_commit,
++};
++
++const struct drm_crtc_funcs psb_intel_crtc_funcs = {
++	.save = psb_intel_crtc_save,
++	.restore = psb_intel_crtc_restore,
++	.cursor_set = psb_intel_crtc_cursor_set,
++	.cursor_move = psb_intel_crtc_cursor_move,
++	.gamma_set = psb_intel_crtc_gamma_set,
++	.set_config = psb_crtc_set_config,
++	.destroy = psb_intel_crtc_destroy,
++};
++
++/*
++ * Set the default value of cursor control and base register
++ * to zero. This is a workaround for h/w defect on Oaktrail
++ */
++static void psb_intel_cursor_init(struct drm_device *dev, int pipe)
++{
++	u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
++	u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
++
++	REG_WRITE(control[pipe], 0);
++	REG_WRITE(base[pipe], 0);
++}
++
++void psb_intel_crtc_init(struct drm_device *dev, int pipe,
++		     struct psb_intel_mode_device *mode_dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_crtc *psb_intel_crtc;
++	int i;
++	uint16_t *r_base, *g_base, *b_base;
++
++	/* We allocate a extra array of drm_connector pointers
++	 * for fbdev after the crtc */
++	psb_intel_crtc =
++	    kzalloc(sizeof(struct psb_intel_crtc) +
++		    (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
++		    GFP_KERNEL);
++	if (psb_intel_crtc == NULL)
++		return;
++
++	psb_intel_crtc->crtc_state =
++		kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
++	if (!psb_intel_crtc->crtc_state) {
++		dev_err(dev->dev, "Crtc state error: No memory\n");
++		kfree(psb_intel_crtc);
++		return;
++	}
++
++	/* Set the CRTC operations from the chip specific data */
++	drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs);
++
++	drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
++	psb_intel_crtc->pipe = pipe;
++	psb_intel_crtc->plane = pipe;
++
++	r_base = psb_intel_crtc->base.gamma_store;
++	g_base = r_base + 256;
++	b_base = g_base + 256;
++	for (i = 0; i < 256; i++) {
++		psb_intel_crtc->lut_r[i] = i;
++		psb_intel_crtc->lut_g[i] = i;
++		psb_intel_crtc->lut_b[i] = i;
++		r_base[i] = i << 8;
++		g_base[i] = i << 8;
++		b_base[i] = i << 8;
++
++		psb_intel_crtc->lut_adj[i] = 0;
++	}
++
++	psb_intel_crtc->mode_dev = mode_dev;
++	psb_intel_crtc->cursor_addr = 0;
++
++	drm_crtc_helper_add(&psb_intel_crtc->base,
++						dev_priv->ops->crtc_helper);
++
++	/* Setup the array of drm_connector pointer array */
++	psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
++	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
++	       dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
++	dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] =
++							&psb_intel_crtc->base;
++	dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] =
++							&psb_intel_crtc->base;
++	psb_intel_crtc->mode_set.connectors =
++	    (struct drm_connector **) (psb_intel_crtc + 1);
++	psb_intel_crtc->mode_set.num_connectors = 0;
++	psb_intel_cursor_init(dev, pipe);
++}
++
++int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
++				struct drm_file *file_priv)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
++	struct drm_mode_object *drmmode_obj;
++	struct psb_intel_crtc *crtc;
++
++	if (!dev_priv) {
++		dev_err(dev->dev, "called with no initialization\n");
++		return -EINVAL;
++	}
++
++	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
++			DRM_MODE_OBJECT_CRTC);
++
++	if (!drmmode_obj) {
++		dev_err(dev->dev, "no such CRTC id\n");
++		return -EINVAL;
++	}
++
++	crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
++	pipe_from_crtc_id->pipe = crtc->pipe;
++
++	return 0;
++}
++
++struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
++{
++	struct drm_crtc *crtc = NULL;
++
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++		if (psb_intel_crtc->pipe == pipe)
++			break;
++	}
++	return crtc;
++}
++
++int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
++{
++	int index_mask = 0;
++	struct drm_connector *connector;
++	int entry = 0;
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list,
++			    head) {
++		struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++		if (type_mask & (1 << psb_intel_encoder->type))
++			index_mask |= (1 << entry);
++		entry++;
++	}
++	return index_mask;
++}
++
++/* The current Intel driver doesn't take advantage of encoders;
++   always give back the encoder for the connector.
++*/
++struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
++{
++	struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++
++	return &psb_intel_encoder->base;
++}
++
++void psb_intel_connector_attach_encoder(struct psb_intel_connector *connector,
++					struct psb_intel_encoder *encoder)
++{
++	connector->encoder = encoder;
++	drm_mode_connector_attach_encoder(&connector->base,
++					  &encoder->base);
++}
+diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_intel_display.h
+new file mode 100644
+index 0000000..535b49a
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_intel_display.h
+@@ -0,0 +1,28 @@
++/* Copyright (c) 2008, Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ * Eric Anholt <eric at anholt.net>
++ */
++
++#ifndef _INTEL_DISPLAY_H_
++#define _INTEL_DISPLAY_H_
++
++bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
++void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
++			 u16 *green, u16 *blue, uint32_t type, uint32_t size);
++void psb_intel_crtc_destroy(struct drm_crtc *crtc);
++
++#endif
+diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
+new file mode 100644
+index 0000000..f40535e
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
+@@ -0,0 +1,289 @@
++/*
++ * Copyright (c) 2009-2011, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef __INTEL_DRV_H__
++#define __INTEL_DRV_H__
++
++#include <linux/i2c.h>
++#include <linux/i2c-algo-bit.h>
++#include <drm/drm_crtc.h>
++#include <drm/drm_crtc_helper.h>
++#include <linux/gpio.h>
++
++/*
++ * Display related stuff
++ */
++
++/* store information about an Ixxx DVO */
++/* The i830->i865 use multiple DVOs with multiple i2cs */
++/* the i915, i945 have a single sDVO i2c bus - which is different */
++#define MAX_OUTPUTS 6
++/* maximum connectors per crtcs in the mode set */
++#define INTELFB_CONN_LIMIT 4
++
++#define INTEL_I2C_BUS_DVO 1
++#define INTEL_I2C_BUS_SDVO 2
++
++/* Intel Pipe Clone Bit */
++#define INTEL_HDMIB_CLONE_BIT 1
++#define INTEL_HDMIC_CLONE_BIT 2
++#define INTEL_HDMID_CLONE_BIT 3
++#define INTEL_HDMIE_CLONE_BIT 4
++#define INTEL_HDMIF_CLONE_BIT 5
++#define INTEL_SDVO_NON_TV_CLONE_BIT 6
++#define INTEL_SDVO_TV_CLONE_BIT 7
++#define INTEL_SDVO_LVDS_CLONE_BIT 8
++#define INTEL_ANALOG_CLONE_BIT 9
++#define INTEL_TV_CLONE_BIT 10
++#define INTEL_DP_B_CLONE_BIT 11
++#define INTEL_DP_C_CLONE_BIT 12
++#define INTEL_DP_D_CLONE_BIT 13
++#define INTEL_LVDS_CLONE_BIT 14
++#define INTEL_DVO_TMDS_CLONE_BIT 15
++#define INTEL_DVO_LVDS_CLONE_BIT 16
++#define INTEL_EDP_CLONE_BIT 17
++
++/* these are outputs from the chip - integrated only
++ * external chips are via DVO or SDVO output */
++#define INTEL_OUTPUT_UNUSED 0
++#define INTEL_OUTPUT_ANALOG 1
++#define INTEL_OUTPUT_DVO 2
++#define INTEL_OUTPUT_SDVO 3
++#define INTEL_OUTPUT_LVDS 4
++#define INTEL_OUTPUT_TVOUT 5
++#define INTEL_OUTPUT_HDMI 6
++#define INTEL_OUTPUT_MIPI 7
++#define INTEL_OUTPUT_MIPI2 8
++
++#define INTEL_DVO_CHIP_NONE 0
++#define INTEL_DVO_CHIP_LVDS 1
++#define INTEL_DVO_CHIP_TMDS 2
++#define INTEL_DVO_CHIP_TVOUT 4
++
++#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
++#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
++
++static inline void
++psb_intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
++				int multiplier)
++{
++	mode->clock *= multiplier;
++	mode->private_flags |= multiplier;
++}
++
++static inline int
++psb_intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
++{
++	return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK)
++	       >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
++}
++
++
++/*
++ * Holds information usually kept in the device driver's private data,
++ * since it needs to be shared across multiple device drivers' privates.
++ */
++struct psb_intel_mode_device {
++
++	/*
++	 * Abstracted memory manager operations
++	 */
++	 size_t(*bo_offset) (struct drm_device *dev, void *bo);
++
++	/*
++	 * Cursor (Can go ?)
++	 */
++	int cursor_needs_physical;
++
++	/*
++	 * LVDS info
++	 */
++	int backlight_duty_cycle;	/* restore backlight to this value */
++	bool panel_wants_dither;
++	struct drm_display_mode *panel_fixed_mode;
++	struct drm_display_mode *panel_fixed_mode2;
++	struct drm_display_mode *vbt_mode;	/* if any */
++
++	uint32_t saveBLC_PWM_CTL;
++};
++
++struct psb_intel_i2c_chan {
++	/* for getting at dev. private (mmio etc.) */
++	struct drm_device *drm_dev;
++	u32 reg;		/* GPIO reg */
++	struct i2c_adapter adapter;
++	struct i2c_algo_bit_data algo;
++	u8 slave_addr;
++};
++
++struct psb_intel_encoder {
++	struct drm_encoder base;
++	int type;
++	bool needs_tv_clock;
++	void (*hot_plug)(struct psb_intel_encoder *);
++	int crtc_mask;
++	int clone_mask;
++	void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
++
++	/* FIXME: Either make SDVO and LVDS store its i2c here or give CDV its
++	   own set of output privates */
++	struct psb_intel_i2c_chan *i2c_bus;
++	struct psb_intel_i2c_chan *ddc_bus;
++};
++
++struct psb_intel_connector {
++	struct drm_connector base;
++	struct psb_intel_encoder *encoder;
++};
++
++struct psb_intel_crtc_state {
++	uint32_t saveDSPCNTR;
++	uint32_t savePIPECONF;
++	uint32_t savePIPESRC;
++	uint32_t saveDPLL;
++	uint32_t saveFP0;
++	uint32_t saveFP1;
++	uint32_t saveHTOTAL;
++	uint32_t saveHBLANK;
++	uint32_t saveHSYNC;
++	uint32_t saveVTOTAL;
++	uint32_t saveVBLANK;
++	uint32_t saveVSYNC;
++	uint32_t saveDSPSTRIDE;
++	uint32_t saveDSPSIZE;
++	uint32_t saveDSPPOS;
++	uint32_t saveDSPBASE;
++	uint32_t savePalette[256];
++};
++
++struct psb_intel_crtc {
++	struct drm_crtc base;
++	int pipe;
++	int plane;
++	uint32_t cursor_addr;
++	u8 lut_r[256], lut_g[256], lut_b[256];
++	u8 lut_adj[256];
++	struct psb_intel_framebuffer *fbdev_fb;
++	/* a mode_set for fbdev users on this crtc */
++	struct drm_mode_set mode_set;
++
++	/* GEM object that holds our cursor */
++	struct drm_gem_object *cursor_obj;
++
++	struct drm_display_mode saved_mode;
++	struct drm_display_mode saved_adjusted_mode;
++
++	struct psb_intel_mode_device *mode_dev;
++
++	/*crtc mode setting flags*/
++	u32 mode_flags;
++
++	/* Saved Crtc HW states */
++	struct psb_intel_crtc_state *crtc_state;
++};
++
++#define to_psb_intel_crtc(x)	\
++		container_of(x, struct psb_intel_crtc, base)
++#define to_psb_intel_connector(x) \
++		container_of(x, struct psb_intel_connector, base)
++#define to_psb_intel_encoder(x)	\
++		container_of(x, struct psb_intel_encoder, base)
++#define to_psb_intel_framebuffer(x)	\
++		container_of(x, struct psb_intel_framebuffer, base)
++
++struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
++					const u32 reg, const char *name);
++void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
++int psb_intel_ddc_get_modes(struct drm_connector *connector,
++			    struct i2c_adapter *adapter);
++extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
++
++extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
++			    struct psb_intel_mode_device *mode_dev);
++extern void psb_intel_crt_init(struct drm_device *dev);
++extern bool psb_intel_sdvo_init(struct drm_device *dev, int output_device);
++extern void psb_intel_dvo_init(struct drm_device *dev);
++extern void psb_intel_tv_init(struct drm_device *dev);
++extern void psb_intel_lvds_init(struct drm_device *dev,
++			    struct psb_intel_mode_device *mode_dev);
++extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
++extern void oaktrail_lvds_init(struct drm_device *dev,
++			   struct psb_intel_mode_device *mode_dev);
++extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
++extern void oaktrail_dsi_init(struct drm_device *dev,
++			   struct psb_intel_mode_device *mode_dev);
++extern void mid_dsi_init(struct drm_device *dev,
++		    struct psb_intel_mode_device *mode_dev, int dsi_num);
++
++extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
++extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
++extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
++extern void psb_intel_encoder_destroy(struct drm_encoder *encoder);
++
++static inline struct psb_intel_encoder *psb_intel_attached_encoder(
++						struct drm_connector *connector)
++{
++	return to_psb_intel_connector(connector)->encoder;
++}
++
++extern void psb_intel_connector_attach_encoder(
++					struct psb_intel_connector *connector,
++					struct psb_intel_encoder *encoder);
++
++extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
++					      *connector);
++
++extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
++						    struct drm_crtc *crtc);
++extern void psb_intel_wait_for_vblank(struct drm_device *dev);
++extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
++				struct drm_file *file_priv);
++extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
++						 int pipe);
++extern struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev,
++					     int sdvoB);
++extern int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector);
++extern void psb_intel_sdvo_set_hotplug(struct drm_connector *connector,
++				   int enable);
++extern int intelfb_probe(struct drm_device *dev);
++extern int intelfb_remove(struct drm_device *dev,
++			  struct drm_framebuffer *fb);
++extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
++							*dev, struct
++							drm_mode_fb_cmd
++							*mode_cmd,
++							void *mm_private);
++extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
++				      struct drm_display_mode *mode,
++				      struct drm_display_mode *adjusted_mode);
++extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
++				     struct drm_display_mode *mode);
++extern int psb_intel_lvds_set_property(struct drm_connector *connector,
++					struct drm_property *property,
++					uint64_t value);
++extern void psb_intel_lvds_destroy(struct drm_connector *connector);
++extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
++
++/* intel_gmbus.c */
++extern void gma_intel_i2c_reset(struct drm_device *dev);
++extern int gma_intel_setup_gmbus(struct drm_device *dev);
++extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
++extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
++extern void gma_intel_teardown_gmbus(struct drm_device *dev);
++
++#endif				/* __INTEL_DRV_H__ */
+diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
+new file mode 100644
+index 0000000..c83f5b5
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
+@@ -0,0 +1,867 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ *	Eric Anholt <eric at anholt.net>
++ *	Dave Airlie <airlied at linux.ie>
++ *	Jesse Barnes <jesse.barnes at intel.com>
++ */
++
++#include <linux/i2c.h>
++#include <drm/drmP.h>
++
++#include "intel_bios.h"
++#include "psb_drv.h"
++#include "psb_intel_drv.h"
++#include "psb_intel_reg.h"
++#include "power.h"
++#include <linux/pm_runtime.h>
++
++/*
++ * LVDS I2C backlight control macros
++ */
++#define BRIGHTNESS_MAX_LEVEL 100
++#define BRIGHTNESS_MASK 0xFF
++#define BLC_I2C_TYPE	0x01
++#define BLC_PWM_TYPT	0x02
++
++#define BLC_POLARITY_NORMAL 0
++#define BLC_POLARITY_INVERSE 1
++
++#define PSB_BLC_MAX_PWM_REG_FREQ       (0xFFFE)
++#define PSB_BLC_MIN_PWM_REG_FREQ	(0x2)
++#define PSB_BLC_PWM_PRECISION_FACTOR	(10)
++#define PSB_BACKLIGHT_PWM_CTL_SHIFT	(16)
++#define PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR (0xFFFE)
++
++struct psb_intel_lvds_priv {
++	/*
++	 * Saved LVDS output states
++	 */
++	uint32_t savePP_ON;
++	uint32_t savePP_OFF;
++	uint32_t saveLVDS;
++	uint32_t savePP_CONTROL;
++	uint32_t savePP_CYCLE;
++	uint32_t savePFIT_CONTROL;
++	uint32_t savePFIT_PGM_RATIOS;
++	uint32_t saveBLC_PWM_CTL;
++
++	struct psb_intel_i2c_chan *i2c_bus;
++	struct psb_intel_i2c_chan *ddc_bus;
++};
++
++
++/*
++ * Returns the maximum level of the backlight duty cycle field.
++ */
++static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 ret;
++
++	if (gma_power_begin(dev, false)) {
++		ret = REG_READ(BLC_PWM_CTL);
++		gma_power_end(dev);
++	} else /* Powered off, use the saved value */
++		ret = dev_priv->regs.saveBLC_PWM_CTL;
++
++	/* Top 15bits hold the frequency mask */
++	ret = (ret &  BACKLIGHT_MODULATION_FREQ_MASK) >>
++					BACKLIGHT_MODULATION_FREQ_SHIFT;
++
++        ret *= 2;	/* Return a 16bit range as needed for setting */
++        if (ret == 0)
++                dev_err(dev->dev, "BL bug: Reg %08x save %08X\n",
++                        REG_READ(BLC_PWM_CTL), dev_priv->regs.saveBLC_PWM_CTL);
++	return ret;
++}
++
++/*
++ * Set LVDS backlight level by I2C command
++ *
++ * FIXME: at some point we need to both track this for PM and also
++ * disable runtime pm on MRST if the brightness is nil (ie blanked)
++ */
++static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
++					unsigned int level)
++{
++	struct drm_psb_private *dev_priv =
++		(struct drm_psb_private *)dev->dev_private;
++
++	struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
++	u8 out_buf[2];
++	unsigned int blc_i2c_brightness;
++
++	struct i2c_msg msgs[] = {
++		{
++			.addr = lvds_i2c_bus->slave_addr,
++			.flags = 0,
++			.len = 2,
++			.buf = out_buf,
++		}
++	};
++
++	blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
++			     BRIGHTNESS_MASK /
++			     BRIGHTNESS_MAX_LEVEL);
++
++	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
++		blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
++
++	out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
++	out_buf[1] = (u8)blc_i2c_brightness;
++
++	if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1) {
++		dev_dbg(dev->dev, "I2C set brightness.(command, value) (%d, %d)\n",
++			dev_priv->lvds_bl->brightnesscmd,
++			blc_i2c_brightness);
++		return 0;
++	}
++
++	dev_err(dev->dev, "I2C transfer error\n");
++	return -1;
++}
++
++
++static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
++{
++	struct drm_psb_private *dev_priv =
++			(struct drm_psb_private *)dev->dev_private;
++
++	u32 max_pwm_blc;
++	u32 blc_pwm_duty_cycle;
++
++	max_pwm_blc = psb_intel_lvds_get_max_backlight(dev);
++
++	/*BLC_PWM_CTL Should be initiated while backlight device init*/
++	BUG_ON(max_pwm_blc == 0);
++
++	blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
++
++	if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
++		blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
++
++	blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
++	REG_WRITE(BLC_PWM_CTL,
++		  (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
++		  (blc_pwm_duty_cycle));
++
++        dev_info(dev->dev, "Backlight lvds set brightness %08x\n",
++		  (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
++		  (blc_pwm_duty_cycle));
++
++	return 0;
++}
++
++/*
++ * Set LVDS backlight level either by I2C or PWM
++ */
++void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	dev_dbg(dev->dev, "backlight level is %d\n", level);
++
++	if (!dev_priv->lvds_bl) {
++		dev_err(dev->dev, "NO LVDS backlight info\n");
++		return;
++	}
++
++	if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
++		psb_lvds_i2c_set_brightness(dev, level);
++	else
++		psb_lvds_pwm_set_brightness(dev, level);
++}
++
++/*
++ * Sets the backlight level.
++ *
++ * level: backlight level, from 0 to psb_intel_lvds_get_max_backlight().
++ */
++static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 blc_pwm_ctl;
++
++	if (gma_power_begin(dev, false)) {
++		blc_pwm_ctl = REG_READ(BLC_PWM_CTL);
++		blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
++		REG_WRITE(BLC_PWM_CTL,
++				(blc_pwm_ctl |
++				(level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
++		dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
++					(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
++		gma_power_end(dev);
++	} else {
++		blc_pwm_ctl = dev_priv->regs.saveBLC_PWM_CTL &
++				~BACKLIGHT_DUTY_CYCLE_MASK;
++		dev_priv->regs.saveBLC_PWM_CTL = (blc_pwm_ctl |
++					(level << BACKLIGHT_DUTY_CYCLE_SHIFT));
++	}
++}
++
++/*
++ * Sets the power state for the panel.
++ */
++static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++	u32 pp_status;
++
++	if (!gma_power_begin(dev, true)) {
++	        dev_err(dev->dev, "set power, chip off!\n");
++		return;
++        }
++        
++	if (on) {
++		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++			  POWER_TARGET_ON);
++		do {
++			pp_status = REG_READ(PP_STATUS);
++		} while ((pp_status & PP_ON) == 0);
++
++		psb_intel_lvds_set_backlight(dev,
++					     mode_dev->backlight_duty_cycle);
++	} else {
++		psb_intel_lvds_set_backlight(dev, 0);
++
++		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++			  ~POWER_TARGET_ON);
++		do {
++			pp_status = REG_READ(PP_STATUS);
++		} while (pp_status & PP_ON);
++	}
++
++	gma_power_end(dev);
++}
++
++static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++
++	if (mode == DRM_MODE_DPMS_ON)
++		psb_intel_lvds_set_power(dev, true);
++	else
++		psb_intel_lvds_set_power(dev, false);
++
++	/* XXX: We never power down the LVDS pairs. */
++}
++
++static void psb_intel_lvds_save(struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++	struct drm_psb_private *dev_priv =
++		(struct drm_psb_private *)dev->dev_private;
++	struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++	struct psb_intel_lvds_priv *lvds_priv =
++		(struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
++
++	lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
++	lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
++	lvds_priv->saveLVDS = REG_READ(LVDS);
++	lvds_priv->savePP_CONTROL = REG_READ(PP_CONTROL);
++	lvds_priv->savePP_CYCLE = REG_READ(PP_CYCLE);
++	/*lvds_priv->savePP_DIVISOR = REG_READ(PP_DIVISOR);*/
++	lvds_priv->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++	lvds_priv->savePFIT_CONTROL = REG_READ(PFIT_CONTROL);
++	lvds_priv->savePFIT_PGM_RATIOS = REG_READ(PFIT_PGM_RATIOS);
++
++	/*TODO: move backlight_duty_cycle to psb_intel_lvds_priv*/
++	dev_priv->backlight_duty_cycle = (dev_priv->regs.saveBLC_PWM_CTL &
++						BACKLIGHT_DUTY_CYCLE_MASK);
++
++	/*
++	 * If the light is off at server startup,
++	 * just make it full brightness
++	 */
++	if (dev_priv->backlight_duty_cycle == 0)
++		dev_priv->backlight_duty_cycle =
++		psb_intel_lvds_get_max_backlight(dev);
++
++	dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
++			lvds_priv->savePP_ON,
++			lvds_priv->savePP_OFF,
++			lvds_priv->saveLVDS,
++			lvds_priv->savePP_CONTROL,
++			lvds_priv->savePP_CYCLE,
++			lvds_priv->saveBLC_PWM_CTL);
++}
++
++static void psb_intel_lvds_restore(struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++	u32 pp_status;
++	struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++	struct psb_intel_lvds_priv *lvds_priv =
++		(struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
++
++	dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
++			lvds_priv->savePP_ON,
++			lvds_priv->savePP_OFF,
++			lvds_priv->saveLVDS,
++			lvds_priv->savePP_CONTROL,
++			lvds_priv->savePP_CYCLE,
++			lvds_priv->saveBLC_PWM_CTL);
++
++	REG_WRITE(BLC_PWM_CTL, lvds_priv->saveBLC_PWM_CTL);
++	REG_WRITE(PFIT_CONTROL, lvds_priv->savePFIT_CONTROL);
++	REG_WRITE(PFIT_PGM_RATIOS, lvds_priv->savePFIT_PGM_RATIOS);
++	REG_WRITE(LVDSPP_ON, lvds_priv->savePP_ON);
++	REG_WRITE(LVDSPP_OFF, lvds_priv->savePP_OFF);
++	/*REG_WRITE(PP_DIVISOR, lvds_priv->savePP_DIVISOR);*/
++	REG_WRITE(PP_CYCLE, lvds_priv->savePP_CYCLE);
++	REG_WRITE(PP_CONTROL, lvds_priv->savePP_CONTROL);
++	REG_WRITE(LVDS, lvds_priv->saveLVDS);
++
++	if (lvds_priv->savePP_CONTROL & POWER_TARGET_ON) {
++		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) |
++			POWER_TARGET_ON);
++		do {
++			pp_status = REG_READ(PP_STATUS);
++		} while ((pp_status & PP_ON) == 0);
++	} else {
++		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) &
++			~POWER_TARGET_ON);
++		do {
++			pp_status = REG_READ(PP_STATUS);
++		} while (pp_status & PP_ON);
++	}
++}
++
++int psb_intel_lvds_mode_valid(struct drm_connector *connector,
++				 struct drm_display_mode *mode)
++{
++	struct drm_psb_private *dev_priv = connector->dev->dev_private;
++	struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++	struct drm_display_mode *fixed_mode =
++					dev_priv->mode_dev.panel_fixed_mode;
++
++	if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
++		fixed_mode = dev_priv->mode_dev.panel_fixed_mode2;
++
++	/* just in case */
++	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++		return MODE_NO_DBLESCAN;
++
++	/* just in case */
++	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++		return MODE_NO_INTERLACE;
++
++	if (fixed_mode) {
++		if (mode->hdisplay > fixed_mode->hdisplay)
++			return MODE_PANEL;
++		if (mode->vdisplay > fixed_mode->vdisplay)
++			return MODE_PANEL;
++	}
++	return MODE_OK;
++}
++
++bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
++				  struct drm_display_mode *mode,
++				  struct drm_display_mode *adjusted_mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++	struct psb_intel_crtc *psb_intel_crtc =
++				to_psb_intel_crtc(encoder->crtc);
++	struct drm_encoder *tmp_encoder;
++	struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
++	struct psb_intel_encoder *psb_intel_encoder =
++						to_psb_intel_encoder(encoder);
++
++	if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
++		panel_fixed_mode = mode_dev->panel_fixed_mode2;
++
++	/* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */
++	if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) {
++		printk(KERN_ERR "Can't support LVDS on pipe A\n");
++		return false;
++	}
++	if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
++		printk(KERN_ERR "Must use PIPE A\n");
++		return false;
++	}
++	/* Should never happen!! */
++	list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list,
++			    head) {
++		if (tmp_encoder != encoder
++		    && tmp_encoder->crtc == encoder->crtc) {
++			printk(KERN_ERR "Can't enable LVDS and another "
++			       "encoder on the same pipe\n");
++			return false;
++		}
++	}
++
++	/*
++	 * If we have timings from the BIOS for the panel, put them in
++	 * to the adjusted mode.  The CRTC will be set up for this mode,
++	 * with the panel scaling set up to source from the H/VDisplay
++	 * of the original mode.
++	 */
++	if (panel_fixed_mode != NULL) {
++		adjusted_mode->hdisplay = panel_fixed_mode->hdisplay;
++		adjusted_mode->hsync_start = panel_fixed_mode->hsync_start;
++		adjusted_mode->hsync_end = panel_fixed_mode->hsync_end;
++		adjusted_mode->htotal = panel_fixed_mode->htotal;
++		adjusted_mode->vdisplay = panel_fixed_mode->vdisplay;
++		adjusted_mode->vsync_start = panel_fixed_mode->vsync_start;
++		adjusted_mode->vsync_end = panel_fixed_mode->vsync_end;
++		adjusted_mode->vtotal = panel_fixed_mode->vtotal;
++		adjusted_mode->clock = panel_fixed_mode->clock;
++		drm_mode_set_crtcinfo(adjusted_mode,
++				      CRTC_INTERLACE_HALVE_V);
++	}
++
++	/*
++	 * XXX: It would be nice to support lower refresh rates on the
++	 * panels to reduce power consumption, and perhaps match the
++	 * user's requested refresh rate.
++	 */
++
++	return true;
++}
++
++static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++
++	if (!gma_power_begin(dev, true))
++		return;
++
++	mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
++	mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
++					  BACKLIGHT_DUTY_CYCLE_MASK);
++
++	psb_intel_lvds_set_power(dev, false);
++
++	gma_power_end(dev);
++}
++
++static void psb_intel_lvds_commit(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++
++	if (mode_dev->backlight_duty_cycle == 0)
++		mode_dev->backlight_duty_cycle =
++		    psb_intel_lvds_get_max_backlight(dev);
++
++	psb_intel_lvds_set_power(dev, true);
++}
++
++static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
++				struct drm_display_mode *mode,
++				struct drm_display_mode *adjusted_mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 pfit_control;
++
++	/*
++	 * The LVDS pin pair will already have been turned on in the
++	 * psb_intel_crtc_mode_set since it has a large impact on the DPLL
++	 * settings.
++	 */
++
++	/*
++	 * Enable automatic panel scaling so that non-native modes fill the
++	 * screen.  Should be enabled before the pipe is enabled, according to
++	 * register description and PRM.
++	 */
++	if (mode->hdisplay != adjusted_mode->hdisplay ||
++	    mode->vdisplay != adjusted_mode->vdisplay)
++		pfit_control = (PFIT_ENABLE | VERT_AUTO_SCALE |
++				HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR |
++				HORIZ_INTERP_BILINEAR);
++	else
++		pfit_control = 0;
++
++	if (dev_priv->lvds_dither)
++		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
++
++	REG_WRITE(PFIT_CONTROL, pfit_control);
++}
++
++/*
++ * Detect the LVDS connection.
++ *
++ * This always returns CONNECTOR_STATUS_CONNECTED.
++ * This connector should only have
++ * been set up if the LVDS was actually connected anyway.
++ */
++static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
++						   *connector, bool force)
++{
++	return connector_status_connected;
++}
++
++/*
++ * Return the list of DDC modes if available, or the BIOS fixed mode otherwise.
++ */
++static int psb_intel_lvds_get_modes(struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
++	struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++	struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
++	int ret = 0;
++
++	if (!IS_MRST(dev))
++		ret = psb_intel_ddc_get_modes(connector, &lvds_priv->i2c_bus->adapter);
++
++	if (ret)
++		return ret;
++
++	/* Didn't get an EDID, so
++	 * Set wide sync ranges so we get all modes
++	 * handed to valid_mode for checking
++	 */
++	connector->display_info.min_vfreq = 0;
++	connector->display_info.max_vfreq = 200;
++	connector->display_info.min_hfreq = 0;
++	connector->display_info.max_hfreq = 200;
++
++	if (mode_dev->panel_fixed_mode != NULL) {
++		struct drm_display_mode *mode =
++		    drm_mode_duplicate(dev, mode_dev->panel_fixed_mode);
++		drm_mode_probed_add(connector, mode);
++		return 1;
++	}
++
++	return 0;
++}
++
++/**
++ * psb_intel_lvds_destroy - unregister and free LVDS structures
++ * @connector: connector to free
++ *
++ * Unregister the DDC bus for this connector then free the driver private
++ * structure.
++ */
++void psb_intel_lvds_destroy(struct drm_connector *connector)
++{
++	struct psb_intel_encoder *psb_intel_encoder =
++					psb_intel_attached_encoder(connector);
++	struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
++
++	if (lvds_priv->ddc_bus)
++		psb_intel_i2c_destroy(lvds_priv->ddc_bus);
++	drm_sysfs_connector_remove(connector);
++	drm_connector_cleanup(connector);
++	kfree(connector);
++}
++
++int psb_intel_lvds_set_property(struct drm_connector *connector,
++				       struct drm_property *property,
++				       uint64_t value)
++{
++	struct drm_encoder *encoder = connector->encoder;
++
++	if (!encoder)
++		return -1;
++
++	if (!strcmp(property->name, "scaling mode")) {
++		struct psb_intel_crtc *crtc =
++					to_psb_intel_crtc(encoder->crtc);
++		uint64_t curval;
++
++		if (!crtc)
++			goto set_prop_error;
++
++		switch (value) {
++		case DRM_MODE_SCALE_FULLSCREEN:
++			break;
++		case DRM_MODE_SCALE_NO_SCALE:
++			break;
++		case DRM_MODE_SCALE_ASPECT:
++			break;
++		default:
++			goto set_prop_error;
++		}
++
++		if (drm_connector_property_get_value(connector,
++						     property,
++						     &curval))
++			goto set_prop_error;
++
++		if (curval == value)
++			goto set_prop_done;
++
++		if (drm_connector_property_set_value(connector,
++							property,
++							value))
++			goto set_prop_error;
++
++		if (crtc->saved_mode.hdisplay != 0 &&
++		    crtc->saved_mode.vdisplay != 0) {
++			if (!drm_crtc_helper_set_mode(encoder->crtc,
++						      &crtc->saved_mode,
++						      encoder->crtc->x,
++						      encoder->crtc->y,
++						      encoder->crtc->fb))
++				goto set_prop_error;
++		}
++	} else if (!strcmp(property->name, "backlight")) {
++		if (drm_connector_property_set_value(connector,
++							property,
++							value))
++			goto set_prop_error;
++		else {
++#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
++			struct drm_psb_private *devp =
++						encoder->dev->dev_private;
++			struct backlight_device *bd = devp->backlight_device;
++			if (bd) {
++				bd->props.brightness = value;
++				backlight_update_status(bd);
++			}
++#endif
++		}
++	} else if (!strcmp(property->name, "DPMS")) {
++		struct drm_encoder_helper_funcs *hfuncs
++						= encoder->helper_private;
++		hfuncs->dpms(encoder, value);
++	}
++
++set_prop_done:
++	return 0;
++set_prop_error:
++	return -1;
++}
++
++static const struct drm_encoder_helper_funcs psb_intel_lvds_helper_funcs = {
++	.dpms = psb_intel_lvds_encoder_dpms,
++	.mode_fixup = psb_intel_lvds_mode_fixup,
++	.prepare = psb_intel_lvds_prepare,
++	.mode_set = psb_intel_lvds_mode_set,
++	.commit = psb_intel_lvds_commit,
++};
++
++const struct drm_connector_helper_funcs
++				psb_intel_lvds_connector_helper_funcs = {
++	.get_modes = psb_intel_lvds_get_modes,
++	.mode_valid = psb_intel_lvds_mode_valid,
++	.best_encoder = psb_intel_best_encoder,
++};
++
++const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
++	.dpms = drm_helper_connector_dpms,
++	.save = psb_intel_lvds_save,
++	.restore = psb_intel_lvds_restore,
++	.detect = psb_intel_lvds_detect,
++	.fill_modes = drm_helper_probe_single_connector_modes,
++	.set_property = psb_intel_lvds_set_property,
++	.destroy = psb_intel_lvds_destroy,
++};
++
++
++static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
++{
++	drm_encoder_cleanup(encoder);
++}
++
++const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
++	.destroy = psb_intel_lvds_enc_destroy,
++};
++
++
++
++/**
++ * psb_intel_lvds_init - setup LVDS connectors on this device
++ * @dev: drm device
++ *
++ * Create the connector, register the LVDS DDC bus, and try to figure out what
++ * modes we can display on the LVDS panel (if present).
++ */
++void psb_intel_lvds_init(struct drm_device *dev,
++			 struct psb_intel_mode_device *mode_dev)
++{
++	struct psb_intel_encoder *psb_intel_encoder;
++	struct psb_intel_connector *psb_intel_connector;
++	struct psb_intel_lvds_priv *lvds_priv;
++	struct drm_connector *connector;
++	struct drm_encoder *encoder;
++	struct drm_display_mode *scan;	/* *modes, *bios_mode; */
++	struct drm_crtc *crtc;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	u32 lvds;
++	int pipe;
++
++	psb_intel_encoder =
++			kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
++	if (!psb_intel_encoder) {
++		dev_err(dev->dev, "psb_intel_encoder allocation error\n");
++		return;
++	}
++
++	psb_intel_connector =
++		kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
++	if (!psb_intel_connector) {
++		dev_err(dev->dev, "psb_intel_connector allocation error\n");
++		goto failed_encoder;
++	}
++
++	lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
++	if (!lvds_priv) {
++		dev_err(dev->dev, "LVDS private allocation error\n");
++		goto failed_connector;
++	}
++
++	psb_intel_encoder->dev_priv = lvds_priv;
++
++	connector = &psb_intel_connector->base;
++	encoder = &psb_intel_encoder->base;
++	drm_connector_init(dev, connector,
++			   &psb_intel_lvds_connector_funcs,
++			   DRM_MODE_CONNECTOR_LVDS);
++
++	drm_encoder_init(dev, encoder,
++			 &psb_intel_lvds_enc_funcs,
++			 DRM_MODE_ENCODER_LVDS);
++
++	psb_intel_connector_attach_encoder(psb_intel_connector,
++					   psb_intel_encoder);
++	psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
++
++	drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
++	drm_connector_helper_add(connector,
++				 &psb_intel_lvds_connector_helper_funcs);
++	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
++	connector->interlace_allowed = false;
++	connector->doublescan_allowed = false;
++
++	/*Attach connector properties*/
++	drm_connector_attach_property(connector,
++				      dev->mode_config.scaling_mode_property,
++				      DRM_MODE_SCALE_FULLSCREEN);
++	drm_connector_attach_property(connector,
++				      dev_priv->backlight_property,
++				      BRIGHTNESS_MAX_LEVEL);
++
++	/*
++	 * Set up I2C bus
++	 * FIXME: distroy i2c_bus when exit
++	 */
++	lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
++	if (!lvds_priv->i2c_bus) {
++		dev_printk(KERN_ERR,
++			&dev->pdev->dev, "I2C bus registration failed.\n");
++		goto failed_blc_i2c;
++	}
++	lvds_priv->i2c_bus->slave_addr = 0x2C;
++	dev_priv->lvds_i2c_bus =  lvds_priv->i2c_bus;
++
++	/*
++	 * LVDS discovery:
++	 * 1) check for EDID on DDC
++	 * 2) check for VBT data
++	 * 3) check to see if LVDS is already on
++	 *    if none of the above, no panel
++	 * 4) make sure lid is open
++	 *    if closed, act like it's not there for now
++	 */
++
++	/* Set up the DDC bus. */
++	lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
++	if (!lvds_priv->ddc_bus) {
++		dev_printk(KERN_ERR, &dev->pdev->dev,
++			   "DDC bus registration " "failed.\n");
++		goto failed_ddc;
++	}
++
++	/*
++	 * Attempt to get the fixed panel mode from DDC.  Assume that the
++	 * preferred mode is the right one.
++	 */
++	psb_intel_ddc_get_modes(connector, &lvds_priv->ddc_bus->adapter);
++	list_for_each_entry(scan, &connector->probed_modes, head) {
++		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
++			mode_dev->panel_fixed_mode =
++			    drm_mode_duplicate(dev, scan);
++			goto out;	/* FIXME: check for quirks */
++		}
++	}
++
++	/* Failed to get EDID, what about VBT? do we need this? */
++	if (mode_dev->vbt_mode)
++		mode_dev->panel_fixed_mode =
++		    drm_mode_duplicate(dev, mode_dev->vbt_mode);
++
++	if (!mode_dev->panel_fixed_mode)
++		if (dev_priv->lfp_lvds_vbt_mode)
++			mode_dev->panel_fixed_mode =
++				drm_mode_duplicate(dev,
++					dev_priv->lfp_lvds_vbt_mode);
++
++	/*
++	 * If we didn't get EDID, try checking if the panel is already turned
++	 * on.	If so, assume that whatever is currently programmed is the
++	 * correct mode.
++	 */
++	lvds = REG_READ(LVDS);
++	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
++	crtc = psb_intel_get_crtc_from_pipe(dev, pipe);
++
++	if (crtc && (lvds & LVDS_PORT_EN)) {
++		mode_dev->panel_fixed_mode =
++		    psb_intel_crtc_mode_get(dev, crtc);
++		if (mode_dev->panel_fixed_mode) {
++			mode_dev->panel_fixed_mode->type |=
++			    DRM_MODE_TYPE_PREFERRED;
++			goto out;	/* FIXME: check for quirks */
++		}
++	}
++
++	/* If we still don't have a mode after all that, give up. */
++	if (!mode_dev->panel_fixed_mode) {
++		dev_err(dev->dev, "Found no modes on the lvds, ignoring the LVDS\n");
++		goto failed_find;
++	}
++
++	/*
++	 * Blacklist machines with BIOSes that list an LVDS panel without
++	 * actually having one.
++	 */
++out:
++	drm_sysfs_connector_add(connector);
++	return;
++
++failed_find:
++	if (lvds_priv->ddc_bus)
++		psb_intel_i2c_destroy(lvds_priv->ddc_bus);
++failed_ddc:
++	if (lvds_priv->i2c_bus)
++		psb_intel_i2c_destroy(lvds_priv->i2c_bus);
++failed_blc_i2c:
++	drm_encoder_cleanup(encoder);
++	drm_connector_cleanup(connector);
++failed_connector:
++	kfree(psb_intel_connector);
++failed_encoder:
++	kfree(psb_intel_encoder);
++}
++
+diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
+new file mode 100644
+index 0000000..4fca0d6
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
+@@ -0,0 +1,75 @@
++/*
++ * Copyright (c) 2007 Intel Corporation
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authers: Jesse Barnes <jesse.barnes at intel.com>
++ */
++
++#include <linux/i2c.h>
++#include <linux/fb.h>
++#include <drm/drmP.h>
++#include "psb_intel_drv.h"
++
++/**
++ * psb_intel_ddc_probe
++ *
++ */
++bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
++{
++	u8 out_buf[] = { 0x0, 0x0 };
++	u8 buf[2];
++	int ret;
++	struct i2c_msg msgs[] = {
++		{
++		 .addr = 0x50,
++		 .flags = 0,
++		 .len = 1,
++		 .buf = out_buf,
++		 },
++		{
++		 .addr = 0x50,
++		 .flags = I2C_M_RD,
++		 .len = 1,
++		 .buf = buf,
++		 }
++	};
++
++	ret = i2c_transfer(adapter, msgs, 2);
++	if (ret == 2)
++		return true;
++
++	return false;
++}
++
++/**
++ * psb_intel_ddc_get_modes - get modelist from monitor
++ * @connector: DRM connector device to use
++ *
++ * Fetch the EDID information from @connector using the DDC bus.
++ */
++int psb_intel_ddc_get_modes(struct drm_connector *connector,
++			    struct i2c_adapter *adapter)
++{
++	struct edid *edid;
++	int ret = 0;
++
++	edid = drm_get_edid(connector, adapter);
++	if (edid) {
++		drm_mode_connector_update_edid_property(connector, edid);
++		ret = drm_add_edid_modes(connector, edid);
++		kfree(edid);
++	}
++	return ret;
++}
+diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
+new file mode 100644
+index 0000000..e89d3a2
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
+@@ -0,0 +1,1318 @@
++/*
++ * Copyright (c) 2009, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++#ifndef __PSB_INTEL_REG_H__
++#define __PSB_INTEL_REG_H__
++
++/*
++ * GPIO regs
++ */
++#define GPIOA			0x5010
++#define GPIOB			0x5014
++#define GPIOC			0x5018
++#define GPIOD			0x501c
++#define GPIOE			0x5020
++#define GPIOF			0x5024
++#define GPIOG			0x5028
++#define GPIOH			0x502c
++# define GPIO_CLOCK_DIR_MASK		(1 << 0)
++# define GPIO_CLOCK_DIR_IN		(0 << 1)
++# define GPIO_CLOCK_DIR_OUT		(1 << 1)
++# define GPIO_CLOCK_VAL_MASK		(1 << 2)
++# define GPIO_CLOCK_VAL_OUT		(1 << 3)
++# define GPIO_CLOCK_VAL_IN		(1 << 4)
++# define GPIO_CLOCK_PULLUP_DISABLE	(1 << 5)
++# define GPIO_DATA_DIR_MASK		(1 << 8)
++# define GPIO_DATA_DIR_IN		(0 << 9)
++# define GPIO_DATA_DIR_OUT		(1 << 9)
++# define GPIO_DATA_VAL_MASK		(1 << 10)
++# define GPIO_DATA_VAL_OUT		(1 << 11)
++# define GPIO_DATA_VAL_IN		(1 << 12)
++# define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
++
++#define GMBUS0			0x5100 /* clock/port select */
++#define   GMBUS_RATE_100KHZ	(0<<8)
++#define   GMBUS_RATE_50KHZ	(1<<8)
++#define   GMBUS_RATE_400KHZ	(2<<8) /* reserved on Pineview */
++#define   GMBUS_RATE_1MHZ	(3<<8) /* reserved on Pineview */
++#define   GMBUS_HOLD_EXT	(1<<7) /* 300ns hold time, rsvd on Pineview */
++#define   GMBUS_PORT_DISABLED	0
++#define   GMBUS_PORT_SSC	1
++#define   GMBUS_PORT_VGADDC	2
++#define   GMBUS_PORT_PANEL	3
++#define   GMBUS_PORT_DPC	4 /* HDMIC */
++#define   GMBUS_PORT_DPB	5 /* SDVO, HDMIB */
++				  /* 6 reserved */
++#define   GMBUS_PORT_DPD	7 /* HDMID */
++#define   GMBUS_NUM_PORTS       8
++#define GMBUS1			0x5104 /* command/status */
++#define   GMBUS_SW_CLR_INT	(1<<31)
++#define   GMBUS_SW_RDY		(1<<30)
++#define   GMBUS_ENT		(1<<29) /* enable timeout */
++#define   GMBUS_CYCLE_NONE	(0<<25)
++#define   GMBUS_CYCLE_WAIT	(1<<25)
++#define   GMBUS_CYCLE_INDEX	(2<<25)
++#define   GMBUS_CYCLE_STOP	(4<<25)
++#define   GMBUS_BYTE_COUNT_SHIFT 16
++#define   GMBUS_SLAVE_INDEX_SHIFT 8
++#define   GMBUS_SLAVE_ADDR_SHIFT 1
++#define   GMBUS_SLAVE_READ	(1<<0)
++#define   GMBUS_SLAVE_WRITE	(0<<0)
++#define GMBUS2			0x5108 /* status */
++#define   GMBUS_INUSE		(1<<15)
++#define   GMBUS_HW_WAIT_PHASE	(1<<14)
++#define   GMBUS_STALL_TIMEOUT	(1<<13)
++#define   GMBUS_INT		(1<<12)
++#define   GMBUS_HW_RDY		(1<<11)
++#define   GMBUS_SATOER		(1<<10)
++#define   GMBUS_ACTIVE		(1<<9)
++#define GMBUS3			0x510c /* data buffer bytes 3-0 */
++#define GMBUS4			0x5110 /* interrupt mask (Pineview+) */
++#define   GMBUS_SLAVE_TIMEOUT_EN (1<<4)
++#define   GMBUS_NAK_EN		(1<<3)
++#define   GMBUS_IDLE_EN		(1<<2)
++#define   GMBUS_HW_WAIT_EN	(1<<1)
++#define   GMBUS_HW_RDY_EN	(1<<0)
++#define GMBUS5			0x5120 /* byte index */
++#define   GMBUS_2BYTE_INDEX_EN	(1<<31)
++
++#define BLC_PWM_CTL		0x61254
++#define BLC_PWM_CTL2		0x61250
++#define BLC_PWM_CTL_C		0x62254
++#define BLC_PWM_CTL2_C		0x62250
++#define BACKLIGHT_MODULATION_FREQ_SHIFT		(17)
++/*
++ * This is the most significant 15 bits of the number of backlight cycles in a
++ * complete cycle of the modulated backlight control.
++ *
++ * The actual value is this field multiplied by two.
++ */
++#define BACKLIGHT_MODULATION_FREQ_MASK	(0x7fff << 17)
++#define BLM_LEGACY_MODE			(1 << 16)
++/*
++ * This is the number of cycles out of the backlight modulation cycle for which
++ * the backlight is on.
++ *
++ * This field must be no greater than the number of cycles in the complete
++ * backlight modulation cycle.
++ */
++#define BACKLIGHT_DUTY_CYCLE_SHIFT	(0)
++#define BACKLIGHT_DUTY_CYCLE_MASK	(0xffff)
++
++#define I915_GCFGC			0xf0
++#define I915_LOW_FREQUENCY_ENABLE	(1 << 7)
++#define I915_DISPLAY_CLOCK_190_200_MHZ	(0 << 4)
++#define I915_DISPLAY_CLOCK_333_MHZ	(4 << 4)
++#define I915_DISPLAY_CLOCK_MASK		(7 << 4)
++
++#define I855_HPLLCC			0xc0
++#define I855_CLOCK_CONTROL_MASK		(3 << 0)
++#define I855_CLOCK_133_200		(0 << 0)
++#define I855_CLOCK_100_200		(1 << 0)
++#define I855_CLOCK_100_133		(2 << 0)
++#define I855_CLOCK_166_250		(3 << 0)
++
++/* I830 CRTC registers */
++#define HTOTAL_A		0x60000
++#define HBLANK_A		0x60004
++#define HSYNC_A			0x60008
++#define VTOTAL_A		0x6000c
++#define VBLANK_A		0x60010
++#define VSYNC_A			0x60014
++#define PIPEASRC		0x6001c
++#define BCLRPAT_A		0x60020
++#define VSYNCSHIFT_A		0x60028
++
++#define HTOTAL_B		0x61000
++#define HBLANK_B		0x61004
++#define HSYNC_B			0x61008
++#define VTOTAL_B		0x6100c
++#define VBLANK_B		0x61010
++#define VSYNC_B			0x61014
++#define PIPEBSRC		0x6101c
++#define BCLRPAT_B		0x61020
++#define VSYNCSHIFT_B		0x61028
++
++#define HTOTAL_C		0x62000
++#define HBLANK_C		0x62004
++#define HSYNC_C			0x62008
++#define VTOTAL_C		0x6200c
++#define VBLANK_C		0x62010
++#define VSYNC_C			0x62014
++#define PIPECSRC		0x6201c
++#define BCLRPAT_C		0x62020
++#define VSYNCSHIFT_C		0x62028
++
++#define PP_STATUS		0x61200
++# define PP_ON				(1 << 31)
++/*
++ * Indicates that all dependencies of the panel are on:
++ *
++ * - PLL enabled
++ * - pipe enabled
++ * - LVDS/DVOB/DVOC on
++ */
++#define PP_READY			(1 << 30)
++#define PP_SEQUENCE_NONE		(0 << 28)
++#define PP_SEQUENCE_ON			(1 << 28)
++#define PP_SEQUENCE_OFF			(2 << 28)
++#define PP_SEQUENCE_MASK		0x30000000
++#define PP_CONTROL		0x61204
++#define POWER_TARGET_ON			(1 << 0)
++
++#define LVDSPP_ON		0x61208
++#define LVDSPP_OFF		0x6120c
++#define PP_CYCLE		0x61210
++
++#define PP_ON_DELAYS		0x61208		/* Cedartrail */
++#define PP_OFF_DELAYS		0x6120c		/* Cedartrail */
++
++#define PFIT_CONTROL		0x61230
++#define PFIT_ENABLE			(1 << 31)
++#define PFIT_PIPE_MASK			(3 << 29)
++#define PFIT_PIPE_SHIFT			29
++#define PFIT_SCALING_MODE_PILLARBOX	(1 << 27)
++#define PFIT_SCALING_MODE_LETTERBOX	(3 << 26)
++#define VERT_INTERP_DISABLE		(0 << 10)
++#define VERT_INTERP_BILINEAR		(1 << 10)
++#define VERT_INTERP_MASK		(3 << 10)
++#define VERT_AUTO_SCALE			(1 << 9)
++#define HORIZ_INTERP_DISABLE		(0 << 6)
++#define HORIZ_INTERP_BILINEAR		(1 << 6)
++#define HORIZ_INTERP_MASK		(3 << 6)
++#define HORIZ_AUTO_SCALE		(1 << 5)
++#define PANEL_8TO6_DITHER_ENABLE	(1 << 3)
++
++#define PFIT_PGM_RATIOS		0x61234
++#define PFIT_VERT_SCALE_MASK			0xfff00000
++#define PFIT_HORIZ_SCALE_MASK			0x0000fff0
++
++#define PFIT_AUTO_RATIOS	0x61238
++
++#define DPLL_A			0x06014
++#define DPLL_B			0x06018
++#define DPLL_VCO_ENABLE			(1 << 31)
++#define DPLL_DVO_HIGH_SPEED		(1 << 30)
++#define DPLL_SYNCLOCK_ENABLE		(1 << 29)
++#define DPLL_VGA_MODE_DIS		(1 << 28)
++#define DPLLB_MODE_DAC_SERIAL		(1 << 26)	/* i915 */
++#define DPLLB_MODE_LVDS			(2 << 26)	/* i915 */
++#define DPLL_MODE_MASK			(3 << 26)
++#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10	(0 << 24)	/* i915 */
++#define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5	(1 << 24)	/* i915 */
++#define DPLLB_LVDS_P2_CLOCK_DIV_14	(0 << 24)	/* i915 */
++#define DPLLB_LVDS_P2_CLOCK_DIV_7	(1 << 24)	/* i915 */
++#define DPLL_P2_CLOCK_DIV_MASK		0x03000000	/* i915 */
++#define DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000	/* i915 */
++#define DPLL_LOCK			(1 << 15)	/* CDV */
++
++/*
++ *  The i830 generation, in DAC/serial mode, defines p1 as two plus this
++ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
++ */
++# define DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
++/*
++ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
++ * this field (only one bit may be set).
++ */
++#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
++#define DPLL_FPA01_P1_POST_DIV_SHIFT	16
++#define PLL_P2_DIVIDE_BY_4		(1 << 23)	/* i830, required
++							 * in DVO non-gang */
++# define PLL_P1_DIVIDE_BY_TWO		(1 << 21)	/* i830 */
++#define PLL_REF_INPUT_DREFCLK		(0 << 13)
++#define PLL_REF_INPUT_TVCLKINA		(1 << 13)	/* i830 */
++#define PLL_REF_INPUT_TVCLKINBC		(2 << 13)	/* SDVO
++								 * TVCLKIN */
++#define PLLB_REF_INPUT_SPREADSPECTRUMIN	(3 << 13)
++#define PLL_REF_INPUT_MASK		(3 << 13)
++#define PLL_LOAD_PULSE_PHASE_SHIFT	9
++/*
++ * Parallel to Serial Load Pulse phase selection.
++ * Selects the phase for the 10X DPLL clock for the PCIe
++ * digital display port. The range is 4 to 13; 10 or more
++ * is just a flip delay. The default is 6
++ */
++#define PLL_LOAD_PULSE_PHASE_MASK	(0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
++#define DISPLAY_RATE_SELECT_FPA1	(1 << 8)
++
++/*
++ * SDVO multiplier for 945G/GM. Not used on 965.
++ *
++ * DPLL_MD_UDI_MULTIPLIER_MASK
++ */
++#define SDVO_MULTIPLIER_MASK		0x000000ff
++#define SDVO_MULTIPLIER_SHIFT_HIRES	4
++#define SDVO_MULTIPLIER_SHIFT_VGA	0
++
++/*
++ * PLL_MD
++ */
++/* Pipe A SDVO/UDI clock multiplier/divider register for G965. */
++#define DPLL_A_MD		0x0601c
++/* Pipe B SDVO/UDI clock multiplier/divider register for G965. */
++#define DPLL_B_MD		0x06020
++/*
++ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
++ *
++ * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
++ */
++#define DPLL_MD_UDI_DIVIDER_MASK	0x3f000000
++#define DPLL_MD_UDI_DIVIDER_SHIFT	24
++/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
++#define DPLL_MD_VGA_UDI_DIVIDER_MASK	0x003f0000
++#define DPLL_MD_VGA_UDI_DIVIDER_SHIFT	16
++/*
++ * SDVO/UDI pixel multiplier.
++ *
++ * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
++ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
++ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
++ * dummy bytes in the datastream at an increased clock rate, with both sides of
++ * the link knowing how many bytes are fill.
++ *
++ * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
++ * rate to 130Mhz to get a bus rate of 1.30Ghz.  The DPLL clock rate would be
++ * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
++ * through an SDVO command.
++ *
++ * This register field has values of multiplication factor minus 1, with
++ * a maximum multiplier of 5 for SDVO.
++ */
++#define DPLL_MD_UDI_MULTIPLIER_MASK	0x00003f00
++#define DPLL_MD_UDI_MULTIPLIER_SHIFT	8
++/*
++ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
++ * This best be set to the default value (3) or the CRT won't work. No,
++ * I don't entirely understand what this does...
++ */
++#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
++#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
++
++#define DPLL_TEST		0x606c
++#define DPLLB_TEST_SDVO_DIV_1		(0 << 22)
++#define DPLLB_TEST_SDVO_DIV_2		(1 << 22)
++#define DPLLB_TEST_SDVO_DIV_4		(2 << 22)
++#define DPLLB_TEST_SDVO_DIV_MASK	(3 << 22)
++#define DPLLB_TEST_N_BYPASS		(1 << 19)
++#define DPLLB_TEST_M_BYPASS		(1 << 18)
++#define DPLLB_INPUT_BUFFER_ENABLE	(1 << 16)
++#define DPLLA_TEST_N_BYPASS		(1 << 3)
++#define DPLLA_TEST_M_BYPASS		(1 << 2)
++#define DPLLA_INPUT_BUFFER_ENABLE	(1 << 0)
++
++#define ADPA			0x61100
++#define ADPA_DAC_ENABLE			(1 << 31)
++#define ADPA_DAC_DISABLE		0
++#define ADPA_PIPE_SELECT_MASK		(1 << 30)
++#define ADPA_PIPE_A_SELECT		0
++#define ADPA_PIPE_B_SELECT		(1 << 30)
++#define ADPA_USE_VGA_HVPOLARITY		(1 << 15)
++#define ADPA_SETS_HVPOLARITY		0
++#define ADPA_VSYNC_CNTL_DISABLE		(1 << 11)
++#define ADPA_VSYNC_CNTL_ENABLE		0
++#define ADPA_HSYNC_CNTL_DISABLE		(1 << 10)
++#define ADPA_HSYNC_CNTL_ENABLE		0
++#define ADPA_VSYNC_ACTIVE_HIGH		(1 << 4)
++#define ADPA_VSYNC_ACTIVE_LOW		0
++#define ADPA_HSYNC_ACTIVE_HIGH		(1 << 3)
++#define ADPA_HSYNC_ACTIVE_LOW		0
++
++#define FPA0			0x06040
++#define FPA1			0x06044
++#define FPB0			0x06048
++#define FPB1			0x0604c
++#define FP_N_DIV_MASK			0x003f0000
++#define FP_N_DIV_SHIFT			16
++#define FP_M1_DIV_MASK			0x00003f00
++#define FP_M1_DIV_SHIFT			8
++#define FP_M2_DIV_MASK			0x0000003f
++#define FP_M2_DIV_SHIFT			0
++
++#define PORT_HOTPLUG_EN		0x61110
++#define SDVOB_HOTPLUG_INT_EN		(1 << 26)
++#define SDVOC_HOTPLUG_INT_EN		(1 << 25)
++#define TV_HOTPLUG_INT_EN		(1 << 18)
++#define CRT_HOTPLUG_INT_EN		(1 << 9)
++#define CRT_HOTPLUG_FORCE_DETECT	(1 << 3)
++/* CDV.. */
++#define CRT_HOTPLUG_ACTIVATION_PERIOD_64	(1 << 8)
++#define CRT_HOTPLUG_DAC_ON_TIME_2M		(0 << 7)
++#define CRT_HOTPLUG_DAC_ON_TIME_4M		(1 << 7)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_40		(0 << 5)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_50		(1 << 5)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_60		(2 << 5)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_70		(3 << 5)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK	(3 << 5)
++#define CRT_HOTPLUG_DETECT_DELAY_1G		(0 << 4)
++#define CRT_HOTPLUG_DETECT_DELAY_2G		(1 << 4)
++#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV	(0 << 2)
++#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2)
++#define CRT_HOTPLUG_DETECT_MASK			0x000000F8
++
++#define PORT_HOTPLUG_STAT	0x61114
++#define CRT_HOTPLUG_INT_STATUS		(1 << 11)
++#define TV_HOTPLUG_INT_STATUS		(1 << 10)
++#define CRT_HOTPLUG_MONITOR_MASK	(3 << 8)
++#define CRT_HOTPLUG_MONITOR_COLOR	(3 << 8)
++#define CRT_HOTPLUG_MONITOR_MONO	(2 << 8)
++#define CRT_HOTPLUG_MONITOR_NONE	(0 << 8)
++#define SDVOC_HOTPLUG_INT_STATUS	(1 << 7)
++#define SDVOB_HOTPLUG_INT_STATUS	(1 << 6)
++
++#define SDVOB			0x61140
++#define SDVOC			0x61160
++#define SDVO_ENABLE			(1 << 31)
++#define SDVO_PIPE_B_SELECT		(1 << 30)
++#define SDVO_STALL_SELECT		(1 << 29)
++#define SDVO_INTERRUPT_ENABLE		(1 << 26)
++#define SDVO_COLOR_RANGE_16_235		(1 << 8)
++#define SDVO_AUDIO_ENABLE		(1 << 6)
++
++/**
++ * 915G/GM SDVO pixel multiplier.
++ *
++ * Programmed value is multiplier - 1, up to 5x.
++ *
++ * DPLL_MD_UDI_MULTIPLIER_MASK
++ */
++#define SDVO_PORT_MULTIPLY_MASK		(7 << 23)
++#define SDVO_PORT_MULTIPLY_SHIFT	23
++#define SDVO_PHASE_SELECT_MASK		(15 << 19)
++#define SDVO_PHASE_SELECT_DEFAULT	(6 << 19)
++#define SDVO_CLOCK_OUTPUT_INVERT	(1 << 18)
++#define SDVOC_GANG_MODE			(1 << 16)
++#define SDVO_BORDER_ENABLE		(1 << 7)
++#define SDVOB_PCIE_CONCURRENCY		(1 << 3)
++#define SDVO_DETECTED			(1 << 2)
++/* Bits to be preserved when writing */
++#define SDVOB_PRESERVE_MASK		((1 << 17) | (1 << 16) | (1 << 14))
++#define SDVOC_PRESERVE_MASK		(1 << 17)
++
++/*
++ * This register controls the LVDS output enable, pipe selection, and data
++ * format selection.
++ *
++ * All of the clock/data pairs are force powered down by power sequencing.
++ */
++#define LVDS			0x61180
++/*
++ * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
++ * the DPLL semantics change when the LVDS is assigned to that pipe.
++ */
++#define LVDS_PORT_EN			(1 << 31)
++/* Selects pipe B for LVDS data.  Must be set on pre-965. */
++#define LVDS_PIPEB_SELECT		(1 << 30)
++
++/* Turns on border drawing to allow centered display. */
++#define LVDS_BORDER_EN			(1 << 15)
++
++/*
++ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
++ * pixel.
++ */
++#define LVDS_A0A2_CLKA_POWER_MASK	(3 << 8)
++#define LVDS_A0A2_CLKA_POWER_DOWN	(0 << 8)
++#define LVDS_A0A2_CLKA_POWER_UP		(3 << 8)
++/*
++ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
++ * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
++ * on.
++ */
++#define LVDS_A3_POWER_MASK		(3 << 6)
++#define LVDS_A3_POWER_DOWN		(0 << 6)
++#define LVDS_A3_POWER_UP		(3 << 6)
++/*
++ * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
++ * is set.
++ */
++#define LVDS_CLKB_POWER_MASK		(3 << 4)
++#define LVDS_CLKB_POWER_DOWN		(0 << 4)
++#define LVDS_CLKB_POWER_UP		(3 << 4)
++/*
++ * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
++ * setting for whether we are in dual-channel mode.  The B3 pair will
++ * additionally only be powered up when LVDS_A3_POWER_UP is set.
++ */
++#define LVDS_B0B3_POWER_MASK		(3 << 2)
++#define LVDS_B0B3_POWER_DOWN		(0 << 2)
++#define LVDS_B0B3_POWER_UP		(3 << 2)
++
++#define PIPEACONF		0x70008
++#define PIPEACONF_ENABLE		(1 << 31)
++#define PIPEACONF_DISABLE		0
++#define PIPEACONF_DOUBLE_WIDE		(1 << 30)
++#define PIPECONF_ACTIVE			(1 << 30)
++#define I965_PIPECONF_ACTIVE		(1 << 30)
++#define PIPECONF_DSIPLL_LOCK		(1 << 29)
++#define PIPEACONF_SINGLE_WIDE		0
++#define PIPEACONF_PIPE_UNLOCKED		0
++#define PIPEACONF_DSR			(1 << 26)
++#define PIPEACONF_PIPE_LOCKED		(1 << 25)
++#define PIPEACONF_PALETTE		0
++#define PIPECONF_FORCE_BORDER		(1 << 25)
++#define PIPEACONF_GAMMA			(1 << 24)
++#define PIPECONF_PROGRESSIVE		(0 << 21)
++#define PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
++#define PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)
++#define PIPECONF_PLANE_OFF		(1 << 19)
++#define PIPECONF_CURSOR_OFF		(1 << 18)
++
++#define PIPEBCONF		0x71008
++#define PIPEBCONF_ENABLE		(1 << 31)
++#define PIPEBCONF_DISABLE		0
++#define PIPEBCONF_DOUBLE_WIDE		(1 << 30)
++#define PIPEBCONF_DISABLE		0
++#define PIPEBCONF_GAMMA			(1 << 24)
++#define PIPEBCONF_PALETTE		0
++
++#define PIPECCONF		0x72008
++
++#define PIPEBGCMAXRED		0x71010
++#define PIPEBGCMAXGREEN		0x71014
++#define PIPEBGCMAXBLUE		0x71018
++
++#define PIPEASTAT		0x70024
++#define PIPEBSTAT		0x71024
++#define PIPECSTAT		0x72024
++#define PIPE_VBLANK_INTERRUPT_STATUS		(1UL << 1)
++#define PIPE_START_VBLANK_INTERRUPT_STATUS	(1UL << 2)
++#define PIPE_VBLANK_CLEAR			(1 << 1)
++#define PIPE_VBLANK_STATUS			(1 << 1)
++#define PIPE_TE_STATUS				(1UL << 6)
++#define PIPE_DPST_EVENT_STATUS			(1UL << 7)
++#define PIPE_VSYNC_CLEAR			(1UL << 9)
++#define PIPE_VSYNC_STATUS			(1UL << 9)
++#define PIPE_HDMI_AUDIO_UNDERRUN_STATUS		(1UL << 10)
++#define PIPE_HDMI_AUDIO_BUFFER_DONE_STATUS	(1UL << 11)
++#define PIPE_VBLANK_INTERRUPT_ENABLE		(1UL << 17)
++#define PIPE_START_VBLANK_INTERRUPT_ENABLE	(1UL << 18)
++#define PIPE_TE_ENABLE				(1UL << 22)
++#define PIPE_DPST_EVENT_ENABLE			(1UL << 23)
++#define PIPE_VSYNC_ENABL			(1UL << 25)
++#define PIPE_HDMI_AUDIO_UNDERRUN		(1UL << 26)
++#define PIPE_HDMI_AUDIO_BUFFER_DONE		(1UL << 27)
++#define PIPE_HDMI_AUDIO_INT_MASK		(PIPE_HDMI_AUDIO_UNDERRUN | \
++						PIPE_HDMI_AUDIO_BUFFER_DONE)
++#define PIPE_EVENT_MASK ((1 << 29)|(1 << 28)|(1 << 27)|(1 << 26)|(1 << 24)|(1 << 23)|(1 << 22)|(1 << 21)|(1 << 20)|(1 << 16))
++#define PIPE_VBLANK_MASK ((1 << 25)|(1 << 24)|(1 << 18)|(1 << 17))
++#define HISTOGRAM_INT_CONTROL		0x61268
++#define HISTOGRAM_BIN_DATA		0X61264
++#define HISTOGRAM_LOGIC_CONTROL		0x61260
++#define PWM_CONTROL_LOGIC		0x61250
++#define PIPE_HOTPLUG_INTERRUPT_STATUS		(1UL << 10)
++#define HISTOGRAM_INTERRUPT_ENABLE		(1UL << 31)
++#define HISTOGRAM_LOGIC_ENABLE			(1UL << 31)
++#define PWM_LOGIC_ENABLE			(1UL << 31)
++#define PWM_PHASEIN_ENABLE			(1UL << 25)
++#define PWM_PHASEIN_INT_ENABLE			(1UL << 24)
++#define PWM_PHASEIN_VB_COUNT			0x00001f00
++#define PWM_PHASEIN_INC				0x0000001f
++#define HISTOGRAM_INT_CTRL_CLEAR		(1UL << 30)
++#define DPST_YUV_LUMA_MODE			0
++
++struct dpst_ie_histogram_control {
++	union {
++		uint32_t data;
++		struct {
++			uint32_t bin_reg_index:7;
++			uint32_t reserved:4;
++			uint32_t bin_reg_func_select:1;
++			uint32_t sync_to_phase_in:1;
++			uint32_t alt_enhancement_mode:2;
++			uint32_t reserved1:1;
++			uint32_t sync_to_phase_in_count:8;
++			uint32_t histogram_mode_select:1;
++			uint32_t reserved2:4;
++			uint32_t ie_pipe_assignment:1;
++			uint32_t ie_mode_table_enabled:1;
++			uint32_t ie_histogram_enable:1;
++		};
++	};
++};
++
++struct dpst_guardband {
++	union {
++		uint32_t data;
++		struct {
++			uint32_t guardband:22;
++			uint32_t guardband_interrupt_delay:8;
++			uint32_t interrupt_status:1;
++			uint32_t interrupt_enable:1;
++		};
++	};
++};
++
++#define PIPEAFRAMEHIGH		0x70040
++#define PIPEAFRAMEPIXEL		0x70044
++#define PIPEBFRAMEHIGH		0x71040
++#define PIPEBFRAMEPIXEL		0x71044
++#define PIPECFRAMEHIGH		0x72040
++#define PIPECFRAMEPIXEL		0x72044
++#define PIPE_FRAME_HIGH_MASK	0x0000ffff
++#define PIPE_FRAME_HIGH_SHIFT	0
++#define PIPE_FRAME_LOW_MASK	0xff000000
++#define PIPE_FRAME_LOW_SHIFT	24
++#define PIPE_PIXEL_MASK		0x00ffffff
++#define PIPE_PIXEL_SHIFT	0
++
++#define DSPARB			0x70030
++#define DSPFW1			0x70034
++#define DSPFW2			0x70038
++#define DSPFW3			0x7003c
++#define DSPFW4			0x70050
++#define DSPFW5			0x70054
++#define DSPFW6			0x70058
++#define DSPCHICKENBIT		0x70400
++#define DSPACNTR		0x70180
++#define DSPBCNTR		0x71180
++#define DSPCCNTR		0x72180
++#define DISPLAY_PLANE_ENABLE			(1 << 31)
++#define DISPLAY_PLANE_DISABLE			0
++#define DISPPLANE_GAMMA_ENABLE			(1 << 30)
++#define DISPPLANE_GAMMA_DISABLE			0
++#define DISPPLANE_PIXFORMAT_MASK		(0xf << 26)
++#define DISPPLANE_8BPP				(0x2 << 26)
++#define DISPPLANE_15_16BPP			(0x4 << 26)
++#define DISPPLANE_16BPP				(0x5 << 26)
++#define DISPPLANE_32BPP_NO_ALPHA		(0x6 << 26)
++#define DISPPLANE_32BPP				(0x7 << 26)
++#define DISPPLANE_STEREO_ENABLE			(1 << 25)
++#define DISPPLANE_STEREO_DISABLE		0
++#define DISPPLANE_SEL_PIPE_MASK			(1 << 24)
++#define DISPPLANE_SEL_PIPE_POS			24
++#define DISPPLANE_SEL_PIPE_A			0
++#define DISPPLANE_SEL_PIPE_B			(1 << 24)
++#define DISPPLANE_SRC_KEY_ENABLE		(1 << 22)
++#define DISPPLANE_SRC_KEY_DISABLE		0
++#define DISPPLANE_LINE_DOUBLE			(1 << 20)
++#define DISPPLANE_NO_LINE_DOUBLE		0
++#define DISPPLANE_STEREO_POLARITY_FIRST		0
++#define DISPPLANE_STEREO_POLARITY_SECOND	(1 << 18)
++/* plane B only */
++#define DISPPLANE_ALPHA_TRANS_ENABLE		(1 << 15)
++#define DISPPLANE_ALPHA_TRANS_DISABLE		0
++#define DISPPLANE_SPRITE_ABOVE_DISPLAYA		0
++#define DISPPLANE_SPRITE_ABOVE_OVERLAY		(1)
++#define DISPPLANE_BOTTOM			(4)
++
++#define DSPABASE		0x70184
++#define DSPALINOFF		0x70184
++#define DSPASTRIDE		0x70188
++
++#define DSPBBASE		0x71184
++#define DSPBLINOFF		0X71184
++#define DSPBADDR		DSPBBASE
++#define DSPBSTRIDE		0x71188
++
++#define DSPCBASE		0x72184
++#define DSPCLINOFF		0x72184
++#define DSPCSTRIDE		0x72188
++
++#define DSPAKEYVAL		0x70194
++#define DSPAKEYMASK		0x70198
++
++#define DSPAPOS			0x7018C	/* reserved */
++#define DSPASIZE		0x70190
++#define DSPBPOS			0x7118C
++#define DSPBSIZE		0x71190
++#define DSPCPOS			0x7218C
++#define DSPCSIZE		0x72190
++
++#define DSPASURF		0x7019C
++#define DSPATILEOFF		0x701A4
++
++#define DSPBSURF		0x7119C
++#define DSPBTILEOFF		0x711A4
++
++#define DSPCSURF		0x7219C
++#define DSPCTILEOFF		0x721A4
++#define DSPCKEYMAXVAL		0x721A0
++#define DSPCKEYMINVAL		0x72194
++#define DSPCKEYMSK		0x72198
++
++#define VGACNTRL		0x71400
++#define VGA_DISP_DISABLE		(1 << 31)
++#define VGA_2X_MODE			(1 << 30)
++#define VGA_PIPE_B_SELECT		(1 << 29)
++
++/*
++ * Overlay registers
++ */
++#define OV_C_OFFSET		0x08000
++#define OV_OVADD		0x30000
++#define OV_DOVASTA		0x30008
++# define OV_PIPE_SELECT			((1 << 6)|(1 << 7))
++# define OV_PIPE_SELECT_POS		6
++# define OV_PIPE_A			0
++# define OV_PIPE_C			1
++#define OV_OGAMC5		0x30010
++#define OV_OGAMC4		0x30014
++#define OV_OGAMC3		0x30018
++#define OV_OGAMC2		0x3001C
++#define OV_OGAMC1		0x30020
++#define OV_OGAMC0		0x30024
++#define OVC_OVADD		0x38000
++#define OVC_DOVCSTA		0x38008
++#define OVC_OGAMC5		0x38010
++#define OVC_OGAMC4		0x38014
++#define OVC_OGAMC3		0x38018
++#define OVC_OGAMC2		0x3801C
++#define OVC_OGAMC1		0x38020
++#define OVC_OGAMC0		0x38024
++
++/*
++ * Some BIOS scratch area registers.  The 845 (and 830?) store the amount
++ * of video memory available to the BIOS in SWF1.
++ */
++#define SWF0			0x71410
++#define SWF1			0x71414
++#define SWF2			0x71418
++#define SWF3			0x7141c
++#define SWF4			0x71420
++#define SWF5			0x71424
++#define SWF6			0x71428
++
++/*
++ * 855 scratch registers.
++ */
++#define SWF00			0x70410
++#define SWF01			0x70414
++#define SWF02			0x70418
++#define SWF03			0x7041c
++#define SWF04			0x70420
++#define SWF05			0x70424
++#define SWF06			0x70428
++
++#define SWF10			SWF0
++#define SWF11			SWF1
++#define SWF12			SWF2
++#define SWF13			SWF3
++#define SWF14			SWF4
++#define SWF15			SWF5
++#define SWF16			SWF6
++
++#define SWF30			0x72414
++#define SWF31			0x72418
++#define SWF32			0x7241c
++
++
++/*
++ * Palette registers
++ */
++#define PALETTE_A		0x0a000
++#define PALETTE_B		0x0a800
++#define PALETTE_C		0x0ac00
++
++/* Cursor A & B regs */
++#define CURACNTR		0x70080
++#define CURSOR_MODE_DISABLE		0x00
++#define CURSOR_MODE_64_32B_AX		0x07
++#define CURSOR_MODE_64_ARGB_AX		((1 << 5) | CURSOR_MODE_64_32B_AX)
++#define MCURSOR_GAMMA_ENABLE		(1 << 26)
++#define CURABASE		0x70084
++#define CURAPOS			0x70088
++#define CURSOR_POS_MASK			0x007FF
++#define CURSOR_POS_SIGN			0x8000
++#define CURSOR_X_SHIFT			0
++#define CURSOR_Y_SHIFT			16
++#define CURBCNTR		0x700c0
++#define CURBBASE		0x700c4
++#define CURBPOS			0x700c8
++#define CURCCNTR		0x700e0
++#define CURCBASE		0x700e4
++#define CURCPOS			0x700e8
++
++/*
++ * Interrupt Registers
++ */
++#define IER			0x020a0
++#define IIR			0x020a4
++#define IMR			0x020a8
++#define ISR			0x020ac
++
++/*
++ * MOORESTOWN delta registers
++ */
++#define MRST_DPLL_A		0x0f014
++#define MDFLD_DPLL_B		0x0f018
++#define MDFLD_INPUT_REF_SEL		(1 << 14)
++#define MDFLD_VCO_SEL			(1 << 16)
++#define DPLLA_MODE_LVDS			(2 << 26)	/* mrst */
++#define MDFLD_PLL_LATCHEN		(1 << 28)
++#define MDFLD_PWR_GATE_EN		(1 << 30)
++#define MDFLD_P1_MASK			(0x1FF << 17)
++#define MRST_FPA0		0x0f040
++#define MRST_FPA1		0x0f044
++#define MDFLD_DPLL_DIV0		0x0f048
++#define MDFLD_DPLL_DIV1		0x0f04c
++#define MRST_PERF_MODE		0x020f4
++
++/*
++ * MEDFIELD HDMI registers
++ */
++#define HDMIPHYMISCCTL		0x61134
++#define HDMI_PHY_POWER_DOWN		0x7f
++#define HDMIB_CONTROL		0x61140
++#define HDMIB_PORT_EN			(1 << 31)
++#define HDMIB_PIPE_B_SELECT		(1 << 30)
++#define HDMIB_NULL_PACKET		(1 << 9)
++#define HDMIB_HDCP_PORT			(1 << 5)
++
++/* #define LVDS			0x61180 */
++#define MRST_PANEL_8TO6_DITHER_ENABLE	(1 << 25)
++#define MRST_PANEL_24_DOT_1_FORMAT	(1 << 24)
++#define LVDS_A3_POWER_UP_0_OUTPUT	(1 << 6)
++
++#define MIPI			0x61190
++#define MIPI_C			0x62190
++#define MIPI_PORT_EN			(1 << 31)
++/* Turns on border drawing to allow centered display. */
++#define SEL_FLOPPED_HSTX		(1 << 23)
++#define PASS_FROM_SPHY_TO_AFE		(1 << 16)
++#define MIPI_BORDER_EN			(1 << 15)
++#define MIPIA_3LANE_MIPIC_1LANE		0x1
++#define MIPIA_2LANE_MIPIC_2LANE		0x2
++#define TE_TRIGGER_DSI_PROTOCOL		(1 << 2)
++#define TE_TRIGGER_GPIO_PIN		(1 << 3)
++#define MIPI_TE_COUNT		0x61194
++
++/* #define PP_CONTROL	0x61204 */
++#define POWER_DOWN_ON_RESET		(1 << 1)
++
++/* #define PFIT_CONTROL	0x61230 */
++#define PFIT_PIPE_SELECT		(3 << 29)
++#define PFIT_PIPE_SELECT_SHIFT		(29)
++
++/* #define BLC_PWM_CTL		0x61254 */
++#define MRST_BACKLIGHT_MODULATION_FREQ_SHIFT	(16)
++#define MRST_BACKLIGHT_MODULATION_FREQ_MASK	(0xffff << 16)
++
++/* #define PIPEACONF 0x70008 */
++#define PIPEACONF_PIPE_STATE		(1 << 30)
++/* #define DSPACNTR		0x70180 */
++
++#define MRST_DSPABASE		0x7019c
++#define MRST_DSPBBASE		0x7119c
++#define MDFLD_DSPCBASE		0x7219c
++
++/*
++ * Moorestown registers.
++ */
++
++/*
++ *	MIPI IP registers
++ */
++#define MIPIC_REG_OFFSET		0x800
++
++#define DEVICE_READY_REG		0xb000
++#define LP_OUTPUT_HOLD				(1 << 16)
++#define EXIT_ULPS_DEV_READY			0x3
++#define LP_OUTPUT_HOLD_RELEASE			0x810000
++# define ENTERING_ULPS				(2 << 1)
++# define EXITING_ULPS				(1 << 1)
++# define ULPS_MASK				(3 << 1)
++# define BUS_POSSESSION				(1 << 3)
++#define INTR_STAT_REG			0xb004
++#define RX_SOT_ERROR				(1 << 0)
++#define RX_SOT_SYNC_ERROR			(1 << 1)
++#define RX_ESCAPE_MODE_ENTRY_ERROR		(1 << 3)
++#define RX_LP_TX_SYNC_ERROR			(1 << 4)
++#define RX_HS_RECEIVE_TIMEOUT_ERROR		(1 << 5)
++#define RX_FALSE_CONTROL_ERROR			(1 << 6)
++#define RX_ECC_SINGLE_BIT_ERROR			(1 << 7)
++#define RX_ECC_MULTI_BIT_ERROR			(1 << 8)
++#define RX_CHECKSUM_ERROR			(1 << 9)
++#define RX_DSI_DATA_TYPE_NOT_RECOGNIZED		(1 << 10)
++#define RX_DSI_VC_ID_INVALID			(1 << 11)
++#define TX_FALSE_CONTROL_ERROR			(1 << 12)
++#define TX_ECC_SINGLE_BIT_ERROR			(1 << 13)
++#define TX_ECC_MULTI_BIT_ERROR			(1 << 14)
++#define TX_CHECKSUM_ERROR			(1 << 15)
++#define TX_DSI_DATA_TYPE_NOT_RECOGNIZED		(1 << 16)
++#define TX_DSI_VC_ID_INVALID			(1 << 17)
++#define HIGH_CONTENTION				(1 << 18)
++#define LOW_CONTENTION				(1 << 19)
++#define DPI_FIFO_UNDER_RUN			(1 << 20)
++#define HS_TX_TIMEOUT				(1 << 21)
++#define LP_RX_TIMEOUT				(1 << 22)
++#define TURN_AROUND_ACK_TIMEOUT			(1 << 23)
++#define ACK_WITH_NO_ERROR			(1 << 24)
++#define HS_GENERIC_WR_FIFO_FULL			(1 << 27)
++#define LP_GENERIC_WR_FIFO_FULL			(1 << 28)
++#define SPL_PKT_SENT				(1 << 30)
++#define INTR_EN_REG			0xb008
++#define DSI_FUNC_PRG_REG		0xb00c
++#define DPI_CHANNEL_NUMBER_POS			0x03
++#define DBI_CHANNEL_NUMBER_POS			0x05
++#define FMT_DPI_POS				0x07
++#define FMT_DBI_POS				0x0A
++#define DBI_DATA_WIDTH_POS			0x0D
++
++/* DPI PIXEL FORMATS */
++#define RGB_565_FMT				0x01	/* RGB 565 FORMAT */
++#define RGB_666_FMT				0x02	/* RGB 666 FORMAT */
++#define LRGB_666_FMT				0x03	/* RGB LOOSELY PACKED
++							 * 666 FORMAT
++							 */
++#define RGB_888_FMT				0x04	/* RGB 888 FORMAT */
++#define VIRTUAL_CHANNEL_NUMBER_0		0x00	/* Virtual channel 0 */
++#define VIRTUAL_CHANNEL_NUMBER_1		0x01	/* Virtual channel 1 */
++#define VIRTUAL_CHANNEL_NUMBER_2		0x02	/* Virtual channel 2 */
++#define VIRTUAL_CHANNEL_NUMBER_3		0x03	/* Virtual channel 3 */
++
++#define DBI_NOT_SUPPORTED			0x00	/* command mode
++							 * is not supported
++							 */
++#define DBI_DATA_WIDTH_16BIT			0x01	/* 16 bit data */
++#define DBI_DATA_WIDTH_9BIT			0x02	/* 9 bit data */
++#define DBI_DATA_WIDTH_8BIT			0x03	/* 8 bit data */
++#define DBI_DATA_WIDTH_OPT1			0x04	/* option 1 */
++#define DBI_DATA_WIDTH_OPT2			0x05	/* option 2 */
++
++#define HS_TX_TIMEOUT_REG		0xb010
++#define LP_RX_TIMEOUT_REG		0xb014
++#define TURN_AROUND_TIMEOUT_REG		0xb018
++#define DEVICE_RESET_REG		0xb01C
++#define DPI_RESOLUTION_REG		0xb020
++#define RES_V_POS				0x10
++#define DBI_RESOLUTION_REG		0xb024 /* Reserved for MDFLD */
++#define HORIZ_SYNC_PAD_COUNT_REG	0xb028
++#define HORIZ_BACK_PORCH_COUNT_REG	0xb02C
++#define HORIZ_FRONT_PORCH_COUNT_REG	0xb030
++#define HORIZ_ACTIVE_AREA_COUNT_REG	0xb034
++#define VERT_SYNC_PAD_COUNT_REG		0xb038
++#define VERT_BACK_PORCH_COUNT_REG	0xb03c
++#define VERT_FRONT_PORCH_COUNT_REG	0xb040
++#define HIGH_LOW_SWITCH_COUNT_REG	0xb044
++#define DPI_CONTROL_REG			0xb048
++#define DPI_SHUT_DOWN				(1 << 0)
++#define DPI_TURN_ON				(1 << 1)
++#define DPI_COLOR_MODE_ON			(1 << 2)
++#define DPI_COLOR_MODE_OFF			(1 << 3)
++#define DPI_BACK_LIGHT_ON			(1 << 4)
++#define DPI_BACK_LIGHT_OFF			(1 << 5)
++#define DPI_LP					(1 << 6)
++#define DPI_DATA_REG			0xb04c
++#define DPI_BACK_LIGHT_ON_DATA			0x07
++#define DPI_BACK_LIGHT_OFF_DATA			0x17
++#define INIT_COUNT_REG			0xb050
++#define MAX_RET_PAK_REG			0xb054
++#define VIDEO_FMT_REG			0xb058
++#define COMPLETE_LAST_PCKT			(1 << 2)
++#define EOT_DISABLE_REG			0xb05c
++#define ENABLE_CLOCK_STOPPING			(1 << 1)
++#define LP_BYTECLK_REG			0xb060
++#define LP_GEN_DATA_REG			0xb064
++#define HS_GEN_DATA_REG			0xb068
++#define LP_GEN_CTRL_REG			0xb06C
++#define HS_GEN_CTRL_REG			0xb070
++#define DCS_CHANNEL_NUMBER_POS		0x6
++#define MCS_COMMANDS_POS		0x8
++#define WORD_COUNTS_POS			0x8
++#define MCS_PARAMETER_POS			0x10
++#define GEN_FIFO_STAT_REG		0xb074
++#define HS_DATA_FIFO_FULL			(1 << 0)
++#define HS_DATA_FIFO_HALF_EMPTY			(1 << 1)
++#define HS_DATA_FIFO_EMPTY			(1 << 2)
++#define LP_DATA_FIFO_FULL			(1 << 8)
++#define LP_DATA_FIFO_HALF_EMPTY			(1 << 9)
++#define LP_DATA_FIFO_EMPTY			(1 << 10)
++#define HS_CTRL_FIFO_FULL			(1 << 16)
++#define HS_CTRL_FIFO_HALF_EMPTY			(1 << 17)
++#define HS_CTRL_FIFO_EMPTY			(1 << 18)
++#define LP_CTRL_FIFO_FULL			(1 << 24)
++#define LP_CTRL_FIFO_HALF_EMPTY			(1 << 25)
++#define LP_CTRL_FIFO_EMPTY			(1 << 26)
++#define DBI_FIFO_EMPTY				(1 << 27)
++#define DPI_FIFO_EMPTY				(1 << 28)
++#define HS_LS_DBI_ENABLE_REG		0xb078
++#define TXCLKESC_REG			0xb07c
++#define DPHY_PARAM_REG			0xb080
++#define DBI_BW_CTRL_REG			0xb084
++#define CLK_LANE_SWT_REG		0xb088
++
++/*
++ * MIPI Adapter registers
++ */
++#define MIPI_CONTROL_REG		0xb104
++#define MIPI_2X_CLOCK_BITS			((1 << 0) | (1 << 1))
++#define MIPI_DATA_ADDRESS_REG		0xb108
++#define MIPI_DATA_LENGTH_REG		0xb10C
++#define MIPI_COMMAND_ADDRESS_REG	0xb110
++#define MIPI_COMMAND_LENGTH_REG		0xb114
++#define MIPI_READ_DATA_RETURN_REG0	0xb118
++#define MIPI_READ_DATA_RETURN_REG1	0xb11C
++#define MIPI_READ_DATA_RETURN_REG2	0xb120
++#define MIPI_READ_DATA_RETURN_REG3	0xb124
++#define MIPI_READ_DATA_RETURN_REG4	0xb128
++#define MIPI_READ_DATA_RETURN_REG5	0xb12C
++#define MIPI_READ_DATA_RETURN_REG6	0xb130
++#define MIPI_READ_DATA_RETURN_REG7	0xb134
++#define MIPI_READ_DATA_VALID_REG	0xb138
++
++/* DBI COMMANDS */
++#define soft_reset			0x01
++/*
++ *	The display module performs a software reset.
++ *	Registers are written with their SW Reset default values.
++ */
++#define get_power_mode			0x0a
++/*
++ *	The display module returns the current power mode
++ */
++#define get_address_mode		0x0b
++/*
++ *	The display module returns the current status.
++ */
++#define get_pixel_format		0x0c
++/*
++ *	This command gets the pixel format for the RGB image data
++ *	used by the interface.
++ */
++#define get_display_mode		0x0d
++/*
++ *	The display module returns the Display Image Mode status.
++ */
++#define get_signal_mode			0x0e
++/*
++ *	The display module returns the Display Signal Mode.
++ */
++#define get_diagnostic_result		0x0f
++/*
++ *	The display module returns the self-diagnostic results following
++ *	a Sleep Out command.
++ */
++#define enter_sleep_mode		0x10
++/*
++ *	This command causes the display module to enter the Sleep mode.
++ *	In this mode, all unnecessary blocks inside the display module are
++ *	disabled except interface communication. This is the lowest power
++ *	mode the display module supports.
++ */
++#define exit_sleep_mode			0x11
++/*
++ *	This command causes the display module to exit Sleep mode.
++ *	All blocks inside the display module are enabled.
++ */
++#define enter_partial_mode		0x12
++/*
++ *	This command causes the display module to enter the Partial Display
++ *	Mode. The Partial Display Mode window is described by the
++ *	set_partial_area command.
++ */
++#define enter_normal_mode		0x13
++/*
++ *	This command causes the display module to enter the Normal mode.
++ *	Normal Mode is defined as Partial Display mode and Scroll mode are off
++ */
++#define exit_invert_mode		0x20
++/*
++ *	This command causes the display module to stop inverting the image
++ *	data on the display device. The frame memory contents remain unchanged.
++ *	No status bits are changed.
++ */
++#define enter_invert_mode		0x21
++/*
++ *	This command causes the display module to invert the image data only on
++ *	the display device. The frame memory contents remain unchanged.
++ *	No status bits are changed.
++ */
++#define set_gamma_curve			0x26
++/*
++ *	This command selects the desired gamma curve for the display device.
++ *	Four fixed gamma curves are defined in section DCS spec.
++ */
++#define set_display_off			0x28
++/* ************************************************************************* *\
++This command causes the display module to stop displaying the image data
++on the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_display_on			0x29
++/* ************************************************************************* *\
++This command causes the display module to start displaying the image data
++on the display device. The frame memory contents remain unchanged.
++No status bits are changed.
++\* ************************************************************************* */
++#define set_column_address		0x2a
++/*
++ *	This command defines the column extent of the frame memory accessed by
++ *	the hostprocessor with the read_memory_continue and
++ *	write_memory_continue commands.
++ *	No status bits are changed.
++ */
++#define set_page_addr			0x2b
++/*
++ *	This command defines the page extent of the frame memory accessed by
++ *	the host processor with the write_memory_continue and
++ *	read_memory_continue command.
++ *	No status bits are changed.
++ */
++#define write_mem_start			0x2c
++/*
++ *	This command transfers image data from the host processor to the
++ *	display modules frame memory starting at the pixel location specified
++ *	by preceding set_column_address and set_page_address commands.
++ */
++#define set_partial_area		0x30
++/*
++ *	This command defines the Partial Display mode s display area.
++ *	There are two parameters associated with this command, the first
++ *	defines the Start Row (SR) and the second the End Row (ER). SR and ER
++ *	refer to the Frame Memory Line Pointer.
++ */
++#define set_scroll_area			0x33
++/*
++ *	This command defines the display modules Vertical Scrolling Area.
++ */
++#define set_tear_off			0x34
++/*
++ *	This command turns off the display modules Tearing Effect output
++ *	signal on the TE signal line.
++ */
++#define set_tear_on			0x35
++/*
++ *	This command turns on the display modules Tearing Effect output signal
++ *	on the TE signal line.
++ */
++#define set_address_mode		0x36
++/*
++ *	This command sets the data order for transfers from the host processor
++ *	to display modules frame memory,bits B[7:5] and B3, and from the
++ *	display modules frame memory to the display device, bits B[2:0] and B4.
++ */
++#define set_scroll_start		0x37
++/*
++ *	This command sets the start of the vertical scrolling area in the frame
++ *	memory. The vertical scrolling area is fully defined when this command
++ *	is used with the set_scroll_area command The set_scroll_start command
++ *	has one parameter, the Vertical Scroll Pointer. The VSP defines the
++ *	line in the frame memory that is written to the display device as the
++ *	first line of the vertical scroll area.
++ */
++#define exit_idle_mode			0x38
++/*
++ *	This command causes the display module to exit Idle mode.
++ */
++#define enter_idle_mode			0x39
++/*
++ *	This command causes the display module to enter Idle Mode.
++ *	In Idle Mode, color expression is reduced. Colors are shown on the
++ *	display device using the MSB of each of the R, G and B color
++ *	components in the frame memory
++ */
++#define set_pixel_format		0x3a
++/*
++ *	This command sets the pixel format for the RGB image data used by the
++ *	interface.
++ *	Bits D[6:4]  DPI Pixel Format Definition
++ *	Bits D[2:0]  DBI Pixel Format Definition
++ *	Bits D7 and D3 are not used.
++ */
++#define DCS_PIXEL_FORMAT_3bpp		0x1
++#define DCS_PIXEL_FORMAT_8bpp		0x2
++#define DCS_PIXEL_FORMAT_12bpp		0x3
++#define DCS_PIXEL_FORMAT_16bpp		0x5
++#define DCS_PIXEL_FORMAT_18bpp		0x6
++#define DCS_PIXEL_FORMAT_24bpp		0x7
++
++#define write_mem_cont			0x3c
++
++/*
++ *	This command transfers image data from the host processor to the
++ *	display module's frame memory continuing from the pixel location
++ *	following the previous write_memory_continue or write_memory_start
++ *	command.
++ */
++#define set_tear_scanline		0x44
++/*
++ *	This command turns on the display modules Tearing Effect output signal
++ *	on the TE signal line when the display module reaches line N.
++ */
++#define get_scanline			0x45
++/*
++ *	The display module returns the current scanline, N, used to update the
++ *	 display device. The total number of scanlines on a display device is
++ *	defined as VSYNC + VBP + VACT + VFP.The first scanline is defined as
++ *	the first line of V Sync and is denoted as Line 0.
++ *	When in Sleep Mode, the value returned by get_scanline is undefined.
++ */
++
++/* MCS or Generic COMMANDS */
++/* MCS/generic data type */
++#define GEN_SHORT_WRITE_0	0x03  /* generic short write, no parameters */
++#define GEN_SHORT_WRITE_1	0x13  /* generic short write, 1 parameters */
++#define GEN_SHORT_WRITE_2	0x23  /* generic short write, 2 parameters */
++#define GEN_READ_0		0x04  /* generic read, no parameters */
++#define GEN_READ_1		0x14  /* generic read, 1 parameters */
++#define GEN_READ_2		0x24  /* generic read, 2 parameters */
++#define GEN_LONG_WRITE		0x29  /* generic long write */
++#define MCS_SHORT_WRITE_0	0x05  /* MCS short write, no parameters */
++#define MCS_SHORT_WRITE_1	0x15  /* MCS short write, 1 parameters */
++#define MCS_READ		0x06  /* MCS read, no parameters */
++#define MCS_LONG_WRITE		0x39  /* MCS long write */
++/* MCS/generic commands */
++/* TPO MCS */
++#define write_display_profile		0x50
++#define write_display_brightness	0x51
++#define write_ctrl_display		0x53
++#define write_ctrl_cabc			0x55
++  #define UI_IMAGE		0x01
++  #define STILL_IMAGE		0x02
++  #define MOVING_IMAGE		0x03
++#define write_hysteresis		0x57
++#define write_gamma_setting		0x58
++#define write_cabc_min_bright		0x5e
++#define write_kbbc_profile		0x60
++/* TMD MCS */
++#define tmd_write_display_brightness 0x8c
++
++/*
++ *	This command is used to control ambient light, panel backlight
++ *	brightness and gamma settings.
++ */
++#define BRIGHT_CNTL_BLOCK_ON	(1 << 5)
++#define AMBIENT_LIGHT_SENSE_ON	(1 << 4)
++#define DISPLAY_DIMMING_ON	(1 << 3)
++#define BACKLIGHT_ON		(1 << 2)
++#define DISPLAY_BRIGHTNESS_AUTO	(1 << 1)
++#define GAMMA_AUTO		(1 << 0)
++
++/* DCS Interface Pixel Formats */
++#define DCS_PIXEL_FORMAT_3BPP	0x1
++#define DCS_PIXEL_FORMAT_8BPP	0x2
++#define DCS_PIXEL_FORMAT_12BPP	0x3
++#define DCS_PIXEL_FORMAT_16BPP	0x5
++#define DCS_PIXEL_FORMAT_18BPP	0x6
++#define DCS_PIXEL_FORMAT_24BPP	0x7
++/* ONE PARAMETER READ DATA */
++#define addr_mode_data		0xfc
++#define diag_res_data		0x00
++#define disp_mode_data		0x23
++#define pxl_fmt_data		0x77
++#define pwr_mode_data		0x74
++#define sig_mode_data		0x00
++/* TWO PARAMETERS READ DATA */
++#define scanline_data1		0xff
++#define scanline_data2		0xff
++#define NON_BURST_MODE_SYNC_PULSE	0x01	/* Non Burst Mode
++						 * with Sync Pulse
++						 */
++#define NON_BURST_MODE_SYNC_EVENTS	0x02	/* Non Burst Mode
++						 * with Sync events
++						 */
++#define BURST_MODE			0x03	/* Burst Mode */
++#define DBI_COMMAND_BUFFER_SIZE		0x240   /* 0x32 */    /* 0x120 */
++						/* Allocate at least
++						 * 0x100 Byte with 32
++						 * byte alignment
++						 */
++#define DBI_DATA_BUFFER_SIZE		0x120	/* Allocate at least
++						 * 0x100 Byte with 32
++						 * byte alignment
++						 */
++#define DBI_CB_TIME_OUT			0xFFFF
++
++#define GEN_FB_TIME_OUT			2000
++
++#define SKU_83				0x01
++#define SKU_100				0x02
++#define SKU_100L			0x04
++#define SKU_BYPASS			0x08
++
++/* Some handy macros for playing with bitfields. */
++#define PSB_MASK(high, low) (((1<<((high)-(low)+1))-1)<<(low))
++#define SET_FIELD(value, field) (((value) << field ## _SHIFT) & field ## _MASK)
++#define GET_FIELD(word, field) (((word)  & field ## _MASK) >> field ## _SHIFT)
++
++#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
++
++/* PCI config space */
++
++#define SB_PCKT         0x02100 /* cedarview */
++# define SB_OPCODE_MASK                         PSB_MASK(31, 16)
++# define SB_OPCODE_SHIFT                        16
++# define SB_OPCODE_READ                         0
++# define SB_OPCODE_WRITE                        1
++# define SB_DEST_MASK                           PSB_MASK(15, 8)
++# define SB_DEST_SHIFT                          8
++# define SB_DEST_DPLL                           0x88
++# define SB_BYTE_ENABLE_MASK                    PSB_MASK(7, 4)
++# define SB_BYTE_ENABLE_SHIFT                   4
++# define SB_BUSY                                (1 << 0)
++
++#define DSPCLK_GATE_D		0x6200
++# define VRHUNIT_CLOCK_GATE_DISABLE		(1 << 28) /* Fixed value on CDV */
++# define DPOUNIT_CLOCK_GATE_DISABLE		(1 << 11)
++# define DPIOUNIT_CLOCK_GATE_DISABLE		(1 << 6)
++
++#define RAMCLK_GATE_D		0x6210
++
++/* 32-bit value read/written from the DPIO reg. */
++#define SB_DATA		0x02104 /* cedarview */
++/* 32-bit address of the DPIO reg to be read/written. */
++#define SB_ADDR		0x02108 /* cedarview */
++#define DPIO_CFG	0x02110 /* cedarview */
++# define DPIO_MODE_SELECT_1			(1 << 3)
++# define DPIO_MODE_SELECT_0			(1 << 2)
++# define DPIO_SFR_BYPASS			(1 << 1)
++/* reset is active low */
++# define DPIO_CMN_RESET_N			(1 << 0)
++
++/* Cedarview sideband registers */
++#define _SB_M_A			0x8008
++#define _SB_M_B			0x8028
++#define SB_M(pipe) _PIPE(pipe, _SB_M_A, _SB_M_B)
++# define SB_M_DIVIDER_MASK			(0xFF << 24)
++# define SB_M_DIVIDER_SHIFT			24
++
++#define _SB_N_VCO_A		0x8014
++#define _SB_N_VCO_B		0x8034
++#define SB_N_VCO(pipe) _PIPE(pipe, _SB_N_VCO_A, _SB_N_VCO_B)
++#define SB_N_VCO_SEL_MASK			PSB_MASK(31, 30)
++#define SB_N_VCO_SEL_SHIFT			30
++#define SB_N_DIVIDER_MASK			PSB_MASK(29, 26)
++#define SB_N_DIVIDER_SHIFT			26
++#define SB_N_CB_TUNE_MASK			PSB_MASK(25, 24)
++#define SB_N_CB_TUNE_SHIFT			24
++
++#define _SB_REF_A		0x8018
++#define _SB_REF_B		0x8038
++#define SB_REF_SFR(pipe)	_PIPE(pipe, _SB_REF_A, _SB_REF_B)
++
++#define _SB_P_A			0x801c
++#define _SB_P_B			0x803c
++#define SB_P(pipe) _PIPE(pipe, _SB_P_A, _SB_P_B)
++#define SB_P2_DIVIDER_MASK			PSB_MASK(31, 30)
++#define SB_P2_DIVIDER_SHIFT			30
++#define SB_P2_10				0 /* HDMI, DP, DAC */
++#define SB_P2_5				1 /* DAC */
++#define SB_P2_14				2 /* LVDS single */
++#define SB_P2_7				3 /* LVDS double */
++#define SB_P1_DIVIDER_MASK			PSB_MASK(15, 12)
++#define SB_P1_DIVIDER_SHIFT			12
++
++#define PSB_LANE0		0x120
++#define PSB_LANE1		0x220
++#define PSB_LANE2		0x2320
++#define PSB_LANE3		0x2420
++
++#define LANE_PLL_MASK		(0x7 << 20)
++#define LANE_PLL_ENABLE		(0x3 << 20)
++
++
++#endif
+diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+new file mode 100644
+index 0000000..36330ca
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+@@ -0,0 +1,2607 @@
++/*
++ * Copyright 2006 Dave Airlie <airlied at linux.ie>
++ * Copyright © 2006-2007 Intel Corporation
++ *   Jesse Barnes <jesse.barnes at intel.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *	Eric Anholt <eric at anholt.net>
++ */
++#include <linux/module.h>
++#include <linux/i2c.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include "drmP.h"
++#include "drm.h"
++#include "drm_crtc.h"
++#include "drm_edid.h"
++#include "psb_intel_drv.h"
++#include "gma_drm.h"
++#include "psb_drv.h"
++#include "psb_intel_sdvo_regs.h"
++#include "psb_intel_reg.h"
++
++#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
++#define SDVO_RGB_MASK  (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
++#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
++#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
++
++#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
++                         SDVO_TV_MASK)
++
++#define IS_TV(c)	(c->output_flag & SDVO_TV_MASK)
++#define IS_TMDS(c)	(c->output_flag & SDVO_TMDS_MASK)
++#define IS_LVDS(c)	(c->output_flag & SDVO_LVDS_MASK)
++#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
++
++
++static const char *tv_format_names[] = {
++	"NTSC_M"   , "NTSC_J"  , "NTSC_443",
++	"PAL_B"    , "PAL_D"   , "PAL_G"   ,
++	"PAL_H"    , "PAL_I"   , "PAL_M"   ,
++	"PAL_N"    , "PAL_NC"  , "PAL_60"  ,
++	"SECAM_B"  , "SECAM_D" , "SECAM_G" ,
++	"SECAM_K"  , "SECAM_K1", "SECAM_L" ,
++	"SECAM_60"
++};
++
++#define TV_FORMAT_NUM  (sizeof(tv_format_names) / sizeof(*tv_format_names))
++
++struct psb_intel_sdvo {
++	struct psb_intel_encoder base;
++
++	struct i2c_adapter *i2c;
++	u8 slave_addr;
++
++	struct i2c_adapter ddc;
++
++	/* Register for the SDVO device: SDVOB or SDVOC */
++	int sdvo_reg;
++
++	/* Active outputs controlled by this SDVO output */
++	uint16_t controlled_output;
++
++	/*
++	 * Capabilities of the SDVO device returned by
++	 * i830_sdvo_get_capabilities()
++	 */
++	struct psb_intel_sdvo_caps caps;
++
++	/* Pixel clock limitations reported by the SDVO device, in kHz */
++	int pixel_clock_min, pixel_clock_max;
++
++	/*
++	* For multiple function SDVO device,
++	* this is for current attached outputs.
++	*/
++	uint16_t attached_output;
++
++	/**
++	 * This is used to select the color range of RBG outputs in HDMI mode.
++	 * It is only valid when using TMDS encoding and 8 bit per color mode.
++	 */
++	uint32_t color_range;
++
++	/**
++	 * This is set if we're going to treat the device as TV-out.
++	 *
++	 * While we have these nice friendly flags for output types that ought
++	 * to decide this for us, the S-Video output on our HDMI+S-Video card
++	 * shows up as RGB1 (VGA).
++	 */
++	bool is_tv;
++
++	/* This is for current tv format name */
++	int tv_format_index;
++
++	/**
++	 * This is set if we treat the device as HDMI, instead of DVI.
++	 */
++	bool is_hdmi;
++	bool has_hdmi_monitor;
++	bool has_hdmi_audio;
++
++	/**
++	 * This is set if we detect output of sdvo device as LVDS and
++	 * have a valid fixed mode to use with the panel.
++	 */
++	bool is_lvds;
++
++	/**
++	 * This is sdvo fixed pannel mode pointer
++	 */
++	struct drm_display_mode *sdvo_lvds_fixed_mode;
++
++	/* DDC bus used by this SDVO encoder */
++	uint8_t ddc_bus;
++
++	/* Input timings for adjusted_mode */
++	struct psb_intel_sdvo_dtd input_dtd;
++};
++
++struct psb_intel_sdvo_connector {
++	struct psb_intel_connector base;
++
++	/* Mark the type of connector */
++	uint16_t output_flag;
++
++	int force_audio;
++
++	/* This contains all current supported TV format */
++	u8 tv_format_supported[TV_FORMAT_NUM];
++	int   format_supported_num;
++	struct drm_property *tv_format;
++
++	/* add the property for the SDVO-TV */
++	struct drm_property *left;
++	struct drm_property *right;
++	struct drm_property *top;
++	struct drm_property *bottom;
++	struct drm_property *hpos;
++	struct drm_property *vpos;
++	struct drm_property *contrast;
++	struct drm_property *saturation;
++	struct drm_property *hue;
++	struct drm_property *sharpness;
++	struct drm_property *flicker_filter;
++	struct drm_property *flicker_filter_adaptive;
++	struct drm_property *flicker_filter_2d;
++	struct drm_property *tv_chroma_filter;
++	struct drm_property *tv_luma_filter;
++	struct drm_property *dot_crawl;
++
++	/* add the property for the SDVO-TV/LVDS */
++	struct drm_property *brightness;
++
++	/* Add variable to record current setting for the above property */
++	u32	left_margin, right_margin, top_margin, bottom_margin;
++
++	/* this is to get the range of margin.*/
++	u32	max_hscan,  max_vscan;
++	u32	max_hpos, cur_hpos;
++	u32	max_vpos, cur_vpos;
++	u32	cur_brightness, max_brightness;
++	u32	cur_contrast,	max_contrast;
++	u32	cur_saturation, max_saturation;
++	u32	cur_hue,	max_hue;
++	u32	cur_sharpness,	max_sharpness;
++	u32	cur_flicker_filter,		max_flicker_filter;
++	u32	cur_flicker_filter_adaptive,	max_flicker_filter_adaptive;
++	u32	cur_flicker_filter_2d,		max_flicker_filter_2d;
++	u32	cur_tv_chroma_filter,	max_tv_chroma_filter;
++	u32	cur_tv_luma_filter,	max_tv_luma_filter;
++	u32	cur_dot_crawl,	max_dot_crawl;
++};
++
++static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
++{
++	return container_of(encoder, struct psb_intel_sdvo, base.base);
++}
++
++static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
++{
++	return container_of(psb_intel_attached_encoder(connector),
++			    struct psb_intel_sdvo, base);
++}
++
++static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
++{
++	return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base);
++}
++
++static bool
++psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags);
++static bool
++psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
++			      struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
++			      int type);
++static bool
++psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
++				   struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
++
++/**
++ * Writes the SDVOB or SDVOC with the given value, but always writes both
++ * SDVOB and SDVOC to work around apparent hardware issues (according to
++ * comments in the BIOS).
++ */
++static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u32 val)
++{
++	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
++	u32 bval = val, cval = val;
++	int i;
++
++	if (psb_intel_sdvo->sdvo_reg == SDVOB) {
++		cval = REG_READ(SDVOC);
++	} else {
++		bval = REG_READ(SDVOB);
++	}
++	/*
++	 * Write the registers twice for luck. Sometimes,
++	 * writing them only once doesn't appear to 'stick'.
++	 * The BIOS does this too. Yay, magic
++	 */
++	for (i = 0; i < 2; i++)
++	{
++		REG_WRITE(SDVOB, bval);
++		REG_READ(SDVOB);
++		REG_WRITE(SDVOC, cval);
++		REG_READ(SDVOC);
++	}
++}
++
++static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 addr, u8 *ch)
++{
++	struct i2c_msg msgs[] = {
++		{
++			.addr = psb_intel_sdvo->slave_addr,
++			.flags = 0,
++			.len = 1,
++			.buf = &addr,
++		},
++		{
++			.addr = psb_intel_sdvo->slave_addr,
++			.flags = I2C_M_RD,
++			.len = 1,
++			.buf = ch,
++		}
++	};
++	int ret;
++
++	if ((ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, 2)) == 2)
++		return true;
++
++	DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
++	return false;
++}
++
++#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
++/** Mapping of command numbers to names, for debug output */
++static const struct _sdvo_cmd_name {
++	u8 cmd;
++	const char *name;
++} sdvo_cmd_names[] = {
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
++
++    /* Add the op code for SDVO enhancements */
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
++
++    /* HDMI op code */
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
++    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
++};
++
++#define IS_SDVOB(reg)	(reg == SDVOB)
++#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
++
++static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
++				   const void *args, int args_len)
++{
++	int i;
++
++	DRM_DEBUG_KMS("%s: W: %02X ",
++				SDVO_NAME(psb_intel_sdvo), cmd);
++	for (i = 0; i < args_len; i++)
++		DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
++	for (; i < 8; i++)
++		DRM_LOG_KMS("   ");
++	for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
++		if (cmd == sdvo_cmd_names[i].cmd) {
++			DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
++			break;
++		}
++	}
++	if (i == ARRAY_SIZE(sdvo_cmd_names))
++		DRM_LOG_KMS("(%02X)", cmd);
++	DRM_LOG_KMS("\n");
++}
++
++static const char *cmd_status_names[] = {
++	"Power on",
++	"Success",
++	"Not supported",
++	"Invalid arg",
++	"Pending",
++	"Target not specified",
++	"Scaling not supported"
++};
++
++static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
++				 const void *args, int args_len)
++{
++	u8 buf[args_len*2 + 2], status;
++	struct i2c_msg msgs[args_len + 3];
++	int i, ret;
++
++	psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
++
++	for (i = 0; i < args_len; i++) {
++		msgs[i].addr = psb_intel_sdvo->slave_addr;
++		msgs[i].flags = 0;
++		msgs[i].len = 2;
++		msgs[i].buf = buf + 2 *i;
++		buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
++		buf[2*i + 1] = ((u8*)args)[i];
++	}
++	msgs[i].addr = psb_intel_sdvo->slave_addr;
++	msgs[i].flags = 0;
++	msgs[i].len = 2;
++	msgs[i].buf = buf + 2*i;
++	buf[2*i + 0] = SDVO_I2C_OPCODE;
++	buf[2*i + 1] = cmd;
++
++	/* the following two are to read the response */
++	status = SDVO_I2C_CMD_STATUS;
++	msgs[i+1].addr = psb_intel_sdvo->slave_addr;
++	msgs[i+1].flags = 0;
++	msgs[i+1].len = 1;
++	msgs[i+1].buf = &status;
++
++	msgs[i+2].addr = psb_intel_sdvo->slave_addr;
++	msgs[i+2].flags = I2C_M_RD;
++	msgs[i+2].len = 1;
++	msgs[i+2].buf = &status;
++
++	ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, i+3);
++	if (ret < 0) {
++		DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
++		return false;
++	}
++	if (ret != i+3) {
++		/* failure in I2C transfer */
++		DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
++		return false;
++	}
++
++	return true;
++}
++
++static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
++				     void *response, int response_len)
++{
++	u8 retry = 5;
++	u8 status;
++	int i;
++
++	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(psb_intel_sdvo));
++
++	/*
++	 * The documentation states that all commands will be
++	 * processed within 15µs, and that we need only poll
++	 * the status byte a maximum of 3 times in order for the
++	 * command to be complete.
++	 *
++	 * Check 5 times in case the hardware failed to read the docs.
++	 */
++	if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
++				  SDVO_I2C_CMD_STATUS,
++				  &status))
++		goto log_fail;
++
++	while (status == SDVO_CMD_STATUS_PENDING && retry--) {
++		udelay(15);
++		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
++					  SDVO_I2C_CMD_STATUS,
++					  &status))
++			goto log_fail;
++	}
++
++	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
++		DRM_LOG_KMS("(%s)", cmd_status_names[status]);
++	else
++		DRM_LOG_KMS("(??? %d)", status);
++
++	if (status != SDVO_CMD_STATUS_SUCCESS)
++		goto log_fail;
++
++	/* Read the command response */
++	for (i = 0; i < response_len; i++) {
++		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
++					  SDVO_I2C_RETURN_0 + i,
++					  &((u8 *)response)[i]))
++			goto log_fail;
++		DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
++	}
++	DRM_LOG_KMS("\n");
++	return true;
++
++log_fail:
++	DRM_LOG_KMS("... failed\n");
++	return false;
++}
++
++static int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
++{
++	if (mode->clock >= 100000)
++		return 1;
++	else if (mode->clock >= 50000)
++		return 2;
++	else
++		return 4;
++}
++
++static bool psb_intel_sdvo_set_control_bus_switch(struct psb_intel_sdvo *psb_intel_sdvo,
++					      u8 ddc_bus)
++{
++	/* This must be the immediately preceding write before the i2c xfer */
++	return psb_intel_sdvo_write_cmd(psb_intel_sdvo,
++				    SDVO_CMD_SET_CONTROL_BUS_SWITCH,
++				    &ddc_bus, 1);
++}
++
++static bool psb_intel_sdvo_set_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, const void *data, int len)
++{
++	if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, data, len))
++		return false;
++
++	return psb_intel_sdvo_read_response(psb_intel_sdvo, NULL, 0);
++}
++
++static bool
++psb_intel_sdvo_get_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, void *value, int len)
++{
++	if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, NULL, 0))
++		return false;
++
++	return psb_intel_sdvo_read_response(psb_intel_sdvo, value, len);
++}
++
++static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdvo)
++{
++	struct psb_intel_sdvo_set_target_input_args targets = {0};
++	return psb_intel_sdvo_set_value(psb_intel_sdvo,
++				    SDVO_CMD_SET_TARGET_INPUT,
++				    &targets, sizeof(targets));
++}
++
++/**
++ * Return whether each input is trained.
++ *
++ * This function is making an assumption about the layout of the response,
++ * which should be checked against the docs.
++ */
++static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_sdvo *psb_intel_sdvo, bool *input_1, bool *input_2)
++{
++	struct psb_intel_sdvo_get_trained_inputs_response response;
++
++	BUILD_BUG_ON(sizeof(response) != 1);
++	if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
++				  &response, sizeof(response)))
++		return false;
++
++	*input_1 = response.input0_trained;
++	*input_2 = response.input1_trained;
++	return true;
++}
++
++static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_sdvo *psb_intel_sdvo,
++					  u16 outputs)
++{
++	return psb_intel_sdvo_set_value(psb_intel_sdvo,
++				    SDVO_CMD_SET_ACTIVE_OUTPUTS,
++				    &outputs, sizeof(outputs));
++}
++
++static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_sdvo *psb_intel_sdvo,
++					       int mode)
++{
++	u8 state = SDVO_ENCODER_STATE_ON;
++
++	switch (mode) {
++	case DRM_MODE_DPMS_ON:
++		state = SDVO_ENCODER_STATE_ON;
++		break;
++	case DRM_MODE_DPMS_STANDBY:
++		state = SDVO_ENCODER_STATE_STANDBY;
++		break;
++	case DRM_MODE_DPMS_SUSPEND:
++		state = SDVO_ENCODER_STATE_SUSPEND;
++		break;
++	case DRM_MODE_DPMS_OFF:
++		state = SDVO_ENCODER_STATE_OFF;
++		break;
++	}
++
++	return psb_intel_sdvo_set_value(psb_intel_sdvo,
++				    SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
++}
++
++static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_sdvo *psb_intel_sdvo,
++						   int *clock_min,
++						   int *clock_max)
++{
++	struct psb_intel_sdvo_pixel_clock_range clocks;
++
++	BUILD_BUG_ON(sizeof(clocks) != 4);
++	if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
++				  SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
++				  &clocks, sizeof(clocks)))
++		return false;
++
++	/* Convert the values from units of 10 kHz to kHz. */
++	*clock_min = clocks.min * 10;
++	*clock_max = clocks.max * 10;
++	return true;
++}
++
++static bool psb_intel_sdvo_set_target_output(struct psb_intel_sdvo *psb_intel_sdvo,
++					 u16 outputs)
++{
++	return psb_intel_sdvo_set_value(psb_intel_sdvo,
++				    SDVO_CMD_SET_TARGET_OUTPUT,
++				    &outputs, sizeof(outputs));
++}
++
++static bool psb_intel_sdvo_set_timing(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
++				  struct psb_intel_sdvo_dtd *dtd)
++{
++	return psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
++		psb_intel_sdvo_set_value(psb_intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
++}
++
++static bool psb_intel_sdvo_set_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
++					 struct psb_intel_sdvo_dtd *dtd)
++{
++	return psb_intel_sdvo_set_timing(psb_intel_sdvo,
++				     SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
++}
++
++static bool psb_intel_sdvo_set_output_timing(struct psb_intel_sdvo *psb_intel_sdvo,
++					 struct psb_intel_sdvo_dtd *dtd)
++{
++	return psb_intel_sdvo_set_timing(psb_intel_sdvo,
++				     SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
++}
++
++static bool
++psb_intel_sdvo_create_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
++					 uint16_t clock,
++					 uint16_t width,
++					 uint16_t height)
++{
++	struct psb_intel_sdvo_preferred_input_timing_args args;
++
++	memset(&args, 0, sizeof(args));
++	args.clock = clock;
++	args.width = width;
++	args.height = height;
++	args.interlace = 0;
++
++	if (psb_intel_sdvo->is_lvds &&
++	   (psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
++	    psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
++		args.scaled = 1;
++
++	return psb_intel_sdvo_set_value(psb_intel_sdvo,
++				    SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
++				    &args, sizeof(args));
++}
++
++static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
++						  struct psb_intel_sdvo_dtd *dtd)
++{
++	BUILD_BUG_ON(sizeof(dtd->part1) != 8);
++	BUILD_BUG_ON(sizeof(dtd->part2) != 8);
++	return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
++				    &dtd->part1, sizeof(dtd->part1)) &&
++		psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
++				     &dtd->part2, sizeof(dtd->part2));
++}
++
++static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_sdvo *psb_intel_sdvo, u8 val)
++{
++	return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
++}
++
++static void psb_intel_sdvo_get_dtd_from_mode(struct psb_intel_sdvo_dtd *dtd,
++					 const struct drm_display_mode *mode)
++{
++	uint16_t width, height;
++	uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
++	uint16_t h_sync_offset, v_sync_offset;
++
++	width = mode->crtc_hdisplay;
++	height = mode->crtc_vdisplay;
++
++	/* do some mode translations */
++	h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
++	h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
++
++	v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
++	v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
++
++	h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
++	v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
++
++	dtd->part1.clock = mode->clock / 10;
++	dtd->part1.h_active = width & 0xff;
++	dtd->part1.h_blank = h_blank_len & 0xff;
++	dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
++		((h_blank_len >> 8) & 0xf);
++	dtd->part1.v_active = height & 0xff;
++	dtd->part1.v_blank = v_blank_len & 0xff;
++	dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
++		((v_blank_len >> 8) & 0xf);
++
++	dtd->part2.h_sync_off = h_sync_offset & 0xff;
++	dtd->part2.h_sync_width = h_sync_len & 0xff;
++	dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
++		(v_sync_len & 0xf);
++	dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
++		((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
++		((v_sync_len & 0x30) >> 4);
++
++	dtd->part2.dtd_flags = 0x18;
++	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
++		dtd->part2.dtd_flags |= 0x2;
++	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
++		dtd->part2.dtd_flags |= 0x4;
++
++	dtd->part2.sdvo_flags = 0;
++	dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
++	dtd->part2.reserved = 0;
++}
++
++static void psb_intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
++					 const struct psb_intel_sdvo_dtd *dtd)
++{
++	mode->hdisplay = dtd->part1.h_active;
++	mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
++	mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
++	mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
++	mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
++	mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
++	mode->htotal = mode->hdisplay + dtd->part1.h_blank;
++	mode->htotal += (dtd->part1.h_high & 0xf) << 8;
++
++	mode->vdisplay = dtd->part1.v_active;
++	mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
++	mode->vsync_start = mode->vdisplay;
++	mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
++	mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
++	mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
++	mode->vsync_end = mode->vsync_start +
++		(dtd->part2.v_sync_off_width & 0xf);
++	mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
++	mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
++	mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
++
++	mode->clock = dtd->part1.clock * 10;
++
++	mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
++	if (dtd->part2.dtd_flags & 0x2)
++		mode->flags |= DRM_MODE_FLAG_PHSYNC;
++	if (dtd->part2.dtd_flags & 0x4)
++		mode->flags |= DRM_MODE_FLAG_PVSYNC;
++}
++
++static bool psb_intel_sdvo_check_supp_encode(struct psb_intel_sdvo *psb_intel_sdvo)
++{
++	struct psb_intel_sdvo_encode encode;
++
++	BUILD_BUG_ON(sizeof(encode) != 2);
++	return psb_intel_sdvo_get_value(psb_intel_sdvo,
++				  SDVO_CMD_GET_SUPP_ENCODE,
++				  &encode, sizeof(encode));
++}
++
++static bool psb_intel_sdvo_set_encode(struct psb_intel_sdvo *psb_intel_sdvo,
++				  uint8_t mode)
++{
++	return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
++}
++
++static bool psb_intel_sdvo_set_colorimetry(struct psb_intel_sdvo *psb_intel_sdvo,
++				       uint8_t mode)
++{
++	return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
++}
++
++#if 0
++static void psb_intel_sdvo_dump_hdmi_buf(struct psb_intel_sdvo *psb_intel_sdvo)
++{
++	int i, j;
++	uint8_t set_buf_index[2];
++	uint8_t av_split;
++	uint8_t buf_size;
++	uint8_t buf[48];
++	uint8_t *pos;
++
++	psb_intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
++
++	for (i = 0; i <= av_split; i++) {
++		set_buf_index[0] = i; set_buf_index[1] = 0;
++		psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
++				     set_buf_index, 2);
++		psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
++		psb_intel_sdvo_read_response(encoder, &buf_size, 1);
++
++		pos = buf;
++		for (j = 0; j <= buf_size; j += 8) {
++			psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
++					     NULL, 0);
++			psb_intel_sdvo_read_response(encoder, pos, 8);
++			pos += 8;
++		}
++	}
++}
++#endif
++
++static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sdvo)
++{
++	DRM_INFO("HDMI is not supported yet");
++
++	return false;
++#if 0
++	struct dip_infoframe avi_if = {
++		.type = DIP_TYPE_AVI,
++		.ver = DIP_VERSION_AVI,
++		.len = DIP_LEN_AVI,
++	};
++	uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
++	uint8_t set_buf_index[2] = { 1, 0 };
++	uint64_t *data = (uint64_t *)&avi_if;
++	unsigned i;
++
++	intel_dip_infoframe_csum(&avi_if);
++
++	if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
++				  SDVO_CMD_SET_HBUF_INDEX,
++				  set_buf_index, 2))
++		return false;
++
++	for (i = 0; i < sizeof(avi_if); i += 8) {
++		if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
++					  SDVO_CMD_SET_HBUF_DATA,
++					  data, 8))
++			return false;
++		data++;
++	}
++
++	return psb_intel_sdvo_set_value(psb_intel_sdvo,
++				    SDVO_CMD_SET_HBUF_TXRATE,
++				    &tx_rate, 1);
++#endif
++}
++
++static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
++{
++	struct psb_intel_sdvo_tv_format format;
++	uint32_t format_map;
++
++	format_map = 1 << psb_intel_sdvo->tv_format_index;
++	memset(&format, 0, sizeof(format));
++	memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
++
++	BUILD_BUG_ON(sizeof(format) != 6);
++	return psb_intel_sdvo_set_value(psb_intel_sdvo,
++				    SDVO_CMD_SET_TV_FORMAT,
++				    &format, sizeof(format));
++}
++
++/* Program the device's output timing from @mode for the attached output. */
++static bool
++psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
++					struct drm_display_mode *mode)
++{
++	struct psb_intel_sdvo_dtd output_dtd;
++
++	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
++					  psb_intel_sdvo->attached_output))
++		return false;
++
++	psb_intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
++	if (!psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &output_dtd))
++		return false;
++
++	return true;
++}
++
++/*
++ * Ask the device for its preferred input timing for @mode (input 0 is
++ * assumed) and copy the result into @adjusted_mode.  The raw DTD is
++ * also cached in psb_intel_sdvo->input_dtd for later use in mode_set.
++ */
++static bool
++psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
++					struct drm_display_mode *mode,
++					struct drm_display_mode *adjusted_mode)
++{
++	/* Reset the input timing to the screen. Assume always input 0. */
++	if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
++		return false;
++
++	if (!psb_intel_sdvo_create_preferred_input_timing(psb_intel_sdvo,
++						      mode->clock / 10,
++						      mode->hdisplay,
++						      mode->vdisplay))
++		return false;
++
++	if (!psb_intel_sdvo_get_preferred_input_timing(psb_intel_sdvo,
++						   &psb_intel_sdvo->input_dtd))
++		return false;
++
++	psb_intel_sdvo_get_mode_from_dtd(adjusted_mode, &psb_intel_sdvo->input_dtd);
++
++	drm_mode_set_crtcinfo(adjusted_mode, 0);
++	return true;
++}
++
++/*
++ * Encoder mode_fixup hook.  For TV/LVDS outputs the preferred input
++ * timing must be derived from the output timing, so the output timing
++ * is programmed here (early), and the device's preferred input timing
++ * is written into @adjusted_mode.  Also records the pixel multiplier
++ * so the CRTC code can factor it in.
++ */
++static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
++				  struct drm_display_mode *mode,
++				  struct drm_display_mode *adjusted_mode)
++{
++	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
++	int multiplier;
++
++	/* We need to construct preferred input timings based on our
++	 * output timings.  To do that, we have to set the output
++	 * timings, even though this isn't really the right place in
++	 * the sequence to do it. Oh well.
++	 */
++	if (psb_intel_sdvo->is_tv) {
++		if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo, mode))
++			return false;
++
++		(void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
++							     mode,
++							     adjusted_mode);
++	} else if (psb_intel_sdvo->is_lvds) {
++		/* LVDS always drives the panel's fixed mode on the output side. */
++		if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo,
++							     psb_intel_sdvo->sdvo_lvds_fixed_mode))
++			return false;
++
++		(void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
++							     mode,
++							     adjusted_mode);
++	}
++
++	/* Make the CRTC code factor in the SDVO pixel multiplier.  The
++	 * SDVO device will factor out the multiplier during mode_set.
++	 */
++	multiplier = psb_intel_sdvo_get_pixel_multiplier(adjusted_mode);
++	psb_intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
++
++	return true;
++}
++
++/*
++ * Encoder mode_set hook: program the SDVO device for @adjusted_mode.
++ * Sequence: in/out map, output timing, input timing (pre-computed in
++ * mode_fixup for TV/LVDS), encode mode (HDMI vs DVI), TV format, clock
++ * rate multiplier, and finally the SDVO control register itself.
++ */
++static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
++				struct drm_display_mode *mode,
++				struct drm_display_mode *adjusted_mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_crtc *crtc = encoder->crtc;
++	struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
++	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
++	u32 sdvox;
++	struct psb_intel_sdvo_in_out_map in_out;
++	struct psb_intel_sdvo_dtd input_dtd;
++	int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
++	int rate;
++
++	if (!mode)
++		return;
++
++	/* First, set the input mapping for the first input to our controlled
++	 * output. This is only correct if we're a single-input device, in
++	 * which case the first input is the output from the appropriate SDVO
++	 * channel on the motherboard.  In a two-input device, the first input
++	 * will be SDVOB and the second SDVOC.
++	 */
++	in_out.in0 = psb_intel_sdvo->attached_output;
++	in_out.in1 = 0;
++
++	psb_intel_sdvo_set_value(psb_intel_sdvo,
++			     SDVO_CMD_SET_IN_OUT_MAP,
++			     &in_out, sizeof(in_out));
++
++	/* Set the output timings to the screen */
++	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
++					  psb_intel_sdvo->attached_output))
++		return;
++
++	/* We have tried to get input timing in mode_fixup, and filled into
++	 * adjusted_mode.
++	 */
++	if (psb_intel_sdvo->is_tv || psb_intel_sdvo->is_lvds) {
++		input_dtd = psb_intel_sdvo->input_dtd;
++	} else {
++		/* Set the output timing to the screen */
++		if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
++						  psb_intel_sdvo->attached_output))
++			return;
++
++		psb_intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
++		(void) psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &input_dtd);
++	}
++
++	/* Set the input timing to the screen. Assume always input 0. */
++	if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
++		return;
++
++	if (psb_intel_sdvo->has_hdmi_monitor) {
++		psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_HDMI);
++		psb_intel_sdvo_set_colorimetry(psb_intel_sdvo,
++					   SDVO_COLORIMETRY_RGB256);
++		psb_intel_sdvo_set_avi_infoframe(psb_intel_sdvo);
++	} else
++		psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_DVI);
++
++	if (psb_intel_sdvo->is_tv &&
++	    !psb_intel_sdvo_set_tv_format(psb_intel_sdvo))
++		return;
++
++	(void) psb_intel_sdvo_set_input_timing(psb_intel_sdvo, &input_dtd);
++
++	/* Clock multiplier: only 1x/2x/4x are expressible; default to 1x. */
++	switch (pixel_multiplier) {
++	default:
++	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
++	case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
++	case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
++	}
++	if (!psb_intel_sdvo_set_clock_rate_mult(psb_intel_sdvo, rate))
++		return;
++
++	/* Set the SDVO control regs. */
++	sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
++	switch (psb_intel_sdvo->sdvo_reg) {
++	case SDVOB:
++		sdvox &= SDVOB_PRESERVE_MASK;
++		break;
++	case SDVOC:
++		sdvox &= SDVOC_PRESERVE_MASK;
++		break;
++	}
++	sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
++
++	if (psb_intel_crtc->pipe == 1)
++		sdvox |= SDVO_PIPE_B_SELECT;
++	if (psb_intel_sdvo->has_hdmi_audio)
++		sdvox |= SDVO_AUDIO_ENABLE;
++
++	/* FIXME: Check if this is needed for PSB
++	sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
++	*/
++
++	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
++		sdvox |= SDVO_STALL_SELECT;
++	psb_intel_sdvo_write_sdvox(psb_intel_sdvo, sdvox);
++}
++
++/*
++ * Encoder DPMS hook.  Anything other than ON disables the active
++ * outputs (and, for OFF, clears SDVO_ENABLE in the port register).
++ * ON re-enables the port, waits two vblanks, and warns if the device
++ * reports that input training failed.
++ */
++static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
++	u32 temp;
++
++	switch (mode) {
++	case DRM_MODE_DPMS_ON:
++		DRM_DEBUG("DPMS_ON");
++		break;
++	case DRM_MODE_DPMS_OFF:
++		DRM_DEBUG("DPMS_OFF");
++		break;
++	default:
++		DRM_DEBUG("DPMS: %d", mode);
++	}
++
++	if (mode != DRM_MODE_DPMS_ON) {
++		psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, 0);
++		if (0)
++			psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
++
++		if (mode == DRM_MODE_DPMS_OFF) {
++			temp = REG_READ(psb_intel_sdvo->sdvo_reg);
++			if ((temp & SDVO_ENABLE) != 0) {
++				psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
++			}
++		}
++	} else {
++		bool input1, input2;
++		int i;
++		u8 status;
++
++		temp = REG_READ(psb_intel_sdvo->sdvo_reg);
++		if ((temp & SDVO_ENABLE) == 0)
++			psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
++		/* Give the device time to train before checking status. */
++		for (i = 0; i < 2; i++)
++			psb_intel_wait_for_vblank(dev);
++
++		status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
++		/* Warn if the device reported failure to sync.
++		 * A lot of SDVO devices fail to notify of sync, but it's
++		 * a given it the status is a success, we succeeded.
++		 */
++		if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
++			DRM_DEBUG_KMS("First %s output reported failure to "
++					"sync\n", SDVO_NAME(psb_intel_sdvo));
++		}
++
++		if (0)
++			psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
++		psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, psb_intel_sdvo->attached_output);
++	}
++	return;
++}
++
++/*
++ * Connector mode_valid hook: reject double-scan modes, modes outside
++ * the device's pixel-clock range, modes larger than the fixed LVDS
++ * panel, and modes whose worst-case framebuffer would not fit in
++ * stolen VRAM.
++ */
++static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
++				 struct drm_display_mode *mode)
++{
++	struct drm_psb_private *dev_priv = connector->dev->dev_private;
++	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
++
++	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++		return MODE_NO_DBLESCAN;
++
++	if (psb_intel_sdvo->pixel_clock_min > mode->clock)
++		return MODE_CLOCK_LOW;
++
++	if (psb_intel_sdvo->pixel_clock_max < mode->clock)
++		return MODE_CLOCK_HIGH;
++
++	if (psb_intel_sdvo->is_lvds) {
++		if (mode->hdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
++			return MODE_PANEL;
++
++		if (mode->vdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
++			return MODE_PANEL;
++	}
++
++	/* We assume worst case scenario of 32 bpp here, since we don't know */
++	if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
++	    dev_priv->vram_stolen_size)
++		return MODE_MEM;
++
++	return MODE_OK;
++}
++
++/* Read the 8-byte device capability block and log it; false on I/O failure. */
++static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdvo, struct psb_intel_sdvo_caps *caps)
++{
++	BUILD_BUG_ON(sizeof(*caps) != 8);
++	if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
++				  SDVO_CMD_GET_DEVICE_CAPS,
++				  caps, sizeof(*caps)))
++		return false;
++
++	DRM_DEBUG_KMS("SDVO capabilities:\n"
++		      "  vendor_id: %d\n"
++		      "  device_id: %d\n"
++		      "  device_rev_id: %d\n"
++		      "  sdvo_version_major: %d\n"
++		      "  sdvo_version_minor: %d\n"
++		      "  sdvo_inputs_mask: %d\n"
++		      "  smooth_scaling: %d\n"
++		      "  sharp_scaling: %d\n"
++		      "  up_scaling: %d\n"
++		      "  down_scaling: %d\n"
++		      "  stall_support: %d\n"
++		      "  output_flags: %d\n",
++		      caps->vendor_id,
++		      caps->device_id,
++		      caps->device_rev_id,
++		      caps->sdvo_version_major,
++		      caps->sdvo_version_minor,
++		      caps->sdvo_inputs_mask,
++		      caps->smooth_scaling,
++		      caps->sharp_scaling,
++		      caps->up_scaling,
++		      caps->down_scaling,
++		      caps->stall_support,
++		      caps->output_flags);
++
++	return true;
++}
++
++/* No use!  The find/hotplug helpers below are compiled out (#if 0) and
++ * kept only for reference. */
++#if 0
++struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
++{
++	struct drm_connector *connector = NULL;
++	struct psb_intel_sdvo *iout = NULL;
++	struct psb_intel_sdvo *sdvo;
++
++	/* find the sdvo connector */
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		iout = to_psb_intel_sdvo(connector);
++
++		if (iout->type != INTEL_OUTPUT_SDVO)
++			continue;
++
++		sdvo = iout->dev_priv;
++
++		if (sdvo->sdvo_reg == SDVOB && sdvoB)
++			return connector;
++
++		if (sdvo->sdvo_reg == SDVOC && !sdvoB)
++			return connector;
++
++	}
++
++	return NULL;
++}
++
++int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
++{
++	u8 response[2];
++	u8 status;
++	struct psb_intel_sdvo *psb_intel_sdvo;
++	DRM_DEBUG_KMS("\n");
++
++	if (!connector)
++		return 0;
++
++	psb_intel_sdvo = to_psb_intel_sdvo(connector);
++
++	return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
++				    &response, 2) && response[0];
++}
++
++void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
++{
++	u8 response[2];
++	u8 status;
++	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);
++
++	psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
++	psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
++
++	if (on) {
++		psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
++		status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
++
++		psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
++	} else {
++		response[0] = 0;
++		response[1] = 0;
++		psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
++	}
++
++	psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
++	psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
++}
++#endif
++
++/*
++ * Report whether the encoder exposes more than one output type.
++ * Only the low four bits of output_flags are considered.
++ *
++ * NOTE(review): the original expression "caps & -caps" isolates the
++ * lowest set bit and is therefore non-zero whenever *any* output is
++ * present, contradicting the comment below.  Use the standard
++ * "clear lowest set bit" test, which is non-zero only when at least
++ * two bits are set.
++ */
++static bool
++psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
++{
++	/* Is there more than one type of output? */
++	int caps = psb_intel_sdvo->caps.output_flags & 0xf;
++	return (caps & (caps - 1)) != 0;
++}
++
++/* Fetch the EDID over this encoder's own DDC bus (caller must kfree it). */
++static struct edid *
++psb_intel_sdvo_get_edid(struct drm_connector *connector)
++{
++	struct psb_intel_sdvo *sdvo = intel_attached_sdvo(connector);
++	return drm_get_edid(connector, &sdvo->ddc);
++}
++
++/* Mac mini hack -- use the same DDC as the analog connector.
++ * Probes the CRT DDC bus; returns NULL when no EDID is found there.
++ * (The unreachable "return NULL;" after the return was removed.) */
++static struct edid *
++psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
++{
++	struct drm_psb_private *dev_priv = connector->dev->dev_private;
++
++	return drm_get_edid(connector,
++			    &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
++}
++
++/*
++ * Classify an attached digital sink via EDID.  On multifunction
++ * encoders the alternative DDC buses are tried when the primary read
++ * fails, then the analog (CRT) DDC as a last resort.  Updates
++ * has_hdmi_monitor/has_hdmi_audio (and honours force_audio) as a side
++ * effect when a digital sink is found.
++ */
++static enum drm_connector_status
++psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
++{
++	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
++	enum drm_connector_status status;
++	struct edid *edid;
++
++	edid = psb_intel_sdvo_get_edid(connector);
++
++	if (edid == NULL && psb_intel_sdvo_multifunc_encoder(psb_intel_sdvo)) {
++		u8 ddc, saved_ddc = psb_intel_sdvo->ddc_bus;
++
++		/*
++		 * Don't use the 1 as the argument of DDC bus switch to get
++		 * the EDID. It is used for SDVO SPD ROM.
++		 */
++		for (ddc = psb_intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
++			psb_intel_sdvo->ddc_bus = ddc;
++			edid = psb_intel_sdvo_get_edid(connector);
++			if (edid)
++				break;
++		}
++		/*
++		 * If we found the EDID on the other bus,
++		 * assume that is the correct DDC bus.
++		 */
++		if (edid == NULL)
++			psb_intel_sdvo->ddc_bus = saved_ddc;
++	}
++
++	/*
++	 * When there is no edid and no monitor is connected with VGA
++	 * port, try to use the CRT ddc to read the EDID for DVI-connector.
++	 */
++	if (edid == NULL)
++		edid = psb_intel_sdvo_get_analog_edid(connector);
++
++	status = connector_status_unknown;
++	if (edid != NULL) {
++		/* DDC bus is shared, match EDID to connector type */
++		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
++			status = connector_status_connected;
++			if (psb_intel_sdvo->is_hdmi) {
++				psb_intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
++				psb_intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
++			}
++		} else
++			status = connector_status_disconnected;
++		connector->display_info.raw_edid = NULL;
++		kfree(edid);
++	}
++
++	if (status == connector_status_connected) {
++		struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
++		/* User-forced audio overrides what the EDID reported. */
++		if (psb_intel_sdvo_connector->force_audio)
++			psb_intel_sdvo->has_hdmi_audio = psb_intel_sdvo_connector->force_audio > 0;
++	}
++
++	return status;
++}
++
++/*
++ * Connector detect hook: query the attached-displays bitmask from the
++ * device, then classify the result -- TMDS connectors go through the
++ * HDMI sink detection, everything else is cross-checked against the
++ * EDID (a digital EDID on a non-TMDS connector means "disconnected").
++ * On success, is_tv / is_lvds / needs_tv_clock are refreshed.
++ */
++static enum drm_connector_status
++psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
++{
++	uint16_t response;
++	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
++	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
++	enum drm_connector_status ret;
++
++	if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
++				  SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
++		return connector_status_unknown;
++
++	/* add 30ms delay when the output type might be TV */
++	if (psb_intel_sdvo->caps.output_flags &
++	    (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
++		mdelay(30);
++
++	if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2))
++		return connector_status_unknown;
++
++	DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
++		      response & 0xff, response >> 8,
++		      psb_intel_sdvo_connector->output_flag);
++
++	if (response == 0)
++		return connector_status_disconnected;
++
++	psb_intel_sdvo->attached_output = response;
++
++	psb_intel_sdvo->has_hdmi_monitor = false;
++	psb_intel_sdvo->has_hdmi_audio = false;
++
++	if ((psb_intel_sdvo_connector->output_flag & response) == 0)
++		ret = connector_status_disconnected;
++	else if (IS_TMDS(psb_intel_sdvo_connector))
++		ret = psb_intel_sdvo_hdmi_sink_detect(connector);
++	else {
++		struct edid *edid;
++
++		/* if we have an edid check it matches the connection */
++		edid = psb_intel_sdvo_get_edid(connector);
++		if (edid == NULL)
++			edid = psb_intel_sdvo_get_analog_edid(connector);
++		if (edid != NULL) {
++			if (edid->input & DRM_EDID_INPUT_DIGITAL)
++				ret = connector_status_disconnected;
++			else
++				ret = connector_status_connected;
++			connector->display_info.raw_edid = NULL;
++			kfree(edid);
++		} else
++			ret = connector_status_connected;
++	}
++
++	/* May update encoder flag for like clock for SDVO TV, etc.*/
++	if (ret == connector_status_connected) {
++		psb_intel_sdvo->is_tv = false;
++		psb_intel_sdvo->is_lvds = false;
++		psb_intel_sdvo->base.needs_tv_clock = false;
++
++		if (response & SDVO_TV_MASK) {
++			psb_intel_sdvo->is_tv = true;
++			psb_intel_sdvo->base.needs_tv_clock = true;
++		}
++		if (response & SDVO_LVDS_MASK)
++			psb_intel_sdvo->is_lvds = psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL;
++	}
++
++	return ret;
++}
++
++/* Probe EDID over DDC (with the Mac mini analog fallback) and add its
++ * modes when the EDID type matches the connector type. */
++static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
++{
++	struct edid *edid;
++
++	/* set the bus switch and get the modes */
++	edid = psb_intel_sdvo_get_edid(connector);
++
++	/*
++	 * Mac mini hack.  On this device, the DVI-I connector shares one DDC
++	 * link between analog and digital outputs. So, if the regular SDVO
++	 * DDC fails, check to see if the analog output is disconnected, in
++	 * which case we'll look there for the digital DDC data.
++	 */
++	if (edid == NULL)
++		edid = psb_intel_sdvo_get_analog_edid(connector);
++
++	if (edid != NULL) {
++		struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
++		bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
++		bool connector_is_digital = !!IS_TMDS(psb_intel_sdvo_connector);
++
++		/* Shared DDC: only trust an EDID whose type matches ours. */
++		if (connector_is_digital == monitor_is_digital) {
++			drm_mode_connector_update_edid_property(connector, edid);
++			drm_add_edid_modes(connector, edid);
++		}
++
++		connector->display_info.raw_edid = NULL;
++		kfree(edid);
++	}
++}
++
++/*
++ * Set of SDVO TV modes.
++ * Note!  This is in reply order (see loop in get_tv_modes): bit i of
++ * the device's SDTV-resolution-support reply selects sdvo_tv_modes[i].
++ * XXX: all 60Hz refresh?
++ * NOTE(review): clock values presumably in kHz per DRM convention -- confirm.
++ */
++static const struct drm_display_mode sdvo_tv_modes[] = {
++	{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
++		   416, 0, 200, 201, 232, 233, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
++		   416, 0, 240, 241, 272, 273, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
++		   496, 0, 300, 301, 332, 333, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
++		   736, 0, 350, 351, 382, 383, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
++		   736, 0, 400, 401, 432, 433, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
++		   736, 0, 480, 481, 512, 513, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
++		   800, 0, 480, 481, 512, 513, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
++		   800, 0, 576, 577, 608, 609, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
++		   816, 0, 350, 351, 382, 383, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
++		   816, 0, 400, 401, 432, 433, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
++		   816, 0, 480, 481, 512, 513, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
++		   816, 0, 540, 541, 572, 573, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
++		   816, 0, 576, 577, 608, 609, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
++		   864, 0, 576, 577, 608, 609, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
++		   896, 0, 600, 601, 632, 633, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
++		   928, 0, 624, 625, 656, 657, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
++		   1016, 0, 766, 767, 798, 799, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
++		   1120, 0, 768, 769, 800, 801, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
++		   1376, 0, 1024, 1025, 1056, 1057, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++};
++
++/* Query supported SDTV input resolutions for the current TV format and
++ * add the matching entries from sdvo_tv_modes (reply bit i -> mode i). */
++static void psb_intel_sdvo_get_tv_modes(struct drm_connector *connector)
++{
++	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
++	struct psb_intel_sdvo_sdtv_resolution_request tv_res;
++	uint32_t reply = 0, format_map = 0;
++	int i;
++
++	/* Read the list of supported input resolutions for the selected TV
++	 * format.
++	 */
++	format_map = 1 << psb_intel_sdvo->tv_format_index;
++	memcpy(&tv_res, &format_map,
++	       min(sizeof(format_map), sizeof(struct psb_intel_sdvo_sdtv_resolution_request)));
++
++	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, psb_intel_sdvo->attached_output))
++		return;
++
++	BUILD_BUG_ON(sizeof(tv_res) != 3);
++	if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
++				  SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
++				  &tv_res, sizeof(tv_res)))
++		return;
++	/* reply is a 3-byte bitmask read into a zeroed 32-bit variable */
++	if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &reply, 3))
++		return;
++
++	for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
++		if (reply & (1 << i)) {
++			struct drm_display_mode *nmode;
++			nmode = drm_mode_duplicate(connector->dev,
++						   &sdvo_tv_modes[i]);
++			if (nmode)
++				drm_mode_probed_add(connector, nmode);
++		}
++}
++
++/* Build the LVDS mode list (DDC first, VBT fallback) and capture the
++ * preferred mode as sdvo_lvds_fixed_mode. */
++static void psb_intel_sdvo_get_lvds_modes(struct drm_connector *connector)
++{
++	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
++	struct drm_psb_private *dev_priv = connector->dev->dev_private;
++	struct drm_display_mode *newmode;
++
++	/*
++	 * Attempt to get the mode list from DDC.
++	 * Assume that the preferred modes are
++	 * arranged in priority order.
++	 */
++	psb_intel_ddc_get_modes(connector, psb_intel_sdvo->i2c);
++	if (list_empty(&connector->probed_modes) == false)
++		goto end;
++
++	/* Fetch modes from VBT */
++	if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
++		newmode = drm_mode_duplicate(connector->dev,
++					     dev_priv->sdvo_lvds_vbt_mode);
++		if (newmode != NULL) {
++			/* Guarantee the mode is preferred */
++			newmode->type = (DRM_MODE_TYPE_PREFERRED |
++					 DRM_MODE_TYPE_DRIVER);
++			drm_mode_probed_add(connector, newmode);
++		}
++	}
++
++end:
++	/* Remember the first preferred mode as the fixed panel mode. */
++	list_for_each_entry(newmode, &connector->probed_modes, head) {
++		if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
++			psb_intel_sdvo->sdvo_lvds_fixed_mode =
++				drm_mode_duplicate(connector->dev, newmode);
++
++			drm_mode_set_crtcinfo(psb_intel_sdvo->sdvo_lvds_fixed_mode,
++					      0);
++
++			psb_intel_sdvo->is_lvds = true;
++			break;
++		}
++	}
++
++}
++
++/* Connector get_modes hook: dispatch to the TV, LVDS or DDC prober
++ * based on the connector type; returns non-zero if any mode was found. */
++static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
++{
++	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
++
++	if (IS_TV(psb_intel_sdvo_connector))
++		psb_intel_sdvo_get_tv_modes(connector);
++	else if (IS_LVDS(psb_intel_sdvo_connector))
++		psb_intel_sdvo_get_lvds_modes(connector);
++	else
++		psb_intel_sdvo_get_ddc_modes(connector);
++
++	return !list_empty(&connector->probed_modes);
++}
++
++/* Destroy every enhancement property that was created for this
++ * connector (each may be NULL if the device did not support it). */
++static void
++psb_intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
++{
++	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
++	struct drm_device *dev = connector->dev;
++
++	if (psb_intel_sdvo_connector->left)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->left);
++	if (psb_intel_sdvo_connector->right)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->right);
++	if (psb_intel_sdvo_connector->top)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->top);
++	if (psb_intel_sdvo_connector->bottom)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->bottom);
++	if (psb_intel_sdvo_connector->hpos)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->hpos);
++	if (psb_intel_sdvo_connector->vpos)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->vpos);
++	if (psb_intel_sdvo_connector->saturation)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->saturation);
++	if (psb_intel_sdvo_connector->contrast)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->contrast);
++	if (psb_intel_sdvo_connector->hue)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->hue);
++	if (psb_intel_sdvo_connector->sharpness)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->sharpness);
++	if (psb_intel_sdvo_connector->flicker_filter)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter);
++	if (psb_intel_sdvo_connector->flicker_filter_2d)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_2d);
++	if (psb_intel_sdvo_connector->flicker_filter_adaptive)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_adaptive);
++	if (psb_intel_sdvo_connector->tv_luma_filter)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->tv_luma_filter);
++	if (psb_intel_sdvo_connector->tv_chroma_filter)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->tv_chroma_filter);
++	if (psb_intel_sdvo_connector->dot_crawl)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->dot_crawl);
++	if (psb_intel_sdvo_connector->brightness)
++		drm_property_destroy(dev, psb_intel_sdvo_connector->brightness);
++}
++
++/* Connector destroy hook: tear down properties, sysfs entry and the
++ * connector object itself. */
++static void psb_intel_sdvo_destroy(struct drm_connector *connector)
++{
++	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
++
++	if (psb_intel_sdvo_connector->tv_format)
++		drm_property_destroy(connector->dev,
++				     psb_intel_sdvo_connector->tv_format);
++
++	psb_intel_sdvo_destroy_enhance_property(connector);
++	drm_sysfs_connector_remove(connector);
++	drm_connector_cleanup(connector);
++	kfree(connector);
++}
++
++/*
++ * Re-probe the EDID to decide whether the HDMI sink supports audio.
++ * Returns false for non-HDMI encoders or when no digital EDID is read.
++ *
++ * Fix: drm_get_edid() returns a kmalloc'd buffer which was previously
++ * leaked on every call; free it before returning (kfree(NULL) is safe).
++ */
++static bool psb_intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
++{
++	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
++	struct edid *edid;
++	bool has_audio = false;
++
++	if (!psb_intel_sdvo->is_hdmi)
++		return false;
++
++	edid = psb_intel_sdvo_get_edid(connector);
++	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
++		has_audio = drm_detect_monitor_audio(edid);
++	kfree(edid);
++
++	return has_audio;
++}
++
++/*
++ * Connector set_property hook.  Handles the force-audio and
++ * broadcast-RGB properties, the TV format selector, the TV/LVDS margin
++ * pairs (left/right and top/bottom are kept mirrored), and the generic
++ * enhancement properties via the CHECK_PROPERTY macro.  Any accepted
++ * change that affects the output triggers a modeset on the attached
++ * CRTC ("done"); device-register properties are written first
++ * ("set_value").
++ */
++static int
++psb_intel_sdvo_set_property(struct drm_connector *connector,
++			struct drm_property *property,
++			uint64_t val)
++{
++	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
++	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
++	struct drm_psb_private *dev_priv = connector->dev->dev_private;
++	uint16_t temp_value;
++	uint8_t cmd;
++	int ret;
++
++	ret = drm_connector_property_set_value(connector, property, val);
++	if (ret)
++		return ret;
++
++	if (property == dev_priv->force_audio_property) {
++		int i = val;
++		bool has_audio;
++
++		if (i == psb_intel_sdvo_connector->force_audio)
++			return 0;
++
++		psb_intel_sdvo_connector->force_audio = i;
++
++		/* 0 = auto: re-probe the sink; otherwise force on (>0) / off */
++		if (i == 0)
++			has_audio = psb_intel_sdvo_detect_hdmi_audio(connector);
++		else
++			has_audio = i > 0;
++
++		if (has_audio == psb_intel_sdvo->has_hdmi_audio)
++			return 0;
++
++		psb_intel_sdvo->has_hdmi_audio = has_audio;
++		goto done;
++	}
++
++	if (property == dev_priv->broadcast_rgb_property) {
++		if (val == !!psb_intel_sdvo->color_range)
++			return 0;
++
++		psb_intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
++		goto done;
++	}
++
++/* Generic enhancement handler: validate against the cached max, update
++ * the cached value and fall through to the device write below.
++ * temp_value is assigned (= val) before these expand, in the
++ * IS_TV_OR_LVDS branch. */
++#define CHECK_PROPERTY(name, NAME) \
++	if (psb_intel_sdvo_connector->name == property) { \
++		if (psb_intel_sdvo_connector->cur_##name == temp_value) return 0; \
++		if (psb_intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
++		cmd = SDVO_CMD_SET_##NAME; \
++		psb_intel_sdvo_connector->cur_##name = temp_value; \
++		goto set_value; \
++	}
++
++	if (property == psb_intel_sdvo_connector->tv_format) {
++		if (val >= TV_FORMAT_NUM)
++			return -EINVAL;
++
++		if (psb_intel_sdvo->tv_format_index ==
++		    psb_intel_sdvo_connector->tv_format_supported[val])
++			return 0;
++
++		psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[val];
++		goto done;
++	} else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
++		temp_value = val;
++		if (psb_intel_sdvo_connector->left == property) {
++			/* Keep the opposite margin property in sync. */
++			drm_connector_property_set_value(connector,
++							 psb_intel_sdvo_connector->right, val);
++			if (psb_intel_sdvo_connector->left_margin == temp_value)
++				return 0;
++
++			psb_intel_sdvo_connector->left_margin = temp_value;
++			psb_intel_sdvo_connector->right_margin = temp_value;
++			temp_value = psb_intel_sdvo_connector->max_hscan -
++				psb_intel_sdvo_connector->left_margin;
++			cmd = SDVO_CMD_SET_OVERSCAN_H;
++			goto set_value;
++		} else if (psb_intel_sdvo_connector->right == property) {
++			drm_connector_property_set_value(connector,
++							 psb_intel_sdvo_connector->left, val);
++			if (psb_intel_sdvo_connector->right_margin == temp_value)
++				return 0;
++
++			psb_intel_sdvo_connector->left_margin = temp_value;
++			psb_intel_sdvo_connector->right_margin = temp_value;
++			temp_value = psb_intel_sdvo_connector->max_hscan -
++				psb_intel_sdvo_connector->left_margin;
++			cmd = SDVO_CMD_SET_OVERSCAN_H;
++			goto set_value;
++		} else if (psb_intel_sdvo_connector->top == property) {
++			drm_connector_property_set_value(connector,
++							 psb_intel_sdvo_connector->bottom, val);
++			if (psb_intel_sdvo_connector->top_margin == temp_value)
++				return 0;
++
++			psb_intel_sdvo_connector->top_margin = temp_value;
++			psb_intel_sdvo_connector->bottom_margin = temp_value;
++			temp_value = psb_intel_sdvo_connector->max_vscan -
++				psb_intel_sdvo_connector->top_margin;
++			cmd = SDVO_CMD_SET_OVERSCAN_V;
++			goto set_value;
++		} else if (psb_intel_sdvo_connector->bottom == property) {
++			drm_connector_property_set_value(connector,
++							 psb_intel_sdvo_connector->top, val);
++			if (psb_intel_sdvo_connector->bottom_margin == temp_value)
++				return 0;
++
++			psb_intel_sdvo_connector->top_margin = temp_value;
++			psb_intel_sdvo_connector->bottom_margin = temp_value;
++			temp_value = psb_intel_sdvo_connector->max_vscan -
++				psb_intel_sdvo_connector->top_margin;
++			cmd = SDVO_CMD_SET_OVERSCAN_V;
++			goto set_value;
++		}
++		CHECK_PROPERTY(hpos, HPOS)
++		CHECK_PROPERTY(vpos, VPOS)
++		CHECK_PROPERTY(saturation, SATURATION)
++		CHECK_PROPERTY(contrast, CONTRAST)
++		CHECK_PROPERTY(hue, HUE)
++		CHECK_PROPERTY(brightness, BRIGHTNESS)
++		CHECK_PROPERTY(sharpness, SHARPNESS)
++		CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
++		CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
++		CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
++		CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
++		CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
++		CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
++	}
++
++	return -EINVAL; /* unknown property */
++
++set_value:
++	if (!psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &temp_value, 2))
++		return -EIO;
++
++
++done:
++	if (psb_intel_sdvo->base.base.crtc) {
++		struct drm_crtc *crtc = psb_intel_sdvo->base.base.crtc;
++		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
++					 crtc->y, crtc->fb);
++	}
++
++	return 0;
++#undef CHECK_PROPERTY
++}
++
++/* Encoder helper vtable: SDVO-specific dpms/fixup/mode_set plus the
++ * generic psb prepare/commit. */
++static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
++	.dpms = psb_intel_sdvo_dpms,
++	.mode_fixup = psb_intel_sdvo_mode_fixup,
++	.prepare = psb_intel_encoder_prepare,
++	.mode_set = psb_intel_sdvo_mode_set,
++	.commit = psb_intel_encoder_commit,
++};
++
++static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
++	.dpms = drm_helper_connector_dpms,
++	.detect = psb_intel_sdvo_detect,
++	.fill_modes = drm_helper_probe_single_connector_modes,
++	.set_property = psb_intel_sdvo_set_property,
++	.destroy = psb_intel_sdvo_destroy,
++};
++
++static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
++	.get_modes = psb_intel_sdvo_get_modes,
++	.mode_valid = psb_intel_sdvo_mode_valid,
++	.best_encoder = psb_intel_best_encoder,
++};
++
++static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
++{
++	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
++
++	if (psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL)
++		drm_mode_destroy(encoder->dev,
++				 psb_intel_sdvo->sdvo_lvds_fixed_mode);
++
++	i2c_del_adapter(&psb_intel_sdvo->ddc);
++	psb_intel_encoder_destroy(encoder);
++}
++
++static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
++	.destroy = psb_intel_sdvo_enc_destroy,
++};
++
++static void
++psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
++{
++	/* FIXME: At the moment, ddc_bus = 2 is the only thing that works.
++	 * We need to figure out if this is true for all available poulsbo
++	 * hardware, or if we need to fiddle with the guessing code above.
++	 * The problem might go away if we can parse sdvo mappings from bios */
++	sdvo->ddc_bus = 2;
++
++#if 0
++	uint16_t mask = 0;
++	unsigned int num_bits;
++
++	/* Make a mask of outputs less than or equal to our own priority in the
++	 * list.
++	 */
++	switch (sdvo->controlled_output) {
++	case SDVO_OUTPUT_LVDS1:
++		mask |= SDVO_OUTPUT_LVDS1;
++	case SDVO_OUTPUT_LVDS0:
++		mask |= SDVO_OUTPUT_LVDS0;
++	case SDVO_OUTPUT_TMDS1:
++		mask |= SDVO_OUTPUT_TMDS1;
++	case SDVO_OUTPUT_TMDS0:
++		mask |= SDVO_OUTPUT_TMDS0;
++	case SDVO_OUTPUT_RGB1:
++		mask |= SDVO_OUTPUT_RGB1;
++	case SDVO_OUTPUT_RGB0:
++		mask |= SDVO_OUTPUT_RGB0;
++		break;
++	}
++
++	/* Count bits to find what number we are in the priority list. */
++	mask &= sdvo->caps.output_flags;
++	num_bits = hweight16(mask);
++	/* If more than 3 outputs, default to DDC bus 3 for now. */
++	if (num_bits > 3)
++		num_bits = 3;
++
++	/* Corresponds to SDVO_CONTROL_BUS_DDCx */
++	sdvo->ddc_bus = 1 << num_bits;
++#endif
++}
++
++/**
++ * Choose the appropriate DDC bus for control bus switch command for this
++ * SDVO output based on the controlled output.
++ *
++ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
++ * outputs, then LVDS outputs.
++ */
++static void
++psb_intel_sdvo_select_ddc_bus(struct drm_psb_private *dev_priv,
++			  struct psb_intel_sdvo *sdvo, u32 reg)
++{
++	struct sdvo_device_mapping *mapping;
++
++	if (IS_SDVOB(reg))
++		mapping = &(dev_priv->sdvo_mappings[0]);
++	else
++		mapping = &(dev_priv->sdvo_mappings[1]);
++
++	if (mapping->initialized)
++		sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
++	else
++		psb_intel_sdvo_guess_ddc_bus(sdvo);
++}
++
++static void
++psb_intel_sdvo_select_i2c_bus(struct drm_psb_private *dev_priv,
++			  struct psb_intel_sdvo *sdvo, u32 reg)
++{
++	struct sdvo_device_mapping *mapping;
++	u8 pin, speed;
++
++	if (IS_SDVOB(reg))
++		mapping = &dev_priv->sdvo_mappings[0];
++	else
++		mapping = &dev_priv->sdvo_mappings[1];
++
++	pin = GMBUS_PORT_DPB;
++	speed = GMBUS_RATE_1MHZ >> 8;
++	if (mapping->initialized) {
++		pin = mapping->i2c_pin;
++		speed = mapping->i2c_speed;
++	}
++
++	if (pin < GMBUS_NUM_PORTS) {
++		sdvo->i2c = &dev_priv->gmbus[pin].adapter;
++		gma_intel_gmbus_set_speed(sdvo->i2c, speed);
++		gma_intel_gmbus_force_bit(sdvo->i2c, true);
++	} else
++		sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
++}
++
++static bool
++psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int device)
++{
++	return psb_intel_sdvo_check_supp_encode(psb_intel_sdvo);
++}
++
++static u8
++psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct sdvo_device_mapping *my_mapping, *other_mapping;
++
++	if (IS_SDVOB(sdvo_reg)) {
++		my_mapping = &dev_priv->sdvo_mappings[0];
++		other_mapping = &dev_priv->sdvo_mappings[1];
++	} else {
++		my_mapping = &dev_priv->sdvo_mappings[1];
++		other_mapping = &dev_priv->sdvo_mappings[0];
++	}
++
++	/* If the BIOS described our SDVO device, take advantage of it. */
++	if (my_mapping->slave_addr)
++		return my_mapping->slave_addr;
++
++	/* If the BIOS only described a different SDVO device, use the
++	 * address that it isn't using.
++	 */
++	if (other_mapping->slave_addr) {
++		if (other_mapping->slave_addr == 0x70)
++			return 0x72;
++		else
++			return 0x70;
++	}
++
++	/* No SDVO device info is found for another DVO port,
++	 * so use mapping assumption we had before BIOS parsing.
++	 */
++	if (IS_SDVOB(sdvo_reg))
++		return 0x70;
++	else
++		return 0x72;
++}
++
++static void
++psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
++			  struct psb_intel_sdvo *encoder)
++{
++	drm_connector_init(encoder->base.base.dev,
++			   &connector->base.base,
++			   &psb_intel_sdvo_connector_funcs,
++			   connector->base.base.connector_type);
++
++	drm_connector_helper_add(&connector->base.base,
++				 &psb_intel_sdvo_connector_helper_funcs);
++
++	connector->base.base.interlace_allowed = 0;
++	connector->base.base.doublescan_allowed = 0;
++	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
++
++	psb_intel_connector_attach_encoder(&connector->base, &encoder->base);
++	drm_sysfs_connector_add(&connector->base.base);
++}
++
++static void
++psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
++{
++	/* FIXME: We don't support HDMI at the moment
++	struct drm_device *dev = connector->base.base.dev;
++
++	intel_attach_force_audio_property(&connector->base.base);
++	if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
++		intel_attach_broadcast_rgb_property(&connector->base.base);
++	*/
++}
++
++static bool
++psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
++{
++	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
++	struct drm_connector *connector;
++	struct psb_intel_connector *intel_connector;
++	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
++
++	psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
++	if (!psb_intel_sdvo_connector)
++		return false;
++
++	if (device == 0) {
++		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
++		psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
++	} else if (device == 1) {
++		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
++		psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
++	}
++
++	intel_connector = &psb_intel_sdvo_connector->base;
++	connector = &intel_connector->base;
++	// connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
++	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
++	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
++
++	if (psb_intel_sdvo_is_hdmi_connector(psb_intel_sdvo, device)) {
++		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
++		psb_intel_sdvo->is_hdmi = true;
++	}
++	psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
++				       (1 << INTEL_ANALOG_CLONE_BIT));
++
++	psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
++	if (psb_intel_sdvo->is_hdmi)
++		psb_intel_sdvo_add_hdmi_properties(psb_intel_sdvo_connector);
++
++	return true;
++}
++
++static bool
++psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
++{
++	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
++	struct drm_connector *connector;
++	struct psb_intel_connector *intel_connector;
++	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
++
++	psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
++	if (!psb_intel_sdvo_connector)
++		return false;
++
++	intel_connector = &psb_intel_sdvo_connector->base;
++	connector = &intel_connector->base;
++	encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
++	connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
++
++	psb_intel_sdvo->controlled_output |= type;
++	psb_intel_sdvo_connector->output_flag = type;
++
++	psb_intel_sdvo->is_tv = true;
++	psb_intel_sdvo->base.needs_tv_clock = true;
++	psb_intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
++
++	psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
++
++	if (!psb_intel_sdvo_tv_create_property(psb_intel_sdvo, psb_intel_sdvo_connector, type))
++		goto err;
++
++	if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
++		goto err;
++
++	return true;
++
++err:
++	psb_intel_sdvo_destroy(connector);
++	return false;
++}
++
++static bool
++psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
++{
++	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
++	struct drm_connector *connector;
++	struct psb_intel_connector *intel_connector;
++	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
++
++	psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
++	if (!psb_intel_sdvo_connector)
++		return false;
++
++	intel_connector = &psb_intel_sdvo_connector->base;
++	connector = &intel_connector->base;
++	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++	encoder->encoder_type = DRM_MODE_ENCODER_DAC;
++	connector->connector_type = DRM_MODE_CONNECTOR_VGA;
++
++	if (device == 0) {
++		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
++		psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
++	} else if (device == 1) {
++		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
++		psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
++	}
++
++	psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
++				       (1 << INTEL_ANALOG_CLONE_BIT));
++
++	psb_intel_sdvo_connector_init(psb_intel_sdvo_connector,
++				  psb_intel_sdvo);
++	return true;
++}
++
++static bool
++psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
++{
++	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
++	struct drm_connector *connector;
++	struct psb_intel_connector *intel_connector;
++	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
++
++	psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
++	if (!psb_intel_sdvo_connector)
++		return false;
++
++	intel_connector = &psb_intel_sdvo_connector->base;
++	connector = &intel_connector->base;
++	encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
++	connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
++
++	if (device == 0) {
++		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
++		psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
++	} else if (device == 1) {
++		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
++		psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
++	}
++
++	psb_intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
++				       (1 << INTEL_SDVO_LVDS_CLONE_BIT));
++
++	psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
++	if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
++		goto err;
++
++	return true;
++
++err:
++	psb_intel_sdvo_destroy(connector);
++	return false;
++}
++
++static bool
++psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags)
++{
++	psb_intel_sdvo->is_tv = false;
++	psb_intel_sdvo->base.needs_tv_clock = false;
++	psb_intel_sdvo->is_lvds = false;
++
++	/* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
++
++	if (flags & SDVO_OUTPUT_TMDS0)
++		if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 0))
++			return false;
++
++	if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
++		if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 1))
++			return false;
++
++	/* TV has no XXX1 function block */
++	if (flags & SDVO_OUTPUT_SVID0)
++		if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_SVID0))
++			return false;
++
++	if (flags & SDVO_OUTPUT_CVBS0)
++		if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_CVBS0))
++			return false;
++
++	if (flags & SDVO_OUTPUT_RGB0)
++		if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 0))
++			return false;
++
++	if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
++		if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 1))
++			return false;
++
++	if (flags & SDVO_OUTPUT_LVDS0)
++		if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 0))
++			return false;
++
++	if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
++		if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 1))
++			return false;
++
++	if ((flags & SDVO_OUTPUT_MASK) == 0) {
++		unsigned char bytes[2];
++
++		psb_intel_sdvo->controlled_output = 0;
++		memcpy(bytes, &psb_intel_sdvo->caps.output_flags, 2);
++		DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
++			      SDVO_NAME(psb_intel_sdvo),
++			      bytes[0], bytes[1]);
++		return false;
++	}
++	psb_intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
++
++	return true;
++}
++
++static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
++					  struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
++					  int type)
++{
++	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
++	struct psb_intel_sdvo_tv_format format;
++	uint32_t format_map, i;
++
++	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, type))
++		return false;
++
++	BUILD_BUG_ON(sizeof(format) != 6);
++	if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
++				  SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
++				  &format, sizeof(format)))
++		return false;
++
++	memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
++
++	if (format_map == 0)
++		return false;
++
++	psb_intel_sdvo_connector->format_supported_num = 0;
++	for (i = 0 ; i < TV_FORMAT_NUM; i++)
++		if (format_map & (1 << i))
++			psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;
++
++
++	psb_intel_sdvo_connector->tv_format =
++			drm_property_create(dev, DRM_MODE_PROP_ENUM,
++					    "mode", psb_intel_sdvo_connector->format_supported_num);
++	if (!psb_intel_sdvo_connector->tv_format)
++		return false;
++
++	for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++)
++		drm_property_add_enum(
++				psb_intel_sdvo_connector->tv_format, i,
++				i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
++
++	psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
++	drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
++				      psb_intel_sdvo_connector->tv_format, 0);
++	return true;
++
++}
++
++#define ENHANCEMENT(name, NAME) do { \
++	if (enhancements.name) { \
++		if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
++		    !psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
++			return false; \
++		psb_intel_sdvo_connector->max_##name = data_value[0]; \
++		psb_intel_sdvo_connector->cur_##name = response; \
++		psb_intel_sdvo_connector->name = \
++			drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
++		if (!psb_intel_sdvo_connector->name) return false; \
++		drm_connector_attach_property(connector, \
++					      psb_intel_sdvo_connector->name, \
++					      psb_intel_sdvo_connector->cur_##name); \
++		DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
++			      data_value[0], data_value[1], response); \
++	} \
++} while(0)
++
++static bool
++psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
++				      struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
++				      struct psb_intel_sdvo_enhancements_reply enhancements)
++{
++	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
++	struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
++	uint16_t response, data_value[2];
++
++	/* when horizontal overscan is supported, add the left/right property */
++	if (enhancements.overscan_h) {
++		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
++					  SDVO_CMD_GET_MAX_OVERSCAN_H,
++					  &data_value, 4))
++			return false;
++
++		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
++					  SDVO_CMD_GET_OVERSCAN_H,
++					  &response, 2))
++			return false;
++
++		psb_intel_sdvo_connector->max_hscan = data_value[0];
++		psb_intel_sdvo_connector->left_margin = data_value[0] - response;
++		psb_intel_sdvo_connector->right_margin = psb_intel_sdvo_connector->left_margin;
++		psb_intel_sdvo_connector->left =
++			drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
++		if (!psb_intel_sdvo_connector->left)
++			return false;
++
++		drm_connector_attach_property(connector,
++					      psb_intel_sdvo_connector->left,
++					      psb_intel_sdvo_connector->left_margin);
++
++		psb_intel_sdvo_connector->right =
++			drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
++		if (!psb_intel_sdvo_connector->right)
++			return false;
++
++		drm_connector_attach_property(connector,
++					      psb_intel_sdvo_connector->right,
++					      psb_intel_sdvo_connector->right_margin);
++		DRM_DEBUG_KMS("h_overscan: max %d, "
++			      "default %d, current %d\n",
++			      data_value[0], data_value[1], response);
++	}
++
++	if (enhancements.overscan_v) {
++		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
++					  SDVO_CMD_GET_MAX_OVERSCAN_V,
++					  &data_value, 4))
++			return false;
++
++		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
++					  SDVO_CMD_GET_OVERSCAN_V,
++					  &response, 2))
++			return false;
++
++		psb_intel_sdvo_connector->max_vscan = data_value[0];
++		psb_intel_sdvo_connector->top_margin = data_value[0] - response;
++		psb_intel_sdvo_connector->bottom_margin = psb_intel_sdvo_connector->top_margin;
++		psb_intel_sdvo_connector->top =
++			drm_property_create_range(dev, 0, "top_margin", 0, data_value[0]);
++		if (!psb_intel_sdvo_connector->top)
++			return false;
++
++		drm_connector_attach_property(connector,
++					      psb_intel_sdvo_connector->top,
++					      psb_intel_sdvo_connector->top_margin);
++
++		psb_intel_sdvo_connector->bottom =
++			drm_property_create_range(dev, 0, "bottom_margin", 0, data_value[0]);
++		if (!psb_intel_sdvo_connector->bottom)
++			return false;
++
++		drm_connector_attach_property(connector,
++					      psb_intel_sdvo_connector->bottom,
++					      psb_intel_sdvo_connector->bottom_margin);
++		DRM_DEBUG_KMS("v_overscan: max %d, "
++			      "default %d, current %d\n",
++			      data_value[0], data_value[1], response);
++	}
++
++	ENHANCEMENT(hpos, HPOS);
++	ENHANCEMENT(vpos, VPOS);
++	ENHANCEMENT(saturation, SATURATION);
++	ENHANCEMENT(contrast, CONTRAST);
++	ENHANCEMENT(hue, HUE);
++	ENHANCEMENT(sharpness, SHARPNESS);
++	ENHANCEMENT(brightness, BRIGHTNESS);
++	ENHANCEMENT(flicker_filter, FLICKER_FILTER);
++	ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
++	ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
++	ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
++	ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
++
++	if (enhancements.dot_crawl) {
++		if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
++			return false;
++
++		psb_intel_sdvo_connector->max_dot_crawl = 1;
++		psb_intel_sdvo_connector->cur_dot_crawl = response & 0x1;
++		psb_intel_sdvo_connector->dot_crawl =
++			drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
++		if (!psb_intel_sdvo_connector->dot_crawl)
++			return false;
++
++		drm_connector_attach_property(connector,
++					      psb_intel_sdvo_connector->dot_crawl,
++					      psb_intel_sdvo_connector->cur_dot_crawl);
++		DRM_DEBUG_KMS("dot crawl: current %d\n", response);
++	}
++
++	return true;
++}
++
++static bool
++psb_intel_sdvo_create_enhance_property_lvds(struct psb_intel_sdvo *psb_intel_sdvo,
++					struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
++					struct psb_intel_sdvo_enhancements_reply enhancements)
++{
++	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
++	struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
++	uint16_t response, data_value[2];
++
++	ENHANCEMENT(brightness, BRIGHTNESS);
++
++	return true;
++}
++#undef ENHANCEMENT
++
++static bool psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
++					       struct psb_intel_sdvo_connector *psb_intel_sdvo_connector)
++{
++	union {
++		struct psb_intel_sdvo_enhancements_reply reply;
++		uint16_t response;
++	} enhancements;
++
++	BUILD_BUG_ON(sizeof(enhancements) != 2);
++
++	enhancements.response = 0;
++	psb_intel_sdvo_get_value(psb_intel_sdvo,
++			     SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
++			     &enhancements, sizeof(enhancements));
++	if (enhancements.response == 0) {
++		DRM_DEBUG_KMS("No enhancement is supported\n");
++		return true;
++	}
++
++	if (IS_TV(psb_intel_sdvo_connector))
++		return psb_intel_sdvo_create_enhance_property_tv(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
++	else if(IS_LVDS(psb_intel_sdvo_connector))
++		return psb_intel_sdvo_create_enhance_property_lvds(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
++	else
++		return true;
++}
++
++static int psb_intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
++				     struct i2c_msg *msgs,
++				     int num)
++{
++	struct psb_intel_sdvo *sdvo = adapter->algo_data;
++
++	if (!psb_intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
++		return -EIO;
++
++	return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
++}
++
++static u32 psb_intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
++{
++	struct psb_intel_sdvo *sdvo = adapter->algo_data;
++	return sdvo->i2c->algo->functionality(sdvo->i2c);
++}
++
++static const struct i2c_algorithm psb_intel_sdvo_ddc_proxy = {
++	.master_xfer	= psb_intel_sdvo_ddc_proxy_xfer,
++	.functionality	= psb_intel_sdvo_ddc_proxy_func
++};
++
++static bool
++psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
++			  struct drm_device *dev)
++{
++	sdvo->ddc.owner = THIS_MODULE;
++	sdvo->ddc.class = I2C_CLASS_DDC;
++	snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
++	sdvo->ddc.dev.parent = &dev->pdev->dev;
++	sdvo->ddc.algo_data = sdvo;
++	sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
++
++	return i2c_add_adapter(&sdvo->ddc) == 0;
++}
++
++bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	struct psb_intel_encoder *psb_intel_encoder;
++	struct psb_intel_sdvo *psb_intel_sdvo;
++	int i;
++
++	psb_intel_sdvo = kzalloc(sizeof(struct psb_intel_sdvo), GFP_KERNEL);
++	if (!psb_intel_sdvo)
++		return false;
++
++	psb_intel_sdvo->sdvo_reg = sdvo_reg;
++	psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
++	psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
++	if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) {
++		kfree(psb_intel_sdvo);
++		return false;
++	}
++
++	/* encoder type will be decided later */
++	psb_intel_encoder = &psb_intel_sdvo->base;
++	psb_intel_encoder->type = INTEL_OUTPUT_SDVO;
++	drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
++
++	/* Read the regs to test if we can talk to the device */
++	for (i = 0; i < 0x40; i++) {
++		u8 byte;
++
++		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, i, &byte)) {
++			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
++				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
++			goto err;
++		}
++	}
++
++	if (IS_SDVOB(sdvo_reg))
++		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
++	else
++		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
++
++	drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs);
++
++	/* In default case sdvo lvds is false */
++	if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
++		goto err;
++
++	if (psb_intel_sdvo_output_setup(psb_intel_sdvo,
++				    psb_intel_sdvo->caps.output_flags) != true) {
++		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
++			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
++		goto err;
++	}
++
++	psb_intel_sdvo_select_ddc_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
++
++	/* Set the input timing to the screen. Assume always input 0. */
++	if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
++		goto err;
++
++	if (!psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_sdvo,
++						    &psb_intel_sdvo->pixel_clock_min,
++						    &psb_intel_sdvo->pixel_clock_max))
++		goto err;
++
++	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
++			"clock range %dMHz - %dMHz, "
++			"input 1: %c, input 2: %c, "
++			"output 1: %c, output 2: %c\n",
++			SDVO_NAME(psb_intel_sdvo),
++			psb_intel_sdvo->caps.vendor_id, psb_intel_sdvo->caps.device_id,
++			psb_intel_sdvo->caps.device_rev_id,
++			psb_intel_sdvo->pixel_clock_min / 1000,
++			psb_intel_sdvo->pixel_clock_max / 1000,
++			(psb_intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
++			(psb_intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
++			/* check currently supported outputs */
++			psb_intel_sdvo->caps.output_flags &
++			(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
++			psb_intel_sdvo->caps.output_flags &
++			(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
++	return true;
++
++err:
++	drm_encoder_cleanup(&psb_intel_encoder->base);
++	i2c_del_adapter(&psb_intel_sdvo->ddc);
++	kfree(psb_intel_sdvo);
++
++	return false;
++}
+diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h b/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
+new file mode 100644
+index 0000000..600e797
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
+@@ -0,0 +1,723 @@
++/*
++ * Copyright © 2006-2007 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *	Eric Anholt <eric at anholt.net>
++ */
++
++/**
++ * @file SDVO command definitions and structures.
++ */
++
++#define SDVO_OUTPUT_FIRST   (0)
++#define SDVO_OUTPUT_TMDS0   (1 << 0)
++#define SDVO_OUTPUT_RGB0    (1 << 1)
++#define SDVO_OUTPUT_CVBS0   (1 << 2)
++#define SDVO_OUTPUT_SVID0   (1 << 3)
++#define SDVO_OUTPUT_YPRPB0  (1 << 4)
++#define SDVO_OUTPUT_SCART0  (1 << 5)
++#define SDVO_OUTPUT_LVDS0   (1 << 6)
++#define SDVO_OUTPUT_TMDS1   (1 << 8)
++#define SDVO_OUTPUT_RGB1    (1 << 9)
++#define SDVO_OUTPUT_CVBS1   (1 << 10)
++#define SDVO_OUTPUT_SVID1   (1 << 11)
++#define SDVO_OUTPUT_YPRPB1  (1 << 12)
++#define SDVO_OUTPUT_SCART1  (1 << 13)
++#define SDVO_OUTPUT_LVDS1   (1 << 14)
++#define SDVO_OUTPUT_LAST    (14)
++
++struct psb_intel_sdvo_caps {
++    u8 vendor_id;
++    u8 device_id;
++    u8 device_rev_id;
++    u8 sdvo_version_major;
++    u8 sdvo_version_minor;
++    unsigned int sdvo_inputs_mask:2;
++    unsigned int smooth_scaling:1;
++    unsigned int sharp_scaling:1;
++    unsigned int up_scaling:1;
++    unsigned int down_scaling:1;
++    unsigned int stall_support:1;
++    unsigned int pad:1;
++    u16 output_flags;
++} __attribute__((packed));
++
++/** This matches the EDID DTD structure, more or less */
++struct psb_intel_sdvo_dtd {
++    struct {
++	u16 clock;		/**< pixel clock, in 10kHz units */
++	u8 h_active;		/**< lower 8 bits (pixels) */
++	u8 h_blank;		/**< lower 8 bits (pixels) */
++	u8 h_high;		/**< upper 4 bits each h_active, h_blank */
++	u8 v_active;		/**< lower 8 bits (lines) */
++	u8 v_blank;		/**< lower 8 bits (lines) */
++	u8 v_high;		/**< upper 4 bits each v_active, v_blank */
++    } part1;
++
++    struct {
++	u8 h_sync_off;	/**< lower 8 bits, from hblank start */
++	u8 h_sync_width;	/**< lower 8 bits (pixels) */
++	/** lower 4 bits each vsync offset, vsync width */
++	u8 v_sync_off_width;
++	/**
++	 * 2 high bits of hsync offset, 2 high bits of hsync width,
++	 * bits 4-5 of vsync offset, and 2 high bits of vsync width.
++	 */
++	u8 sync_off_width_high;
++	u8 dtd_flags;
++	u8 sdvo_flags;
++	/** bits 6-7 of vsync offset at bits 6-7 */
++	u8 v_sync_off_high;
++	u8 reserved;
++    } part2;
++} __attribute__((packed));
++
++struct psb_intel_sdvo_pixel_clock_range {
++    u16 min;			/**< pixel clock, in 10kHz units */
++    u16 max;			/**< pixel clock, in 10kHz units */
++} __attribute__((packed));
++
++struct psb_intel_sdvo_preferred_input_timing_args {
++    u16 clock;
++    u16 width;
++    u16 height;
++    u8	interlace:1;
++    u8	scaled:1;
++    u8	pad:6;
++} __attribute__((packed));
++
++/* I2C registers for SDVO */
++#define SDVO_I2C_ARG_0				0x07
++#define SDVO_I2C_ARG_1				0x06
++#define SDVO_I2C_ARG_2				0x05
++#define SDVO_I2C_ARG_3				0x04
++#define SDVO_I2C_ARG_4				0x03
++#define SDVO_I2C_ARG_5				0x02
++#define SDVO_I2C_ARG_6				0x01
++#define SDVO_I2C_ARG_7				0x00
++#define SDVO_I2C_OPCODE				0x08
++#define SDVO_I2C_CMD_STATUS			0x09
++#define SDVO_I2C_RETURN_0			0x0a
++#define SDVO_I2C_RETURN_1			0x0b
++#define SDVO_I2C_RETURN_2			0x0c
++#define SDVO_I2C_RETURN_3			0x0d
++#define SDVO_I2C_RETURN_4			0x0e
++#define SDVO_I2C_RETURN_5			0x0f
++#define SDVO_I2C_RETURN_6			0x10
++#define SDVO_I2C_RETURN_7			0x11
++#define SDVO_I2C_VENDOR_BEGIN			0x20
++
++/* Status results */
++#define SDVO_CMD_STATUS_POWER_ON		0x0
++#define SDVO_CMD_STATUS_SUCCESS			0x1
++#define SDVO_CMD_STATUS_NOTSUPP			0x2
++#define SDVO_CMD_STATUS_INVALID_ARG		0x3
++#define SDVO_CMD_STATUS_PENDING			0x4
++#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED	0x5
++#define SDVO_CMD_STATUS_SCALING_NOT_SUPP	0x6
++
++/* SDVO commands, argument/result registers */
++
++#define SDVO_CMD_RESET					0x01
++
++/** Returns a struct intel_sdvo_caps */
++#define SDVO_CMD_GET_DEVICE_CAPS			0x02
++
++#define SDVO_CMD_GET_FIRMWARE_REV			0x86
++# define SDVO_DEVICE_FIRMWARE_MINOR			SDVO_I2C_RETURN_0
++# define SDVO_DEVICE_FIRMWARE_MAJOR			SDVO_I2C_RETURN_1
++# define SDVO_DEVICE_FIRMWARE_PATCH			SDVO_I2C_RETURN_2
++
++/**
++ * Reports which inputs are trained (managed to sync).
++ *
++ * Devices must have trained within 2 vsyncs of a mode change.
++ */
++#define SDVO_CMD_GET_TRAINED_INPUTS			0x03
++struct psb_intel_sdvo_get_trained_inputs_response {
++    unsigned int input0_trained:1;
++    unsigned int input1_trained:1;
++    unsigned int pad:6;
++} __attribute__((packed));
++
++/** Returns a struct intel_sdvo_output_flags of active outputs. */
++#define SDVO_CMD_GET_ACTIVE_OUTPUTS			0x04
++
++/**
++ * Sets the current set of active outputs.
++ *
++ * Takes a struct intel_sdvo_output_flags.  Must be preceded by a SET_IN_OUT_MAP
++ * on multi-output devices.
++ */
++#define SDVO_CMD_SET_ACTIVE_OUTPUTS			0x05
++
++/**
++ * Returns the current mapping of SDVO inputs to outputs on the device.
++ *
++ * Returns two struct intel_sdvo_output_flags structures.
++ */
++#define SDVO_CMD_GET_IN_OUT_MAP				0x06
++struct psb_intel_sdvo_in_out_map {
++    u16 in0, in1;
++};
++
++/**
++ * Sets the current mapping of SDVO inputs to outputs on the device.
++ *
++ * Takes two struct intel_sdvo_output_flags structures.
++ */
++#define SDVO_CMD_SET_IN_OUT_MAP				0x07
++
++/**
++ * Returns a struct intel_sdvo_output_flags of attached displays.
++ */
++#define SDVO_CMD_GET_ATTACHED_DISPLAYS			0x0b
++
++/**
++ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
++ */
++#define SDVO_CMD_GET_HOT_PLUG_SUPPORT			0x0c
++
++/**
++ * Takes a struct intel_sdvo_output_flags.
++ */
++#define SDVO_CMD_SET_ACTIVE_HOT_PLUG			0x0d
++
++/**
++ * Returns a struct intel_sdvo_output_flags of displays with hot plug
++ * interrupts enabled.
++ */
++#define SDVO_CMD_GET_ACTIVE_HOT_PLUG			0x0e
++
++#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE		0x0f
++struct intel_sdvo_get_interrupt_event_source_response {
++    u16 interrupt_status;
++    unsigned int ambient_light_interrupt:1;
++    unsigned int hdmi_audio_encrypt_change:1;
++    unsigned int pad:6;
++} __attribute__((packed));
++
++/**
++ * Selects which input is affected by future input commands.
++ *
++ * Commands affected include SET_INPUT_TIMINGS_PART[12],
++ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
++ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
++ */
++#define SDVO_CMD_SET_TARGET_INPUT			0x10
++struct psb_intel_sdvo_set_target_input_args {
++    unsigned int target_1:1;
++    unsigned int pad:7;
++} __attribute__((packed));
++
++/**
++ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
++ * future output commands.
++ *
++ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
++ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
++ */
++#define SDVO_CMD_SET_TARGET_OUTPUT			0x11
++
++#define SDVO_CMD_GET_INPUT_TIMINGS_PART1		0x12
++#define SDVO_CMD_GET_INPUT_TIMINGS_PART2		0x13
++#define SDVO_CMD_SET_INPUT_TIMINGS_PART1		0x14
++#define SDVO_CMD_SET_INPUT_TIMINGS_PART2		0x15
++#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1		0x16
++#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2		0x17
++#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1		0x18
++#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2		0x19
++/* Part 1 */
++# define SDVO_DTD_CLOCK_LOW				SDVO_I2C_ARG_0
++# define SDVO_DTD_CLOCK_HIGH				SDVO_I2C_ARG_1
++# define SDVO_DTD_H_ACTIVE				SDVO_I2C_ARG_2
++# define SDVO_DTD_H_BLANK				SDVO_I2C_ARG_3
++# define SDVO_DTD_H_HIGH				SDVO_I2C_ARG_4
++# define SDVO_DTD_V_ACTIVE				SDVO_I2C_ARG_5
++# define SDVO_DTD_V_BLANK				SDVO_I2C_ARG_6
++# define SDVO_DTD_V_HIGH				SDVO_I2C_ARG_7
++/* Part 2 */
++# define SDVO_DTD_HSYNC_OFF				SDVO_I2C_ARG_0
++# define SDVO_DTD_HSYNC_WIDTH				SDVO_I2C_ARG_1
++# define SDVO_DTD_VSYNC_OFF_WIDTH			SDVO_I2C_ARG_2
++# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH			SDVO_I2C_ARG_3
++# define SDVO_DTD_DTD_FLAGS				SDVO_I2C_ARG_4
++# define SDVO_DTD_DTD_FLAG_INTERLACED				(1 << 7)
++# define SDVO_DTD_DTD_FLAG_STEREO_MASK				(3 << 5)
++# define SDVO_DTD_DTD_FLAG_INPUT_MASK				(3 << 3)
++# define SDVO_DTD_DTD_FLAG_SYNC_MASK				(3 << 1)
++# define SDVO_DTD_SDVO_FLAS				SDVO_I2C_ARG_5
++# define SDVO_DTD_SDVO_FLAG_STALL				(1 << 7)
++# define SDVO_DTD_SDVO_FLAG_CENTERED				(0 << 6)
++# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT				(1 << 6)
++# define SDVO_DTD_SDVO_FLAG_SCALING_MASK			(3 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_NONE			(0 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP			(1 << 4)
++# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH			(2 << 4)
++# define SDVO_DTD_VSYNC_OFF_HIGH			SDVO_I2C_ARG_6
++
++/**
++ * Generates a DTD based on the given width, height, and flags.
++ *
++ * This will be supported by any device supporting scaling or interlaced
++ * modes.
++ */
++#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING		0x1a
++# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW		SDVO_I2C_ARG_0
++# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH		SDVO_I2C_ARG_1
++# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW		SDVO_I2C_ARG_2
++# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH		SDVO_I2C_ARG_3
++# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW		SDVO_I2C_ARG_4
++# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH	SDVO_I2C_ARG_5
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS		SDVO_I2C_ARG_6
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED		(1 << 0)
++# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED		(1 << 1)
++
++#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1	0x1b
++#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2	0x1c
++
++/** Returns a struct intel_sdvo_pixel_clock_range */
++#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE		0x1d
++/** Returns a struct intel_sdvo_pixel_clock_range */
++#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE		0x1e
++
++/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
++#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS		0x1f
++
++/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
++#define SDVO_CMD_GET_CLOCK_RATE_MULT			0x20
++/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
++#define SDVO_CMD_SET_CLOCK_RATE_MULT			0x21
++# define SDVO_CLOCK_RATE_MULT_1X				(1 << 0)
++# define SDVO_CLOCK_RATE_MULT_2X				(1 << 1)
++# define SDVO_CLOCK_RATE_MULT_4X				(1 << 3)
++
++#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS		0x27
++/** 6 bytes of bit flags for TV formats shared by all TV format functions */
++struct psb_intel_sdvo_tv_format {
++    unsigned int ntsc_m:1;
++    unsigned int ntsc_j:1;
++    unsigned int ntsc_443:1;
++    unsigned int pal_b:1;
++    unsigned int pal_d:1;
++    unsigned int pal_g:1;
++    unsigned int pal_h:1;
++    unsigned int pal_i:1;
++
++    unsigned int pal_m:1;
++    unsigned int pal_n:1;
++    unsigned int pal_nc:1;
++    unsigned int pal_60:1;
++    unsigned int secam_b:1;
++    unsigned int secam_d:1;
++    unsigned int secam_g:1;
++    unsigned int secam_k:1;
++
++    unsigned int secam_k1:1;
++    unsigned int secam_l:1;
++    unsigned int secam_60:1;
++    unsigned int hdtv_std_smpte_240m_1080i_59:1;
++    unsigned int hdtv_std_smpte_240m_1080i_60:1;
++    unsigned int hdtv_std_smpte_260m_1080i_59:1;
++    unsigned int hdtv_std_smpte_260m_1080i_60:1;
++    unsigned int hdtv_std_smpte_274m_1080i_50:1;
++
++    unsigned int hdtv_std_smpte_274m_1080i_59:1;
++    unsigned int hdtv_std_smpte_274m_1080i_60:1;
++    unsigned int hdtv_std_smpte_274m_1080p_23:1;
++    unsigned int hdtv_std_smpte_274m_1080p_24:1;
++    unsigned int hdtv_std_smpte_274m_1080p_25:1;
++    unsigned int hdtv_std_smpte_274m_1080p_29:1;
++    unsigned int hdtv_std_smpte_274m_1080p_30:1;
++    unsigned int hdtv_std_smpte_274m_1080p_50:1;
++
++    unsigned int hdtv_std_smpte_274m_1080p_59:1;
++    unsigned int hdtv_std_smpte_274m_1080p_60:1;
++    unsigned int hdtv_std_smpte_295m_1080i_50:1;
++    unsigned int hdtv_std_smpte_295m_1080p_50:1;
++    unsigned int hdtv_std_smpte_296m_720p_59:1;
++    unsigned int hdtv_std_smpte_296m_720p_60:1;
++    unsigned int hdtv_std_smpte_296m_720p_50:1;
++    unsigned int hdtv_std_smpte_293m_480p_59:1;
++
++    unsigned int hdtv_std_smpte_170m_480i_59:1;
++    unsigned int hdtv_std_iturbt601_576i_50:1;
++    unsigned int hdtv_std_iturbt601_576p_50:1;
++    unsigned int hdtv_std_eia_7702a_480i_60:1;
++    unsigned int hdtv_std_eia_7702a_480p_60:1;
++    unsigned int pad:3;
++} __attribute__((packed));
++
++#define SDVO_CMD_GET_TV_FORMAT				0x28
++
++#define SDVO_CMD_SET_TV_FORMAT				0x29
++
++/** Returns the resolutions that can be used with the given TV format */
++#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT		0x83
++struct psb_intel_sdvo_sdtv_resolution_request {
++    unsigned int ntsc_m:1;
++    unsigned int ntsc_j:1;
++    unsigned int ntsc_443:1;
++    unsigned int pal_b:1;
++    unsigned int pal_d:1;
++    unsigned int pal_g:1;
++    unsigned int pal_h:1;
++    unsigned int pal_i:1;
++
++    unsigned int pal_m:1;
++    unsigned int pal_n:1;
++    unsigned int pal_nc:1;
++    unsigned int pal_60:1;
++    unsigned int secam_b:1;
++    unsigned int secam_d:1;
++    unsigned int secam_g:1;
++    unsigned int secam_k:1;
++
++    unsigned int secam_k1:1;
++    unsigned int secam_l:1;
++    unsigned int secam_60:1;
++    unsigned int pad:5;
++} __attribute__((packed));
++
++struct psb_intel_sdvo_sdtv_resolution_reply {
++    unsigned int res_320x200:1;
++    unsigned int res_320x240:1;
++    unsigned int res_400x300:1;
++    unsigned int res_640x350:1;
++    unsigned int res_640x400:1;
++    unsigned int res_640x480:1;
++    unsigned int res_704x480:1;
++    unsigned int res_704x576:1;
++
++    unsigned int res_720x350:1;
++    unsigned int res_720x400:1;
++    unsigned int res_720x480:1;
++    unsigned int res_720x540:1;
++    unsigned int res_720x576:1;
++    unsigned int res_768x576:1;
++    unsigned int res_800x600:1;
++    unsigned int res_832x624:1;
++
++    unsigned int res_920x766:1;
++    unsigned int res_1024x768:1;
++    unsigned int res_1280x1024:1;
++    unsigned int pad:5;
++} __attribute__((packed));
++
++/* Get supported resolution with square pixel aspect ratio that can be
++   scaled for the requested HDTV format */
++#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT		0x85
++
++struct psb_intel_sdvo_hdtv_resolution_request {
++    unsigned int hdtv_std_smpte_240m_1080i_59:1;
++    unsigned int hdtv_std_smpte_240m_1080i_60:1;
++    unsigned int hdtv_std_smpte_260m_1080i_59:1;
++    unsigned int hdtv_std_smpte_260m_1080i_60:1;
++    unsigned int hdtv_std_smpte_274m_1080i_50:1;
++    unsigned int hdtv_std_smpte_274m_1080i_59:1;
++    unsigned int hdtv_std_smpte_274m_1080i_60:1;
++    unsigned int hdtv_std_smpte_274m_1080p_23:1;
++
++    unsigned int hdtv_std_smpte_274m_1080p_24:1;
++    unsigned int hdtv_std_smpte_274m_1080p_25:1;
++    unsigned int hdtv_std_smpte_274m_1080p_29:1;
++    unsigned int hdtv_std_smpte_274m_1080p_30:1;
++    unsigned int hdtv_std_smpte_274m_1080p_50:1;
++    unsigned int hdtv_std_smpte_274m_1080p_59:1;
++    unsigned int hdtv_std_smpte_274m_1080p_60:1;
++    unsigned int hdtv_std_smpte_295m_1080i_50:1;
++
++    unsigned int hdtv_std_smpte_295m_1080p_50:1;
++    unsigned int hdtv_std_smpte_296m_720p_59:1;
++    unsigned int hdtv_std_smpte_296m_720p_60:1;
++    unsigned int hdtv_std_smpte_296m_720p_50:1;
++    unsigned int hdtv_std_smpte_293m_480p_59:1;
++    unsigned int hdtv_std_smpte_170m_480i_59:1;
++    unsigned int hdtv_std_iturbt601_576i_50:1;
++    unsigned int hdtv_std_iturbt601_576p_50:1;
++
++    unsigned int hdtv_std_eia_7702a_480i_60:1;
++    unsigned int hdtv_std_eia_7702a_480p_60:1;
++    unsigned int pad:6;
++} __attribute__((packed));
++
++struct psb_intel_sdvo_hdtv_resolution_reply {
++    unsigned int res_640x480:1;
++    unsigned int res_800x600:1;
++    unsigned int res_1024x768:1;
++    unsigned int res_1280x960:1;
++    unsigned int res_1400x1050:1;
++    unsigned int res_1600x1200:1;
++    unsigned int res_1920x1440:1;
++    unsigned int res_2048x1536:1;
++
++    unsigned int res_2560x1920:1;
++    unsigned int res_3200x2400:1;
++    unsigned int res_3840x2880:1;
++    unsigned int pad1:5;
++
++    unsigned int res_848x480:1;
++    unsigned int res_1064x600:1;
++    unsigned int res_1280x720:1;
++    unsigned int res_1360x768:1;
++    unsigned int res_1704x960:1;
++    unsigned int res_1864x1050:1;
++    unsigned int res_1920x1080:1;
++    unsigned int res_2128x1200:1;
++
++    unsigned int res_2560x1400:1;
++    unsigned int res_2728x1536:1;
++    unsigned int res_3408x1920:1;
++    unsigned int res_4264x2400:1;
++    unsigned int res_5120x2880:1;
++    unsigned int pad2:3;
++
++    unsigned int res_768x480:1;
++    unsigned int res_960x600:1;
++    unsigned int res_1152x720:1;
++    unsigned int res_1124x768:1;
++    unsigned int res_1536x960:1;
++    unsigned int res_1680x1050:1;
++    unsigned int res_1728x1080:1;
++    unsigned int res_1920x1200:1;
++
++    unsigned int res_2304x1440:1;
++    unsigned int res_2456x1536:1;
++    unsigned int res_3072x1920:1;
++    unsigned int res_3840x2400:1;
++    unsigned int res_4608x2880:1;
++    unsigned int pad3:3;
++
++    unsigned int res_1280x1024:1;
++    unsigned int pad4:7;
++
++    unsigned int res_1280x768:1;
++    unsigned int pad5:7;
++} __attribute__((packed));
++
++/* Get supported power state returns info for encoder and monitor, rely on
++   last SetTargetInput and SetTargetOutput calls */
++#define SDVO_CMD_GET_SUPPORTED_POWER_STATES		0x2a
++/* Get power state returns info for encoder and monitor, rely on last
++   SetTargetInput and SetTargetOutput calls */
++#define SDVO_CMD_GET_POWER_STATE			0x2b
++#define SDVO_CMD_GET_ENCODER_POWER_STATE		0x2b
++#define SDVO_CMD_SET_ENCODER_POWER_STATE		0x2c
++# define SDVO_ENCODER_STATE_ON					(1 << 0)
++# define SDVO_ENCODER_STATE_STANDBY				(1 << 1)
++# define SDVO_ENCODER_STATE_SUSPEND				(1 << 2)
++# define SDVO_ENCODER_STATE_OFF					(1 << 3)
++# define SDVO_MONITOR_STATE_ON					(1 << 4)
++# define SDVO_MONITOR_STATE_STANDBY				(1 << 5)
++# define SDVO_MONITOR_STATE_SUSPEND				(1 << 6)
++# define SDVO_MONITOR_STATE_OFF					(1 << 7)
++
++#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING		0x2d
++#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING		0x2e
++#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING		0x2f
++/**
++ * The panel power sequencing parameters are in units of milliseconds.
++ * The high fields are bits 8:9 of the 10-bit values.
++ */
++struct psb_sdvo_panel_power_sequencing {
++    u8 t0;
++    u8 t1;
++    u8 t2;
++    u8 t3;
++    u8 t4;
++
++    unsigned int t0_high:2;
++    unsigned int t1_high:2;
++    unsigned int t2_high:2;
++    unsigned int t3_high:2;
++
++    unsigned int t4_high:2;
++    unsigned int pad:6;
++} __attribute__((packed));
++
++#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL		0x30
++struct sdvo_max_backlight_reply {
++    u8 max_value;
++    u8 default_value;
++} __attribute__((packed));
++
++#define SDVO_CMD_GET_BACKLIGHT_LEVEL			0x31
++#define SDVO_CMD_SET_BACKLIGHT_LEVEL			0x32
++
++#define SDVO_CMD_GET_AMBIENT_LIGHT			0x33
++struct sdvo_get_ambient_light_reply {
++    u16 trip_low;
++    u16 trip_high;
++    u16 value;
++} __attribute__((packed));
++#define SDVO_CMD_SET_AMBIENT_LIGHT			0x34
++struct sdvo_set_ambient_light_reply {
++    u16 trip_low;
++    u16 trip_high;
++    unsigned int enable:1;
++    unsigned int pad:7;
++} __attribute__((packed));
++
++/* Set display power state */
++#define SDVO_CMD_SET_DISPLAY_POWER_STATE		0x7d
++# define SDVO_DISPLAY_STATE_ON				(1 << 0)
++# define SDVO_DISPLAY_STATE_STANDBY			(1 << 1)
++# define SDVO_DISPLAY_STATE_SUSPEND			(1 << 2)
++# define SDVO_DISPLAY_STATE_OFF				(1 << 3)
++
++#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS		0x84
++struct psb_intel_sdvo_enhancements_reply {
++    unsigned int flicker_filter:1;
++    unsigned int flicker_filter_adaptive:1;
++    unsigned int flicker_filter_2d:1;
++    unsigned int saturation:1;
++    unsigned int hue:1;
++    unsigned int brightness:1;
++    unsigned int contrast:1;
++    unsigned int overscan_h:1;
++
++    unsigned int overscan_v:1;
++    unsigned int hpos:1;
++    unsigned int vpos:1;
++    unsigned int sharpness:1;
++    unsigned int dot_crawl:1;
++    unsigned int dither:1;
++    unsigned int tv_chroma_filter:1;
++    unsigned int tv_luma_filter:1;
++} __attribute__((packed));
++
++/* Picture enhancement limits below are dependent on the current TV format,
++ * and thus need to be queried and set after it.
++ */
++#define SDVO_CMD_GET_MAX_FLICKER_FILTER			0x4d
++#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE	0x7b
++#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D		0x52
++#define SDVO_CMD_GET_MAX_SATURATION			0x55
++#define SDVO_CMD_GET_MAX_HUE				0x58
++#define SDVO_CMD_GET_MAX_BRIGHTNESS			0x5b
++#define SDVO_CMD_GET_MAX_CONTRAST			0x5e
++#define SDVO_CMD_GET_MAX_OVERSCAN_H			0x61
++#define SDVO_CMD_GET_MAX_OVERSCAN_V			0x64
++#define SDVO_CMD_GET_MAX_HPOS				0x67
++#define SDVO_CMD_GET_MAX_VPOS				0x6a
++#define SDVO_CMD_GET_MAX_SHARPNESS			0x6d
++#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER		0x74
++#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER			0x77
++struct psb_intel_sdvo_enhancement_limits_reply {
++    u16 max_value;
++    u16 default_value;
++} __attribute__((packed));
++
++#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION		0x7f
++#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION		0x80
++# define SDVO_LVDS_COLOR_DEPTH_18			(0 << 0)
++# define SDVO_LVDS_COLOR_DEPTH_24			(1 << 0)
++# define SDVO_LVDS_CONNECTOR_SPWG			(0 << 2)
++# define SDVO_LVDS_CONNECTOR_OPENLDI			(1 << 2)
++# define SDVO_LVDS_SINGLE_CHANNEL			(0 << 4)
++# define SDVO_LVDS_DUAL_CHANNEL				(1 << 4)
++
++#define SDVO_CMD_GET_FLICKER_FILTER			0x4e
++#define SDVO_CMD_SET_FLICKER_FILTER			0x4f
++#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE		0x50
++#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE		0x51
++#define SDVO_CMD_GET_FLICKER_FILTER_2D			0x53
++#define SDVO_CMD_SET_FLICKER_FILTER_2D			0x54
++#define SDVO_CMD_GET_SATURATION				0x56
++#define SDVO_CMD_SET_SATURATION				0x57
++#define SDVO_CMD_GET_HUE				0x59
++#define SDVO_CMD_SET_HUE				0x5a
++#define SDVO_CMD_GET_BRIGHTNESS				0x5c
++#define SDVO_CMD_SET_BRIGHTNESS				0x5d
++#define SDVO_CMD_GET_CONTRAST				0x5f
++#define SDVO_CMD_SET_CONTRAST				0x60
++#define SDVO_CMD_GET_OVERSCAN_H				0x62
++#define SDVO_CMD_SET_OVERSCAN_H				0x63
++#define SDVO_CMD_GET_OVERSCAN_V				0x65
++#define SDVO_CMD_SET_OVERSCAN_V				0x66
++#define SDVO_CMD_GET_HPOS				0x68
++#define SDVO_CMD_SET_HPOS				0x69
++#define SDVO_CMD_GET_VPOS				0x6b
++#define SDVO_CMD_SET_VPOS				0x6c
++#define SDVO_CMD_GET_SHARPNESS				0x6e
++#define SDVO_CMD_SET_SHARPNESS				0x6f
++#define SDVO_CMD_GET_TV_CHROMA_FILTER			0x75
++#define SDVO_CMD_SET_TV_CHROMA_FILTER			0x76
++#define SDVO_CMD_GET_TV_LUMA_FILTER			0x78
++#define SDVO_CMD_SET_TV_LUMA_FILTER			0x79
++struct psb_intel_sdvo_enhancements_arg {
++    u16 value;
++}__attribute__((packed));
++
++#define SDVO_CMD_GET_DOT_CRAWL				0x70
++#define SDVO_CMD_SET_DOT_CRAWL				0x71
++# define SDVO_DOT_CRAWL_ON					(1 << 0)
++# define SDVO_DOT_CRAWL_DEFAULT_ON				(1 << 1)
++
++#define SDVO_CMD_GET_DITHER				0x72
++#define SDVO_CMD_SET_DITHER				0x73
++# define SDVO_DITHER_ON						(1 << 0)
++# define SDVO_DITHER_DEFAULT_ON					(1 << 1)
++
++#define SDVO_CMD_SET_CONTROL_BUS_SWITCH			0x7a
++# define SDVO_CONTROL_BUS_PROM				(1 << 0)
++# define SDVO_CONTROL_BUS_DDC1				(1 << 1)
++# define SDVO_CONTROL_BUS_DDC2				(1 << 2)
++# define SDVO_CONTROL_BUS_DDC3				(1 << 3)
++
++/* HDMI op codes */
++#define SDVO_CMD_GET_SUPP_ENCODE	0x9d
++#define SDVO_CMD_GET_ENCODE		0x9e
++#define SDVO_CMD_SET_ENCODE		0x9f
++  #define SDVO_ENCODE_DVI	0x0
++  #define SDVO_ENCODE_HDMI	0x1
++#define SDVO_CMD_SET_PIXEL_REPLI	0x8b
++#define SDVO_CMD_GET_PIXEL_REPLI	0x8c
++#define SDVO_CMD_GET_COLORIMETRY_CAP	0x8d
++#define SDVO_CMD_SET_COLORIMETRY	0x8e
++  #define SDVO_COLORIMETRY_RGB256   0x0
++  #define SDVO_COLORIMETRY_RGB220   0x1
++  #define SDVO_COLORIMETRY_YCrCb422 0x3
++  #define SDVO_COLORIMETRY_YCrCb444 0x4
++#define SDVO_CMD_GET_COLORIMETRY	0x8f
++#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
++#define SDVO_CMD_SET_AUDIO_STAT		0x91
++#define SDVO_CMD_GET_AUDIO_STAT		0x92
++#define SDVO_CMD_SET_HBUF_INDEX		0x93
++#define SDVO_CMD_GET_HBUF_INDEX		0x94
++#define SDVO_CMD_GET_HBUF_INFO		0x95
++#define SDVO_CMD_SET_HBUF_AV_SPLIT	0x96
++#define SDVO_CMD_GET_HBUF_AV_SPLIT	0x97
++#define SDVO_CMD_SET_HBUF_DATA		0x98
++#define SDVO_CMD_GET_HBUF_DATA		0x99
++#define SDVO_CMD_SET_HBUF_TXRATE	0x9a
++#define SDVO_CMD_GET_HBUF_TXRATE	0x9b
++  #define SDVO_HBUF_TX_DISABLED	(0 << 6)
++  #define SDVO_HBUF_TX_ONCE	(2 << 6)
++  #define SDVO_HBUF_TX_VSYNC	(3 << 6)
++#define SDVO_CMD_GET_AUDIO_TX_INFO	0x9c
++#define SDVO_NEED_TO_STALL  (1 << 7)
++
++struct psb_intel_sdvo_encode {
++    u8 dvi_rev;
++    u8 hdmi_rev;
++} __attribute__ ((packed));
+diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
+new file mode 100644
+index 0000000..1869586
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_irq.c
+@@ -0,0 +1,622 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
++ * develop this driver.
++ *
++ **************************************************************************/
++/*
++ */
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "psb_intel_reg.h"
++#include "power.h"
++#include "psb_irq.h"
++#include "mdfld_output.h"
++
++/*
++ * inline functions
++ */
++
++static inline u32
++psb_pipestat(int pipe)
++{
++	if (pipe == 0)
++		return PIPEASTAT;
++	if (pipe == 1)
++		return PIPEBSTAT;
++	if (pipe == 2)
++		return PIPECSTAT;
++	BUG();
++}
++
++static inline u32
++mid_pipe_event(int pipe)
++{
++	if (pipe == 0)
++		return _PSB_PIPEA_EVENT_FLAG;
++	if (pipe == 1)
++		return _MDFLD_PIPEB_EVENT_FLAG;
++	if (pipe == 2)
++		return _MDFLD_PIPEC_EVENT_FLAG;
++	BUG();
++}
++
++static inline u32
++mid_pipe_vsync(int pipe)
++{
++	if (pipe == 0)
++		return _PSB_VSYNC_PIPEA_FLAG;
++	if (pipe == 1)
++		return _PSB_VSYNC_PIPEB_FLAG;
++	if (pipe == 2)
++		return _MDFLD_PIPEC_VBLANK_FLAG;
++	BUG();
++}
++
++static inline u32
++mid_pipeconf(int pipe)
++{
++	if (pipe == 0)
++		return PIPEACONF;
++	if (pipe == 1)
++		return PIPEBCONF;
++	if (pipe == 2)
++		return PIPECCONF;
++	BUG();
++}
++
++void
++psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
++{
++	if ((dev_priv->pipestat[pipe] & mask) != mask) {
++		u32 reg = psb_pipestat(pipe);
++		dev_priv->pipestat[pipe] |= mask;
++		/* Enable the interrupt, clear any pending status */
++		if (gma_power_begin(dev_priv->dev, false)) {
++			u32 writeVal = PSB_RVDC32(reg);
++			writeVal |= (mask | (mask >> 16));
++			PSB_WVDC32(writeVal, reg);
++			(void) PSB_RVDC32(reg);
++			gma_power_end(dev_priv->dev);
++		}
++	}
++}
++
++void
++psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
++{
++	if ((dev_priv->pipestat[pipe] & mask) != 0) {
++		u32 reg = psb_pipestat(pipe);
++		dev_priv->pipestat[pipe] &= ~mask;
++		if (gma_power_begin(dev_priv->dev, false)) {
++			u32 writeVal = PSB_RVDC32(reg);
++			writeVal &= ~mask;
++			PSB_WVDC32(writeVal, reg);
++			(void) PSB_RVDC32(reg);
++			gma_power_end(dev_priv->dev);
++		}
++	}
++}
++
++static void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
++{
++	if (gma_power_begin(dev_priv->dev, false)) {
++		u32 pipe_event = mid_pipe_event(pipe);
++		dev_priv->vdc_irq_mask |= pipe_event;
++		PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++		PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++		gma_power_end(dev_priv->dev);
++	}
++}
++
++static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
++{
++	if (dev_priv->pipestat[pipe] == 0) {
++		if (gma_power_begin(dev_priv->dev, false)) {
++			u32 pipe_event = mid_pipe_event(pipe);
++			dev_priv->vdc_irq_mask &= ~pipe_event;
++			PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++			PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++			gma_power_end(dev_priv->dev);
++		}
++	}
++}
++
++/**
++ * Display controller interrupt handler for pipe event.
++ *
++ */
++static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
++{
++	struct drm_psb_private *dev_priv =
++	    (struct drm_psb_private *) dev->dev_private;
++
++	uint32_t pipe_stat_val = 0;
++	uint32_t pipe_stat_reg = psb_pipestat(pipe);
++	uint32_t pipe_enable = dev_priv->pipestat[pipe];
++	uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
++	uint32_t pipe_clear;
++	uint32_t i = 0;
++
++	spin_lock(&dev_priv->irqmask_lock);
++
++	pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
++	pipe_stat_val &= pipe_enable | pipe_status;
++	pipe_stat_val &= pipe_stat_val >> 16;
++
++	spin_unlock(&dev_priv->irqmask_lock);
++
++	/* Clear the 2nd level interrupt status bits
++	 * Sometimes the bits are very sticky so we repeat until they unstick */
++	for (i = 0; i < 0xffff; i++) {
++		PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
++		pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;
++
++		if (pipe_clear == 0)
++			break;
++	}
++
++	if (pipe_clear)
++		dev_err(dev->dev,
++		"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
++		__func__, pipe, PSB_RVDC32(pipe_stat_reg));
++
++	if (pipe_stat_val & PIPE_VBLANK_STATUS)
++		drm_handle_vblank(dev, pipe);
++
++	if (pipe_stat_val & PIPE_TE_STATUS)
++		drm_handle_vblank(dev, pipe);
++}
++
++/*
++ * Display controller interrupt handler.
++ */
++static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
++{
++	if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
++		mid_pipe_event_handler(dev, 0);
++
++	if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
++		mid_pipe_event_handler(dev, 1);
++}
++
++irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
++{
++	struct drm_device *dev = (struct drm_device *) arg;
++	struct drm_psb_private *dev_priv =
++	    (struct drm_psb_private *) dev->dev_private;
++
++	uint32_t vdc_stat, dsp_int = 0, sgx_int = 0;
++	int handled = 0;
++
++	spin_lock(&dev_priv->irqmask_lock);
++
++	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
++
++	if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
++		dsp_int = 1;
++
++	/* FIXME: Handle Medfield
++	if (vdc_stat & _MDFLD_DISP_ALL_IRQ_FLAG)
++		dsp_int = 1;
++	*/
++
++	if (vdc_stat & _PSB_IRQ_SGX_FLAG)
++		sgx_int = 1;
++
++	vdc_stat &= dev_priv->vdc_irq_mask;
++	spin_unlock(&dev_priv->irqmask_lock);
++
++	if (dsp_int && gma_power_is_on(dev)) {
++		psb_vdc_interrupt(dev, vdc_stat);
++		handled = 1;
++	}
++
++	if (sgx_int) {
++		/* Not expected - we have it masked, shut it up */
++		u32 s, s2;
++		s = PSB_RSGX32(PSB_CR_EVENT_STATUS);
++		s2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
++		PSB_WSGX32(s, PSB_CR_EVENT_HOST_CLEAR);
++		PSB_WSGX32(s2, PSB_CR_EVENT_HOST_CLEAR2);
++		/* if s & _PSB_CE_TWOD_COMPLETE we have 2D done but
++		   we may as well poll even if we add that ! */
++		handled = 1;
++	}
++
++	PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
++	(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
++	DRM_READMEMORYBARRIER();
++
++	if (!handled)
++		return IRQ_NONE;
++
++	return IRQ_HANDLED;
++}
++
++void psb_irq_preinstall(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv =
++	    (struct drm_psb_private *) dev->dev_private;
++	unsigned long irqflags;
++
++	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++	if (gma_power_is_on(dev))
++		PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
++	if (dev->vblank_enabled[0])
++		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
++	if (dev->vblank_enabled[1])
++		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
++
++	/* FIXME: Handle Medfield irq mask
++	if (dev->vblank_enabled[1])
++		dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
++	if (dev->vblank_enabled[2])
++		dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
++	*/
++
++	/* This register is safe even if display island is off */
++	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++}
++
++int psb_irq_postinstall(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv =
++	    (struct drm_psb_private *) dev->dev_private;
++	unsigned long irqflags;
++
++	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++	/* This register is safe even if display island is off */
++	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
++
++	if (dev->vblank_enabled[0])
++		psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
++	else
++		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
++
++	if (dev->vblank_enabled[1])
++		psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
++	else
++		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
++
++	if (dev->vblank_enabled[2])
++		psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
++	else
++		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
++
++	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++	return 0;
++}
++
++void psb_irq_uninstall(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv =
++	    (struct drm_psb_private *) dev->dev_private;
++	unsigned long irqflags;
++
++	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
++
++	if (dev->vblank_enabled[0])
++		psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
++
++	if (dev->vblank_enabled[1])
++		psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
++
++	if (dev->vblank_enabled[2])
++		psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
++
++	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
++				  _PSB_IRQ_MSVDX_FLAG |
++				  _LNC_IRQ_TOPAZ_FLAG;
++
++	/* These two registers are safe even if display island is off */
++	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++
++	wmb();
++
++	/* This register is safe even if display island is off */
++	PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
++	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++}
++
++void psb_irq_turn_on_dpst(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv =
++		(struct drm_psb_private *) dev->dev_private;
++	u32 hist_reg;
++	u32 pwm_reg;
++
++	if (gma_power_begin(dev, false)) {
++		PSB_WVDC32(1 << 31, HISTOGRAM_LOGIC_CONTROL);
++		hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
++		PSB_WVDC32(1 << 31, HISTOGRAM_INT_CONTROL);
++		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++
++		PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
++		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++		PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE
++						| PWM_PHASEIN_INT_ENABLE,
++							   PWM_CONTROL_LOGIC);
++		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++
++		psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
++
++		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++		PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
++							HISTOGRAM_INT_CONTROL);
++		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++		PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
++							PWM_CONTROL_LOGIC);
++
++		gma_power_end(dev);
++	}
++}
++
++int psb_irq_enable_dpst(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv =
++		(struct drm_psb_private *) dev->dev_private;
++	unsigned long irqflags;
++
++	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++	/* enable DPST */
++	mid_enable_pipe_event(dev_priv, 0);
++	psb_irq_turn_on_dpst(dev);
++
++	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++	return 0;
++}
++
++void psb_irq_turn_off_dpst(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv =
++	    (struct drm_psb_private *) dev->dev_private;
++	u32 hist_reg;
++	u32 pwm_reg;
++
++	if (gma_power_begin(dev, false)) {
++		PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
++		hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
++
++		psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
++
++		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++		PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE, /* ~, not ! */
++							PWM_CONTROL_LOGIC);
++		pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
++
++		gma_power_end(dev);
++	}
++}
++
++int psb_irq_disable_dpst(struct drm_device *dev)
++{
++	struct drm_psb_private *dev_priv =
++	    (struct drm_psb_private *) dev->dev_private;
++	unsigned long irqflags;
++
++	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++	mid_disable_pipe_event(dev_priv, 0);
++	psb_irq_turn_off_dpst(dev);
++
++	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++
++	return 0;
++}
++
++#ifdef PSB_FIXME
++static int psb_vblank_do_wait(struct drm_device *dev,
++			      unsigned int *sequence, atomic_t *counter)
++{
++	unsigned int cur_vblank;
++	int ret = 0;
++	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
++		    (((cur_vblank = atomic_read(counter))
++		      - *sequence) <= (1 << 23)));
++	*sequence = cur_vblank;
++
++	return ret;
++}
++#endif
++
++/*
++ * It is used to enable VBLANK interrupt
++ */
++int psb_enable_vblank(struct drm_device *dev, int pipe)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	unsigned long irqflags;
++	uint32_t reg_val = 0;
++	uint32_t pipeconf_reg = mid_pipeconf(pipe);
++
++	/* Medfield is different - we should perhaps extract out vblank
++	   and blacklight etc ops */
++	if (IS_MFLD(dev))
++		return mdfld_enable_te(dev, pipe);
++
++	if (gma_power_begin(dev, false)) {
++		reg_val = REG_READ(pipeconf_reg);
++		gma_power_end(dev);
++	}
++
++	if (!(reg_val & PIPEACONF_ENABLE))
++		return -EINVAL;
++
++	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++	if (pipe == 0)
++		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
++	else if (pipe == 1)
++		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
++
++	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++	psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
++
++	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++
++	return 0;
++}
++
++/*
++ * It is used to disable VBLANK interrupt
++ */
++void psb_disable_vblank(struct drm_device *dev, int pipe)
++{
++	struct drm_psb_private *dev_priv = dev->dev_private;
++	unsigned long irqflags;
++
++	if (IS_MFLD(dev))
++		mdfld_disable_te(dev, pipe);
++	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++	if (pipe == 0)
++		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
++	else if (pipe == 1)
++		dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
++
++	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
++	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
++	psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
++
++	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++}
++
++/*
++ * It is used to enable TE interrupt
++ */
++int mdfld_enable_te(struct drm_device *dev, int pipe)
++{
++	struct drm_psb_private *dev_priv =
++		(struct drm_psb_private *) dev->dev_private;
++	unsigned long irqflags;
++	uint32_t reg_val = 0;
++	uint32_t pipeconf_reg = mid_pipeconf(pipe);
++
++	if (gma_power_begin(dev, false)) {
++		reg_val = REG_READ(pipeconf_reg);
++		gma_power_end(dev);
++	}
++
++	if (!(reg_val & PIPEACONF_ENABLE))
++		return -EINVAL;
++
++	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++	mid_enable_pipe_event(dev_priv, pipe);
++	psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
++
++	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++
++	return 0;
++}
++
++/*
++ * It is used to disable TE interrupt
++ */
++void mdfld_disable_te(struct drm_device *dev, int pipe)
++{
++	struct drm_psb_private *dev_priv =
++		(struct drm_psb_private *) dev->dev_private;
++	unsigned long irqflags;
++
++	if (!dev_priv->dsr_enable)
++		return;
++
++	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
++
++	mid_disable_pipe_event(dev_priv, pipe);
++	psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
++
++	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
++}
++
++/* Called from drm generic code, passed a 'crtc', which
++ * we use as a pipe index
++ */
++u32 psb_get_vblank_counter(struct drm_device *dev, int pipe)
++{
++	uint32_t high_frame = PIPEAFRAMEHIGH;
++	uint32_t low_frame = PIPEAFRAMEPIXEL;
++	uint32_t pipeconf_reg = PIPEACONF;
++	uint32_t reg_val = 0;
++	uint32_t high1 = 0, high2 = 0, low = 0, count = 0;
++
++	switch (pipe) {
++	case 0:
++		break;
++	case 1:
++		high_frame = PIPEBFRAMEHIGH;
++		low_frame = PIPEBFRAMEPIXEL;
++		pipeconf_reg = PIPEBCONF;
++		break;
++	case 2:
++		high_frame = PIPECFRAMEHIGH;
++		low_frame = PIPECFRAMEPIXEL;
++		pipeconf_reg = PIPECCONF;
++		break;
++	default:
++		dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
++		return 0;
++	}
++
++	if (!gma_power_begin(dev, false))
++		return 0;
++
++	reg_val = REG_READ(pipeconf_reg);
++
++	if (!(reg_val & PIPEACONF_ENABLE)) {
++		dev_err(dev->dev, "trying to get vblank count for disabled pipe %d\n",
++								pipe);
++		goto psb_get_vblank_counter_exit;
++	}
++
++	/*
++	 * High & low register fields aren't synchronized, so make sure
++	 * we get a low value that's stable across two reads of the high
++	 * register.
++	 */
++	do {
++		high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
++			 PIPE_FRAME_HIGH_SHIFT);
++		low =  ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
++			PIPE_FRAME_LOW_SHIFT);
++		high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
++			 PIPE_FRAME_HIGH_SHIFT);
++	} while (high1 != high2);
++
++	count = (high1 << 8) | low;
++
++psb_get_vblank_counter_exit:
++
++	gma_power_end(dev);
++
++	return count;
++}
++
+diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
+new file mode 100644
+index 0000000..603045b
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_irq.h
+@@ -0,0 +1,47 @@
++/**************************************************************************
++ * Copyright (c) 2009-2011, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors:
++ *    Benjamin Defnet <benjamin.r.defnet at intel.com>
++ *    Rajesh Poornachandran <rajesh.poornachandran at intel.com>
++ *
++ **************************************************************************/
++
++#ifndef _SYSIRQ_H_
++#define _SYSIRQ_H_
++
++#include <drm/drmP.h>
++
++bool sysirq_init(struct drm_device *dev);
++void sysirq_uninit(struct drm_device *dev);
++
++void psb_irq_preinstall(struct drm_device *dev);
++int  psb_irq_postinstall(struct drm_device *dev);
++void psb_irq_uninstall(struct drm_device *dev);
++irqreturn_t psb_irq_handler(DRM_IRQ_ARGS);
++
++int psb_irq_enable_dpst(struct drm_device *dev);
++int psb_irq_disable_dpst(struct drm_device *dev);
++void psb_irq_turn_on_dpst(struct drm_device *dev);
++void psb_irq_turn_off_dpst(struct drm_device *dev);
++int  psb_enable_vblank(struct drm_device *dev, int pipe);
++void psb_disable_vblank(struct drm_device *dev, int pipe);
++u32  psb_get_vblank_counter(struct drm_device *dev, int pipe);
++
++int mdfld_enable_te(struct drm_device *dev, int pipe);
++void mdfld_disable_te(struct drm_device *dev, int pipe);
++#endif /* _SYSIRQ_H_ */
+diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
+new file mode 100644
+index 0000000..b867aabe
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_lid.c
+@@ -0,0 +1,88 @@
++/**************************************************************************
++ * Copyright (c) 2007, Intel Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
++ **************************************************************************/
++
++#include <drm/drmP.h>
++#include "psb_drv.h"
++#include "psb_reg.h"
++#include "psb_intel_reg.h"
++#include <linux/spinlock.h>
++
++static void psb_lid_timer_func(unsigned long data)
++{
++	struct drm_psb_private * dev_priv = (struct drm_psb_private *)data;
++	struct drm_device *dev = (struct drm_device *)dev_priv->dev;
++	struct timer_list *lid_timer = &dev_priv->lid_timer;
++	unsigned long irq_flags;
++	u32 *lid_state = dev_priv->lid_state;
++	u32 pp_status;
++
++	if (readl(lid_state) == dev_priv->lid_last_state)
++		goto lid_timer_schedule;
++
++	if ((readl(lid_state)) & 0x01) {
++		/*lid state is open*/
++		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
++		do {
++			pp_status = REG_READ(PP_STATUS);
++		} while ((pp_status & PP_ON) == 0);
++
++		/*FIXME: should be backlight level before*/
++		psb_intel_lvds_set_brightness(dev, 100);
++	} else {
++		psb_intel_lvds_set_brightness(dev, 0);
++
++		REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
++		do {
++			pp_status = REG_READ(PP_STATUS);
++		} while ((pp_status & PP_ON) == 0);
++	}
++	dev_priv->lid_last_state =  readl(lid_state);
++
++lid_timer_schedule:
++	spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
++	if (!timer_pending(lid_timer)) {
++		lid_timer->expires = jiffies + PSB_LID_DELAY;
++		add_timer(lid_timer);
++	}
++	spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
++}
++
++void psb_lid_timer_init(struct drm_psb_private *dev_priv)
++{
++	struct timer_list *lid_timer = &dev_priv->lid_timer;
++	unsigned long irq_flags;
++
++	spin_lock_init(&dev_priv->lid_lock);
++	spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
++
++	init_timer(lid_timer);
++
++	lid_timer->data = (unsigned long)dev_priv;
++	lid_timer->function = psb_lid_timer_func;
++	lid_timer->expires = jiffies + PSB_LID_DELAY;
++
++	add_timer(lid_timer);
++	spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
++}
++
++void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
++{
++	del_timer_sync(&dev_priv->lid_timer);
++}
++
+diff --git a/drivers/gpu/drm/gma500/psb_reg.h b/drivers/gpu/drm/gma500/psb_reg.h
+new file mode 100644
+index 0000000..b81c7c1
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/psb_reg.h
+@@ -0,0 +1,582 @@
++/**************************************************************************
++ *
++ * Copyright (c) (2005-2007) Imagination Technologies Limited.
++ * Copyright (c) 2007, Intel Corporation.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA..
++ *
++ **************************************************************************/
++
++#ifndef _PSB_REG_H_
++#define _PSB_REG_H_
++
++#define PSB_CR_CLKGATECTL		0x0000
++#define _PSB_C_CLKGATECTL_AUTO_MAN_REG		(1 << 24)
++#define _PSB_C_CLKGATECTL_USE_CLKG_SHIFT	(20)
++#define _PSB_C_CLKGATECTL_USE_CLKG_MASK		(0x3 << 20)
++#define _PSB_C_CLKGATECTL_DPM_CLKG_SHIFT	(16)
++#define _PSB_C_CLKGATECTL_DPM_CLKG_MASK		(0x3 << 16)
++#define _PSB_C_CLKGATECTL_TA_CLKG_SHIFT		(12)
++#define _PSB_C_CLKGATECTL_TA_CLKG_MASK		(0x3 << 12)
++#define _PSB_C_CLKGATECTL_TSP_CLKG_SHIFT	(8)
++#define _PSB_C_CLKGATECTL_TSP_CLKG_MASK		(0x3 << 8)
++#define _PSB_C_CLKGATECTL_ISP_CLKG_SHIFT	(4)
++#define _PSB_C_CLKGATECTL_ISP_CLKG_MASK		(0x3 << 4)
++#define _PSB_C_CLKGATECTL_2D_CLKG_SHIFT		(0)
++#define _PSB_C_CLKGATECTL_2D_CLKG_MASK		(0x3 << 0)
++#define _PSB_C_CLKGATECTL_CLKG_ENABLED		(0)
++#define _PSB_C_CLKGATECTL_CLKG_DISABLED		(1)
++#define _PSB_C_CLKGATECTL_CLKG_AUTO		(2)
++
++#define PSB_CR_CORE_ID			0x0010
++#define _PSB_CC_ID_ID_SHIFT			(16)
++#define _PSB_CC_ID_ID_MASK			(0xFFFF << 16)
++#define _PSB_CC_ID_CONFIG_SHIFT			(0)
++#define _PSB_CC_ID_CONFIG_MASK			(0xFFFF << 0)
++
++#define PSB_CR_CORE_REVISION		0x0014
++#define _PSB_CC_REVISION_DESIGNER_SHIFT		(24)
++#define _PSB_CC_REVISION_DESIGNER_MASK		(0xFF << 24)
++#define _PSB_CC_REVISION_MAJOR_SHIFT		(16)
++#define _PSB_CC_REVISION_MAJOR_MASK		(0xFF << 16)
++#define _PSB_CC_REVISION_MINOR_SHIFT		(8)
++#define _PSB_CC_REVISION_MINOR_MASK		(0xFF << 8)
++#define _PSB_CC_REVISION_MAINTENANCE_SHIFT	(0)
++#define _PSB_CC_REVISION_MAINTENANCE_MASK	(0xFF << 0)
++
++#define PSB_CR_DESIGNER_REV_FIELD1	0x0018
++
++#define PSB_CR_SOFT_RESET		0x0080
++#define _PSB_CS_RESET_TSP_RESET		(1 << 6)
++#define _PSB_CS_RESET_ISP_RESET		(1 << 5)
++#define _PSB_CS_RESET_USE_RESET		(1 << 4)
++#define _PSB_CS_RESET_TA_RESET		(1 << 3)
++#define _PSB_CS_RESET_DPM_RESET		(1 << 2)
++#define _PSB_CS_RESET_TWOD_RESET	(1 << 1)
++#define _PSB_CS_RESET_BIF_RESET			(1 << 0)
++
++#define PSB_CR_DESIGNER_REV_FIELD2	0x001C
++
++#define PSB_CR_EVENT_HOST_ENABLE2	0x0110
++
++#define PSB_CR_EVENT_STATUS2		0x0118
++
++#define PSB_CR_EVENT_HOST_CLEAR2	0x0114
++#define _PSB_CE2_BIF_REQUESTER_FAULT		(1 << 4)
++
++#define PSB_CR_EVENT_STATUS		0x012C
++
++#define PSB_CR_EVENT_HOST_ENABLE	0x0130
++
++#define PSB_CR_EVENT_HOST_CLEAR		0x0134
++#define _PSB_CE_MASTER_INTERRUPT		(1 << 31)
++#define _PSB_CE_TA_DPM_FAULT			(1 << 28)
++#define _PSB_CE_TWOD_COMPLETE			(1 << 27)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_ZLS		(1 << 25)
++#define _PSB_CE_DPM_TA_MEM_FREE			(1 << 24)
++#define _PSB_CE_PIXELBE_END_RENDER		(1 << 18)
++#define _PSB_CE_SW_EVENT			(1 << 14)
++#define _PSB_CE_TA_FINISHED			(1 << 13)
++#define _PSB_CE_TA_TERMINATE			(1 << 12)
++#define _PSB_CE_DPM_REACHED_MEM_THRESH		(1 << 3)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_GBL		(1 << 2)
++#define _PSB_CE_DPM_OUT_OF_MEMORY_MT		(1 << 1)
++#define _PSB_CE_DPM_3D_MEM_FREE			(1 << 0)
++
++
++#define PSB_USE_OFFSET_MASK		0x0007FFFF
++#define PSB_USE_OFFSET_SIZE		(PSB_USE_OFFSET_MASK + 1)
++#define PSB_CR_USE_CODE_BASE0		0x0A0C
++#define PSB_CR_USE_CODE_BASE1		0x0A10
++#define PSB_CR_USE_CODE_BASE2		0x0A14
++#define PSB_CR_USE_CODE_BASE3		0x0A18
++#define PSB_CR_USE_CODE_BASE4		0x0A1C
++#define PSB_CR_USE_CODE_BASE5		0x0A20
++#define PSB_CR_USE_CODE_BASE6		0x0A24
++#define PSB_CR_USE_CODE_BASE7		0x0A28
++#define PSB_CR_USE_CODE_BASE8		0x0A2C
++#define PSB_CR_USE_CODE_BASE9		0x0A30
++#define PSB_CR_USE_CODE_BASE10		0x0A34
++#define PSB_CR_USE_CODE_BASE11		0x0A38
++#define PSB_CR_USE_CODE_BASE12		0x0A3C
++#define PSB_CR_USE_CODE_BASE13		0x0A40
++#define PSB_CR_USE_CODE_BASE14		0x0A44
++#define PSB_CR_USE_CODE_BASE15		0x0A48
++#define PSB_CR_USE_CODE_BASE(_i)	(0x0A0C + ((_i) << 2))
++#define _PSB_CUC_BASE_DM_SHIFT			(25)
++#define _PSB_CUC_BASE_DM_MASK			(0x3 << 25)
++#define _PSB_CUC_BASE_ADDR_SHIFT		(0)	/* 1024-bit aligned address? */
++#define _PSB_CUC_BASE_ADDR_ALIGNSHIFT		(7)
++#define _PSB_CUC_BASE_ADDR_MASK			(0x1FFFFFF << 0)
++#define _PSB_CUC_DM_VERTEX			(0)
++#define _PSB_CUC_DM_PIXEL			(1)
++#define _PSB_CUC_DM_RESERVED			(2)
++#define _PSB_CUC_DM_EDM				(3)
++
++#define PSB_CR_PDS_EXEC_BASE		0x0AB8
++#define _PSB_CR_PDS_EXEC_BASE_ADDR_SHIFT	(20)	/* 1MB aligned address */
++#define _PSB_CR_PDS_EXEC_BASE_ADDR_ALIGNSHIFT	(20)
++
++#define PSB_CR_EVENT_KICKER		0x0AC4
++#define _PSB_CE_KICKER_ADDRESS_SHIFT		(4)	/* 128-bit aligned address */
++
++#define PSB_CR_EVENT_KICK		0x0AC8
++#define _PSB_CE_KICK_NOW			(1 << 0)
++
++#define PSB_CR_BIF_DIR_LIST_BASE1	0x0C38
++
++#define PSB_CR_BIF_CTRL			0x0C00
++#define _PSB_CB_CTRL_CLEAR_FAULT		(1 << 4)
++#define _PSB_CB_CTRL_INVALDC			(1 << 3)
++#define _PSB_CB_CTRL_FLUSH			(1 << 2)
++
++#define PSB_CR_BIF_INT_STAT		0x0C04
++
++#define PSB_CR_BIF_FAULT		0x0C08
++#define _PSB_CBI_STAT_PF_N_RW			(1 << 14)
++#define _PSB_CBI_STAT_FAULT_SHIFT		(0)
++#define _PSB_CBI_STAT_FAULT_MASK		(0x3FFF << 0)
++#define _PSB_CBI_STAT_FAULT_CACHE		(1 << 1)
++#define _PSB_CBI_STAT_FAULT_TA			(1 << 2)
++#define _PSB_CBI_STAT_FAULT_VDM			(1 << 3)
++#define _PSB_CBI_STAT_FAULT_2D			(1 << 4)
++#define _PSB_CBI_STAT_FAULT_PBE			(1 << 5)
++#define _PSB_CBI_STAT_FAULT_TSP			(1 << 6)
++#define _PSB_CBI_STAT_FAULT_ISP			(1 << 7)
++#define _PSB_CBI_STAT_FAULT_USSEPDS		(1 << 8)
++#define _PSB_CBI_STAT_FAULT_HOST		(1 << 9)
++
++#define PSB_CR_BIF_BANK0		0x0C78
++#define PSB_CR_BIF_BANK1		0x0C7C
++#define PSB_CR_BIF_DIR_LIST_BASE0	0x0C84
++#define PSB_CR_BIF_TWOD_REQ_BASE	0x0C88
++#define PSB_CR_BIF_3D_REQ_BASE		0x0CAC
++
++#define PSB_CR_2D_SOCIF			0x0E18
++#define _PSB_C2_SOCIF_FREESPACE_SHIFT		(0)
++#define _PSB_C2_SOCIF_FREESPACE_MASK		(0xFF << 0)
++#define _PSB_C2_SOCIF_EMPTY			(0x80 << 0)
++
++#define PSB_CR_2D_BLIT_STATUS		0x0E04
++#define _PSB_C2B_STATUS_BUSY			(1 << 24)
++#define _PSB_C2B_STATUS_COMPLETE_SHIFT		(0)
++#define _PSB_C2B_STATUS_COMPLETE_MASK		(0xFFFFFF << 0)
++
++/*
++ * 2D defs.
++ */
++
++/*
++ * 2D Slave Port Data : Block Header's Object Type
++ */
++
++#define	PSB_2D_CLIP_BH			(0x00000000)
++#define	PSB_2D_PAT_BH			(0x10000000)
++#define	PSB_2D_CTRL_BH			(0x20000000)
++#define	PSB_2D_SRC_OFF_BH		(0x30000000)
++#define	PSB_2D_MASK_OFF_BH		(0x40000000)
++#define	PSB_2D_RESERVED1_BH		(0x50000000)
++#define	PSB_2D_RESERVED2_BH		(0x60000000)
++#define	PSB_2D_FENCE_BH			(0x70000000)
++#define	PSB_2D_BLIT_BH			(0x80000000)
++#define	PSB_2D_SRC_SURF_BH		(0x90000000)
++#define	PSB_2D_DST_SURF_BH		(0xA0000000)
++#define	PSB_2D_PAT_SURF_BH		(0xB0000000)
++#define	PSB_2D_SRC_PAL_BH		(0xC0000000)
++#define	PSB_2D_PAT_PAL_BH		(0xD0000000)
++#define	PSB_2D_MASK_SURF_BH		(0xE0000000)
++#define	PSB_2D_FLUSH_BH			(0xF0000000)
++
++/*
++ * Clip Definition block (PSB_2D_CLIP_BH)
++ */
++#define PSB_2D_CLIPCOUNT_MAX		(1)
++#define PSB_2D_CLIPCOUNT_MASK		(0x00000000)
++#define PSB_2D_CLIPCOUNT_CLRMASK	(0xFFFFFFFF)
++#define PSB_2D_CLIPCOUNT_SHIFT		(0)
++/* clip rectangle min & max */
++#define PSB_2D_CLIP_XMAX_MASK		(0x00FFF000)
++#define PSB_2D_CLIP_XMAX_CLRMASK	(0xFF000FFF)
++#define PSB_2D_CLIP_XMAX_SHIFT		(12)
++#define PSB_2D_CLIP_XMIN_MASK		(0x00000FFF)
++#define PSB_2D_CLIP_XMIN_CLRMASK	(0x00FFF000)
++#define PSB_2D_CLIP_XMIN_SHIFT		(0)
++/* clip rectangle offset */
++#define PSB_2D_CLIP_YMAX_MASK		(0x00FFF000)
++#define PSB_2D_CLIP_YMAX_CLRMASK	(0xFF000FFF)
++#define PSB_2D_CLIP_YMAX_SHIFT		(12)
++#define PSB_2D_CLIP_YMIN_MASK		(0x00000FFF)
++#define PSB_2D_CLIP_YMIN_CLRMASK	(0x00FFF000)
++#define PSB_2D_CLIP_YMIN_SHIFT		(0)
++
++/*
++ * Pattern Control (PSB_2D_PAT_BH)
++ */
++#define PSB_2D_PAT_HEIGHT_MASK		(0x0000001F)
++#define PSB_2D_PAT_HEIGHT_SHIFT		(0)
++#define PSB_2D_PAT_WIDTH_MASK		(0x000003E0)
++#define PSB_2D_PAT_WIDTH_SHIFT		(5)
++#define PSB_2D_PAT_YSTART_MASK		(0x00007C00)
++#define PSB_2D_PAT_YSTART_SHIFT		(10)
++#define PSB_2D_PAT_XSTART_MASK		(0x000F8000)
++#define PSB_2D_PAT_XSTART_SHIFT		(15)
++
++/*
++ * 2D Control block (PSB_2D_CTRL_BH)
++ */
++/* Present Flags */
++#define PSB_2D_SRCCK_CTRL		(0x00000001)
++#define PSB_2D_DSTCK_CTRL		(0x00000002)
++#define PSB_2D_ALPHA_CTRL		(0x00000004)
++/* Colour Key Colour (SRC/DST)*/
++#define PSB_2D_CK_COL_MASK		(0xFFFFFFFF)
++#define PSB_2D_CK_COL_CLRMASK		(0x00000000)
++#define PSB_2D_CK_COL_SHIFT		(0)
++/* Colour Key Mask (SRC/DST)*/
++#define PSB_2D_CK_MASK_MASK		(0xFFFFFFFF)
++#define PSB_2D_CK_MASK_CLRMASK		(0x00000000)
++#define PSB_2D_CK_MASK_SHIFT		(0)
++/* Alpha Control (Alpha/RGB)*/
++#define PSB_2D_GBLALPHA_MASK		(0x000FF000)
++#define PSB_2D_GBLALPHA_CLRMASK		(0xFFF00FFF)
++#define PSB_2D_GBLALPHA_SHIFT		(12)
++#define PSB_2D_SRCALPHA_OP_MASK		(0x00700000)
++#define PSB_2D_SRCALPHA_OP_CLRMASK	(0xFF8FFFFF)
++#define PSB_2D_SRCALPHA_OP_SHIFT	(20)
++#define PSB_2D_SRCALPHA_OP_ONE		(0x00000000)
++#define PSB_2D_SRCALPHA_OP_SRC		(0x00100000)
++#define PSB_2D_SRCALPHA_OP_DST		(0x00200000)
++#define PSB_2D_SRCALPHA_OP_SG		(0x00300000)
++#define PSB_2D_SRCALPHA_OP_DG		(0x00400000)
++#define PSB_2D_SRCALPHA_OP_GBL		(0x00500000)
++#define PSB_2D_SRCALPHA_OP_ZERO		(0x00600000)
++#define PSB_2D_SRCALPHA_INVERT		(0x00800000)
++#define PSB_2D_SRCALPHA_INVERT_CLR	(0xFF7FFFFF)
++#define PSB_2D_DSTALPHA_OP_MASK		(0x07000000)
++#define PSB_2D_DSTALPHA_OP_CLRMASK	(0xF8FFFFFF)
++#define PSB_2D_DSTALPHA_OP_SHIFT	(24)
++#define PSB_2D_DSTALPHA_OP_ONE		(0x00000000)
++#define PSB_2D_DSTALPHA_OP_SRC		(0x01000000)
++#define PSB_2D_DSTALPHA_OP_DST		(0x02000000)
++#define PSB_2D_DSTALPHA_OP_SG		(0x03000000)
++#define PSB_2D_DSTALPHA_OP_DG		(0x04000000)
++#define PSB_2D_DSTALPHA_OP_GBL		(0x05000000)
++#define PSB_2D_DSTALPHA_OP_ZERO		(0x06000000)
++#define PSB_2D_DSTALPHA_INVERT		(0x08000000)
++#define PSB_2D_DSTALPHA_INVERT_CLR	(0xF7FFFFFF)
++
++#define PSB_2D_PRE_MULTIPLICATION_ENABLE	(0x10000000)
++#define PSB_2D_PRE_MULTIPLICATION_CLRMASK	(0xEFFFFFFF)
++#define PSB_2D_ZERO_SOURCE_ALPHA_ENABLE		(0x20000000)
++#define PSB_2D_ZERO_SOURCE_ALPHA_CLRMASK	(0xDFFFFFFF)
++
++/*
++ *Source Offset (PSB_2D_SRC_OFF_BH)
++ */
++#define PSB_2D_SRCOFF_XSTART_MASK	((0x00000FFF) << 12)
++#define PSB_2D_SRCOFF_XSTART_SHIFT	(12)
++#define PSB_2D_SRCOFF_YSTART_MASK	(0x00000FFF)
++#define PSB_2D_SRCOFF_YSTART_SHIFT	(0)
++
++/*
++ * Mask Offset (PSB_2D_MASK_OFF_BH)
++ */
++#define PSB_2D_MASKOFF_XSTART_MASK	((0x00000FFF) << 12)
++#define PSB_2D_MASKOFF_XSTART_SHIFT	(12)
++#define PSB_2D_MASKOFF_YSTART_MASK	(0x00000FFF)
++#define PSB_2D_MASKOFF_YSTART_SHIFT	(0)
++
++/*
++ * 2D Fence (see PSB_2D_FENCE_BH): bits 0:27 are ignored
++ */
++
++/*
++ *Blit Rectangle (PSB_2D_BLIT_BH)
++ */
++
++#define PSB_2D_ROT_MASK			(3 << 25)
++#define PSB_2D_ROT_CLRMASK		(~PSB_2D_ROT_MASK)
++#define PSB_2D_ROT_NONE			(0 << 25)
++#define PSB_2D_ROT_90DEGS		(1 << 25)
++#define PSB_2D_ROT_180DEGS		(2 << 25)
++#define PSB_2D_ROT_270DEGS		(3 << 25)
++
++#define PSB_2D_COPYORDER_MASK		(3 << 23)
++#define PSB_2D_COPYORDER_CLRMASK	(~PSB_2D_COPYORDER_MASK)
++#define PSB_2D_COPYORDER_TL2BR		(0 << 23)
++#define PSB_2D_COPYORDER_BR2TL		(1 << 23)
++#define PSB_2D_COPYORDER_TR2BL		(2 << 23)
++#define PSB_2D_COPYORDER_BL2TR		(3 << 23)
++
++#define PSB_2D_DSTCK_CLRMASK		(0xFF9FFFFF)
++#define PSB_2D_DSTCK_DISABLE		(0x00000000)
++#define PSB_2D_DSTCK_PASS		(0x00200000)
++#define PSB_2D_DSTCK_REJECT		(0x00400000)
++
++#define PSB_2D_SRCCK_CLRMASK		(0xFFE7FFFF)
++#define PSB_2D_SRCCK_DISABLE		(0x00000000)
++#define PSB_2D_SRCCK_PASS		(0x00080000)
++#define PSB_2D_SRCCK_REJECT		(0x00100000)
++
++#define PSB_2D_CLIP_ENABLE		(0x00040000)
++
++#define PSB_2D_ALPHA_ENABLE		(0x00020000)
++
++#define PSB_2D_PAT_CLRMASK		(0xFFFEFFFF)
++#define PSB_2D_PAT_MASK			(0x00010000)
++#define PSB_2D_USE_PAT			(0x00010000)
++#define PSB_2D_USE_FILL			(0x00000000)
++/*
++ * Tungsten Graphics note on rop codes: If rop A and rop B are
++ * identical, the mask surface will not be read and need not be
++ * set up.
++ */
++
++#define PSB_2D_ROP3B_MASK		(0x0000FF00)
++#define PSB_2D_ROP3B_CLRMASK		(0xFFFF00FF)
++#define PSB_2D_ROP3B_SHIFT		(8)
++/* rop code A */
++#define PSB_2D_ROP3A_MASK		(0x000000FF)
++#define PSB_2D_ROP3A_CLRMASK		(0xFFFFFF00)
++#define PSB_2D_ROP3A_SHIFT		(0)
++
++#define PSB_2D_ROP4_MASK		(0x0000FFFF)
++/*
++ *	DWORD0:	(Only pass if Pattern control == Use Fill Colour)
++ *	Fill Colour RGBA8888
++ */
++#define PSB_2D_FILLCOLOUR_MASK		(0xFFFFFFFF)
++#define PSB_2D_FILLCOLOUR_SHIFT		(0)
++/*
++ *	DWORD1: (Always Present)
++ *	X Start (Dest)
++ *	Y Start (Dest)
++ */
++#define PSB_2D_DST_XSTART_MASK		(0x00FFF000)
++#define PSB_2D_DST_XSTART_CLRMASK	(0xFF000FFF)
++#define PSB_2D_DST_XSTART_SHIFT		(12)
++#define PSB_2D_DST_YSTART_MASK		(0x00000FFF)
++#define PSB_2D_DST_YSTART_CLRMASK	(0xFFFFF000)
++#define PSB_2D_DST_YSTART_SHIFT		(0)
++/*
++ *	DWORD2: (Always Present)
++ *	X Size (Dest)
++ *	Y Size (Dest)
++ */
++#define PSB_2D_DST_XSIZE_MASK		(0x00FFF000)
++#define PSB_2D_DST_XSIZE_CLRMASK	(0xFF000FFF)
++#define PSB_2D_DST_XSIZE_SHIFT		(12)
++#define PSB_2D_DST_YSIZE_MASK		(0x00000FFF)
++#define PSB_2D_DST_YSIZE_CLRMASK	(0xFFFFF000)
++#define PSB_2D_DST_YSIZE_SHIFT		(0)
++
++/*
++ * Source Surface (PSB_2D_SRC_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++
++#define PSB_2D_SRC_FORMAT_MASK		(0x00078000)
++#define PSB_2D_SRC_1_PAL		(0x00000000)
++#define PSB_2D_SRC_2_PAL		(0x00008000)
++#define PSB_2D_SRC_4_PAL		(0x00010000)
++#define PSB_2D_SRC_8_PAL		(0x00018000)
++#define PSB_2D_SRC_8_ALPHA		(0x00020000)
++#define PSB_2D_SRC_4_ALPHA		(0x00028000)
++#define PSB_2D_SRC_332RGB		(0x00030000)
++#define PSB_2D_SRC_4444ARGB		(0x00038000)
++#define PSB_2D_SRC_555RGB		(0x00040000)
++#define PSB_2D_SRC_1555ARGB		(0x00048000)
++#define PSB_2D_SRC_565RGB		(0x00050000)
++#define PSB_2D_SRC_0888ARGB		(0x00058000)
++#define PSB_2D_SRC_8888ARGB		(0x00060000)
++#define PSB_2D_SRC_8888UYVY		(0x00068000)
++#define PSB_2D_SRC_RESERVED		(0x00070000)
++#define PSB_2D_SRC_1555ARGB_LOOKUP	(0x00078000)
++
++
++#define PSB_2D_SRC_STRIDE_MASK		(0x00007FFF)
++#define PSB_2D_SRC_STRIDE_CLRMASK	(0xFFFF8000)
++#define PSB_2D_SRC_STRIDE_SHIFT		(0)
++/*
++ *  WORD 1 - Base Address
++ */
++#define PSB_2D_SRC_ADDR_MASK		(0x0FFFFFFC)
++#define PSB_2D_SRC_ADDR_CLRMASK		(0x00000003)
++#define PSB_2D_SRC_ADDR_SHIFT		(2)
++#define PSB_2D_SRC_ADDR_ALIGNSHIFT	(2)
++
++/*
++ * Pattern Surface (PSB_2D_PAT_SURF_BH)
++ */
++/*
++ *  WORD 0
++ */
++
++#define PSB_2D_PAT_FORMAT_MASK		(0x00078000)
++#define PSB_2D_PAT_1_PAL		(0x00000000)
++#define PSB_2D_PAT_2_PAL		(0x00008000)
++#define PSB_2D_PAT_4_PAL		(0x00010000)
++#define PSB_2D_PAT_8_PAL		(0x00018000)
++#define PSB_2D_PAT_8_ALPHA		(0x00020000)
++#define PSB_2D_PAT_4_ALPHA		(0x00028000)
++#define PSB_2D_PAT_332RGB		(0x00030000)
++#define PSB_2D_PAT_4444ARGB		(0x00038000)
++#define PSB_2D_PAT_555RGB		(0x00040000)
++#define PSB_2D_PAT_1555ARGB		(0x00048000)
++#define PSB_2D_PAT_565RGB		(0x00050000)
++#define PSB_2D_PAT_0888ARGB		(0x00058000)
++#define PSB_2D_PAT_8888ARGB		(0x00060000)
++
++#define PSB_2D_PAT_STRIDE_MASK		(0x00007FFF)
++#define PSB_2D_PAT_STRIDE_CLRMASK	(0xFFFF8000)
++#define PSB_2D_PAT_STRIDE_SHIFT		(0)
++/*
++ *  WORD 1 - Base Address
++ */
++#define PSB_2D_PAT_ADDR_MASK		(0x0FFFFFFC)
++#define PSB_2D_PAT_ADDR_CLRMASK		(0x00000003)
++#define PSB_2D_PAT_ADDR_SHIFT		(2)
++#define PSB_2D_PAT_ADDR_ALIGNSHIFT	(2)
++
++/*
++ * Destination Surface (PSB_2D_DST_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++
++#define PSB_2D_DST_FORMAT_MASK		(0x00078000)
++#define PSB_2D_DST_332RGB		(0x00030000)
++#define PSB_2D_DST_4444ARGB		(0x00038000)
++#define PSB_2D_DST_555RGB		(0x00040000)
++#define PSB_2D_DST_1555ARGB		(0x00048000)
++#define PSB_2D_DST_565RGB		(0x00050000)
++#define PSB_2D_DST_0888ARGB		(0x00058000)
++#define PSB_2D_DST_8888ARGB		(0x00060000)
++#define PSB_2D_DST_8888AYUV		(0x00070000)
++
++#define PSB_2D_DST_STRIDE_MASK		(0x00007FFF)
++#define PSB_2D_DST_STRIDE_CLRMASK	(0xFFFF8000)
++#define PSB_2D_DST_STRIDE_SHIFT		(0)
++/*
++ * WORD 1 - Base Address
++ */
++#define PSB_2D_DST_ADDR_MASK		(0x0FFFFFFC)
++#define PSB_2D_DST_ADDR_CLRMASK		(0x00000003)
++#define PSB_2D_DST_ADDR_SHIFT		(2)
++#define PSB_2D_DST_ADDR_ALIGNSHIFT	(2)
++
++/*
++ * Mask Surface (PSB_2D_MASK_SURF_BH)
++ */
++/*
++ * WORD 0
++ */
++#define PSB_2D_MASK_STRIDE_MASK		(0x00007FFF)
++#define PSB_2D_MASK_STRIDE_CLRMASK	(0xFFFF8000)
++#define PSB_2D_MASK_STRIDE_SHIFT	(0)
++/*
++ *  WORD 1 - Base Address
++ */
++#define PSB_2D_MASK_ADDR_MASK		(0x0FFFFFFC)
++#define PSB_2D_MASK_ADDR_CLRMASK	(0x00000003)
++#define PSB_2D_MASK_ADDR_SHIFT		(2)
++#define PSB_2D_MASK_ADDR_ALIGNSHIFT	(2)
++
++/*
++ * Source Palette (PSB_2D_SRC_PAL_BH)
++ */
++
++#define PSB_2D_SRCPAL_ADDR_SHIFT	(0)
++#define PSB_2D_SRCPAL_ADDR_CLRMASK	(0xF0000007)
++#define PSB_2D_SRCPAL_ADDR_MASK		(0x0FFFFFF8)
++#define PSB_2D_SRCPAL_BYTEALIGN		(1024)
++
++/*
++ * Pattern Palette (PSB_2D_PAT_PAL_BH)
++ */
++
++#define PSB_2D_PATPAL_ADDR_SHIFT	(0)
++#define PSB_2D_PATPAL_ADDR_CLRMASK	(0xF0000007)
++#define PSB_2D_PATPAL_ADDR_MASK		(0x0FFFFFF8)
++#define PSB_2D_PATPAL_BYTEALIGN		(1024)
++
++/*
++ * Rop3 Codes (2 LS bytes)
++ */
++
++#define PSB_2D_ROP3_SRCCOPY		(0xCCCC)
++#define PSB_2D_ROP3_PATCOPY		(0xF0F0)
++#define PSB_2D_ROP3_WHITENESS		(0xFFFF)
++#define PSB_2D_ROP3_BLACKNESS		(0x0000)
++#define PSB_2D_ROP3_SRC			(0xCC)
++#define PSB_2D_ROP3_PAT			(0xF0)
++#define PSB_2D_ROP3_DST			(0xAA)
++
++/*
++ * Sizes.
++ */
++
++#define PSB_SCENE_HW_COOKIE_SIZE	16
++#define PSB_TA_MEM_HW_COOKIE_SIZE	16
++
++/*
++ * Scene stuff.
++ */
++
++#define PSB_NUM_HW_SCENES		2
++
++/*
++ * Scheduler completion actions.
++ */
++
++#define PSB_RASTER_BLOCK		0
++#define PSB_RASTER			1
++#define PSB_RETURN			2
++#define PSB_TA				3
++
++/* Power management */
++#define PSB_PUNIT_PORT			0x04
++#define PSB_OSPMBA			0x78
++#define PSB_APMBA			0x7a
++#define PSB_APM_CMD			0x0
++#define PSB_APM_STS			0x04
++#define PSB_PWRGT_VID_ENC_MASK		0x30
++#define PSB_PWRGT_VID_DEC_MASK		0xc
++#define PSB_PWRGT_GL3_MASK		0xc0
++
++#define PSB_PM_SSC			0x20
++#define PSB_PM_SSS			0x30
++#define PSB_PWRGT_DISPLAY_MASK		0xc /*on a different BA than video/gfx*/
++#define MDFLD_PWRGT_DISPLAY_A_CNTR	0x0000000c
++#define MDFLD_PWRGT_DISPLAY_B_CNTR	0x0000c000
++#define MDFLD_PWRGT_DISPLAY_C_CNTR	0x00030000
++#define MDFLD_PWRGT_DISP_MIPI_CNTR	0x000c0000
++#define MDFLD_PWRGT_DISPLAY_CNTR    (MDFLD_PWRGT_DISPLAY_A_CNTR | MDFLD_PWRGT_DISPLAY_B_CNTR | MDFLD_PWRGT_DISPLAY_C_CNTR | MDFLD_PWRGT_DISP_MIPI_CNTR) /* 0x000fc00c */
++/* Display SSS register bits are different in A0 vs. B0 */
++#define PSB_PWRGT_GFX_MASK		0x3
++#define MDFLD_PWRGT_DISPLAY_A_STS	0x000000c0
++#define MDFLD_PWRGT_DISPLAY_B_STS	0x00000300
++#define MDFLD_PWRGT_DISPLAY_C_STS	0x00000c00
++#define PSB_PWRGT_GFX_MASK_B0		0xc3
++#define MDFLD_PWRGT_DISPLAY_A_STS_B0	0x0000000c
++#define MDFLD_PWRGT_DISPLAY_B_STS_B0	0x0000c000
++#define MDFLD_PWRGT_DISPLAY_C_STS_B0	0x00030000
++#define MDFLD_PWRGT_DISP_MIPI_STS	0x000c0000
++#define MDFLD_PWRGT_DISPLAY_STS_A0    (MDFLD_PWRGT_DISPLAY_A_STS | MDFLD_PWRGT_DISPLAY_B_STS | MDFLD_PWRGT_DISPLAY_C_STS | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
++#define MDFLD_PWRGT_DISPLAY_STS_B0    (MDFLD_PWRGT_DISPLAY_A_STS_B0 | MDFLD_PWRGT_DISPLAY_B_STS_B0 | MDFLD_PWRGT_DISPLAY_C_STS_B0 | MDFLD_PWRGT_DISP_MIPI_STS) /* 0x000fc00c */
++#endif
+diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+new file mode 100644
+index 0000000..4a07ab5
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+@@ -0,0 +1,829 @@
++/*
++ * Copyright © 2011 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "mdfld_dsi_dpi.h"
++#include "mdfld_output.h"
++#include "mdfld_dsi_pkg_sender.h"
++#include "tc35876x-dsi-lvds.h"
++#include <linux/i2c/tc35876x.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <asm/intel_scu_ipc.h>
++
++static struct i2c_client *tc35876x_client;
++static struct i2c_client *cmi_lcd_i2c_client;
++
++#define FLD_MASK(start, end)	(((1 << ((start) - (end) + 1)) - 1) << (end))
++#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
++
++/* DSI D-PHY Layer Registers */
++#define D0W_DPHYCONTTX		0x0004
++#define CLW_DPHYCONTRX		0x0020
++#define D0W_DPHYCONTRX		0x0024
++#define D1W_DPHYCONTRX		0x0028
++#define D2W_DPHYCONTRX		0x002C
++#define D3W_DPHYCONTRX		0x0030
++#define COM_DPHYCONTRX		0x0038
++#define CLW_CNTRL		0x0040
++#define D0W_CNTRL		0x0044
++#define D1W_CNTRL		0x0048
++#define D2W_CNTRL		0x004C
++#define D3W_CNTRL		0x0050
++#define DFTMODE_CNTRL		0x0054
++
++/* DSI PPI Layer Registers */
++#define PPI_STARTPPI		0x0104
++#define PPI_BUSYPPI		0x0108
++#define PPI_LINEINITCNT		0x0110
++#define PPI_LPTXTIMECNT		0x0114
++#define PPI_LANEENABLE		0x0134
++#define PPI_TX_RX_TA		0x013C
++#define PPI_CLS_ATMR		0x0140
++#define PPI_D0S_ATMR		0x0144
++#define PPI_D1S_ATMR		0x0148
++#define PPI_D2S_ATMR		0x014C
++#define PPI_D3S_ATMR		0x0150
++#define PPI_D0S_CLRSIPOCOUNT	0x0164
++#define PPI_D1S_CLRSIPOCOUNT	0x0168
++#define PPI_D2S_CLRSIPOCOUNT	0x016C
++#define PPI_D3S_CLRSIPOCOUNT	0x0170
++#define CLS_PRE			0x0180
++#define D0S_PRE			0x0184
++#define D1S_PRE			0x0188
++#define D2S_PRE			0x018C
++#define D3S_PRE			0x0190
++#define CLS_PREP		0x01A0
++#define D0S_PREP		0x01A4
++#define D1S_PREP		0x01A8
++#define D2S_PREP		0x01AC
++#define D3S_PREP		0x01B0
++#define CLS_ZERO		0x01C0
++#define D0S_ZERO		0x01C4
++#define D1S_ZERO		0x01C8
++#define D2S_ZERO		0x01CC
++#define D3S_ZERO		0x01D0
++#define PPI_CLRFLG		0x01E0
++#define PPI_CLRSIPO		0x01E4
++#define HSTIMEOUT		0x01F0
++#define HSTIMEOUTENABLE		0x01F4
++
++/* DSI Protocol Layer Registers */
++#define DSI_STARTDSI		0x0204
++#define DSI_BUSYDSI		0x0208
++#define DSI_LANEENABLE		0x0210
++#define DSI_LANESTATUS0		0x0214
++#define DSI_LANESTATUS1		0x0218
++#define DSI_INTSTATUS		0x0220
++#define DSI_INTMASK		0x0224
++#define DSI_INTCLR		0x0228
++#define DSI_LPTXTO		0x0230
++
++/* DSI General Registers */
++#define DSIERRCNT		0x0300
++
++/* DSI Application Layer Registers */
++#define APLCTRL			0x0400
++#define RDPKTLN			0x0404
++
++/* Video Path Registers */
++#define VPCTRL			0x0450
++#define HTIM1			0x0454
++#define HTIM2			0x0458
++#define VTIM1			0x045C
++#define VTIM2			0x0460
++#define VFUEN			0x0464
++
++/* LVDS Registers */
++#define LVMX0003		0x0480
++#define LVMX0407		0x0484
++#define LVMX0811		0x0488
++#define LVMX1215		0x048C
++#define LVMX1619		0x0490
++#define LVMX2023		0x0494
++#define LVMX2427		0x0498
++#define LVCFG			0x049C
++#define LVPHY0			0x04A0
++#define LVPHY1			0x04A4
++
++/* System Registers */
++#define SYSSTAT			0x0500
++#define SYSRST			0x0504
++
++/* GPIO Registers */
++/*#define GPIOC			0x0520*/
++#define GPIOO			0x0524
++#define GPIOI			0x0528
++
++/* I2C Registers */
++#define I2CTIMCTRL		0x0540
++#define I2CMADDR		0x0544
++#define WDATAQ			0x0548
++#define RDATAQ			0x054C
++
++/* Chip/Rev Registers */
++#define IDREG			0x0580
++
++/* Debug Registers */
++#define DEBUG00			0x05A0
++#define DEBUG01			0x05A4
++
++/* Panel CABC registers */
++#define PANEL_PWM_CONTROL	0x90
++#define PANEL_FREQ_DIVIDER_HI	0x91
++#define PANEL_FREQ_DIVIDER_LO	0x92
++#define PANEL_DUTY_CONTROL	0x93
++#define PANEL_MODIFY_RGB	0x94
++#define PANEL_FRAMERATE_CONTROL	0x96
++#define PANEL_PWM_MIN		0x97
++#define PANEL_PWM_REF		0x98
++#define PANEL_PWM_MAX		0x99
++#define PANEL_ALLOW_DISTORT	0x9A
++#define PANEL_BYPASS_PWMI	0x9B
++
++/* Panel color management registers */
++#define PANEL_CM_ENABLE		0x700
++#define PANEL_CM_HUE		0x701
++#define PANEL_CM_SATURATION	0x702
++#define PANEL_CM_INTENSITY	0x703
++#define PANEL_CM_BRIGHTNESS	0x704
++#define PANEL_CM_CE_ENABLE	0x705
++#define PANEL_CM_PEAK_EN	0x710
++#define PANEL_CM_GAIN		0x711
++#define PANEL_CM_HUETABLE_START	0x730
++#define PANEL_CM_HUETABLE_END	0x747 /* inclusive */
++
++/* Input muxing for registers LVMX0003...LVMX2427 */
++enum {
++	INPUT_R0,	/* 0 */
++	INPUT_R1,
++	INPUT_R2,
++	INPUT_R3,
++	INPUT_R4,
++	INPUT_R5,
++	INPUT_R6,
++	INPUT_R7,
++	INPUT_G0,	/* 8 */
++	INPUT_G1,
++	INPUT_G2,
++	INPUT_G3,
++	INPUT_G4,
++	INPUT_G5,
++	INPUT_G6,
++	INPUT_G7,
++	INPUT_B0,	/* 16 */
++	INPUT_B1,
++	INPUT_B2,
++	INPUT_B3,
++	INPUT_B4,
++	INPUT_B5,
++	INPUT_B6,
++	INPUT_B7,
++	INPUT_HSYNC,	/* 24 */
++	INPUT_VSYNC,
++	INPUT_DE,
++	LOGIC_0,
++	/* 28...31 undefined */
++};
++
++#define INPUT_MUX(lvmx03, lvmx02, lvmx01, lvmx00)		\
++	(FLD_VAL(lvmx03, 29, 24) | FLD_VAL(lvmx02, 20, 16) |	\
++	FLD_VAL(lvmx01, 12, 8) | FLD_VAL(lvmx00, 4, 0))
++
++/**
++ * tc35876x_regw - Write DSI-LVDS bridge register using I2C
++ * @client: struct i2c_client to use
++ * @reg: register address
++ * @value: value to write
++ *
++ * Returns 0 on success, or a negative error value.
++ */
++static int tc35876x_regw(struct i2c_client *client, u16 reg, u32 value)
++{
++	int r;
++	u8 tx_data[] = {
++		/* NOTE: Register address big-endian, data little-endian. */
++		(reg >> 8) & 0xff,
++		reg & 0xff,
++		value & 0xff,
++		(value >> 8) & 0xff,
++		(value >> 16) & 0xff,
++		(value >> 24) & 0xff,
++	};
++	struct i2c_msg msgs[] = {
++		{
++			.addr = client->addr,
++			.flags = 0,
++			.buf = tx_data,
++			.len = ARRAY_SIZE(tx_data),
++		},
++	};
++
++	r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
++	if (r < 0) {
++		dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x error %d\n",
++			__func__, reg, value, r);
++		return r;
++	}
++
++	if (r < ARRAY_SIZE(msgs)) {
++		dev_err(&client->dev, "%s: reg 0x%04x val 0x%08x msgs %d\n",
++			__func__, reg, value, r);
++		return -EAGAIN;
++	}
++
++	dev_dbg(&client->dev, "%s: reg 0x%04x val 0x%08x\n",
++			__func__, reg, value);
++
++	return 0;
++}
++
++/**
++ * tc35876x_regr - Read DSI-LVDS bridge register using I2C
++ * @client: struct i2c_client to use
++ * @reg: register address
++ * @value: pointer for storing the value
++ *
++ * Returns 0 on success, or a negative error value.
++ */
++static int tc35876x_regr(struct i2c_client *client, u16 reg, u32 *value)
++{
++	int r;
++	u8 tx_data[] = {
++		(reg >> 8) & 0xff,
++		reg & 0xff,
++	};
++	u8 rx_data[4];
++	struct i2c_msg msgs[] = {
++		{
++			.addr = client->addr,
++			.flags = 0,
++			.buf = tx_data,
++			.len = ARRAY_SIZE(tx_data),
++		},
++		{
++			.addr = client->addr,
++			.flags = I2C_M_RD,
++			.buf = rx_data,
++			.len = ARRAY_SIZE(rx_data),
++		 },
++	};
++
++	r = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
++	if (r < 0) {
++		dev_err(&client->dev, "%s: reg 0x%04x error %d\n", __func__,
++			reg, r);
++		return r;
++	}
++
++	if (r < ARRAY_SIZE(msgs)) {
++		dev_err(&client->dev, "%s: reg 0x%04x msgs %d\n", __func__,
++			reg, r);
++		return -EAGAIN;
++	}
++
++	*value = rx_data[0] << 24 | rx_data[1] << 16 |
++		rx_data[2] << 8 | rx_data[3];
++
++	dev_dbg(&client->dev, "%s: reg 0x%04x value 0x%08x\n", __func__,
++		reg, *value);
++
++	return 0;
++}
++
++void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state)
++{
++	struct tc35876x_platform_data *pdata;
++
++	if (WARN(!tc35876x_client, "%s called before probe", __func__))
++		return;
++
++	dev_dbg(&tc35876x_client->dev, "%s: state %d\n", __func__, state);
++
++	pdata = dev_get_platdata(&tc35876x_client->dev);
++
++	if (pdata->gpio_bridge_reset == -1)
++		return;
++
++	if (state) {
++		gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0);
++		mdelay(10);
++	} else {
++		/* Pull MIPI Bridge reset pin to Low */
++		gpio_set_value_cansleep(pdata->gpio_bridge_reset, 0);
++		mdelay(20);
++		/* Pull MIPI Bridge reset pin to High */
++		gpio_set_value_cansleep(pdata->gpio_bridge_reset, 1);
++		mdelay(40);
++	}
++}
++
++void tc35876x_configure_lvds_bridge(struct drm_device *dev)
++{
++	struct i2c_client *i2c = tc35876x_client;
++	u32 ppi_lptxtimecnt;
++	u32 txtagocnt;
++	u32 txtasurecnt;
++	u32 id;
++
++	if (WARN(!tc35876x_client, "%s called before probe", __func__))
++		return;
++
++	dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
++
++	if (!tc35876x_regr(i2c, IDREG, &id))
++		dev_info(&tc35876x_client->dev, "tc35876x ID 0x%08x\n", id);
++	else
++		dev_err(&tc35876x_client->dev, "Cannot read ID\n");
++
++	ppi_lptxtimecnt = 4;
++	txtagocnt = (5 * ppi_lptxtimecnt - 3) / 4;
++	txtasurecnt = 3 * ppi_lptxtimecnt / 2;
++	tc35876x_regw(i2c, PPI_TX_RX_TA, FLD_VAL(txtagocnt, 26, 16) |
++		FLD_VAL(txtasurecnt, 10, 0));
++	tc35876x_regw(i2c, PPI_LPTXTIMECNT, FLD_VAL(ppi_lptxtimecnt, 10, 0));
++
++	tc35876x_regw(i2c, PPI_D0S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
++	tc35876x_regw(i2c, PPI_D1S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
++	tc35876x_regw(i2c, PPI_D2S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
++	tc35876x_regw(i2c, PPI_D3S_CLRSIPOCOUNT, FLD_VAL(1, 5, 0));
++
++	/* Enabling MIPI & PPI lanes, Enable 4 lanes */
++	tc35876x_regw(i2c, PPI_LANEENABLE,
++		BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
++	tc35876x_regw(i2c, DSI_LANEENABLE,
++		BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0));
++	tc35876x_regw(i2c, PPI_STARTPPI, BIT(0));
++	tc35876x_regw(i2c, DSI_STARTDSI, BIT(0));
++
++	/* Setting LVDS output frequency */
++	tc35876x_regw(i2c, LVPHY0, FLD_VAL(1, 20, 16) |
++		FLD_VAL(2, 15, 14) | FLD_VAL(6, 4, 0)); /* 0x00048006 */
++
++	/* Setting video panel control register,0x00000120 VTGen=ON ?!?!? */
++	tc35876x_regw(i2c, VPCTRL, BIT(8) | BIT(5));
++
++	/* Horizontal back porch and horizontal pulse width. 0x00280028 */
++	tc35876x_regw(i2c, HTIM1, FLD_VAL(40, 24, 16) | FLD_VAL(40, 8, 0));
++
++	/* Horizontal front porch and horizontal active video size. 0x00500500*/
++	tc35876x_regw(i2c, HTIM2, FLD_VAL(80, 24, 16) | FLD_VAL(1280, 10, 0));
++
++	/* Vertical back porch and vertical sync pulse width. 0x000e000a */
++	tc35876x_regw(i2c, VTIM1, FLD_VAL(14, 23, 16) | FLD_VAL(10, 7, 0));
++
++	/* Vertical front porch and vertical display size. 0x000e0320 */
++	tc35876x_regw(i2c, VTIM2, FLD_VAL(14, 23, 16) | FLD_VAL(800, 10, 0));
++
++	/* Set above HTIM1, HTIM2, VTIM1, and VTIM2 at next VSYNC. */
++	tc35876x_regw(i2c, VFUEN, BIT(0));
++
++	/* Soft reset LCD controller. */
++	tc35876x_regw(i2c, SYSRST, BIT(2));
++
++	/* LVDS-TX input muxing */
++	tc35876x_regw(i2c, LVMX0003,
++		INPUT_MUX(INPUT_R5, INPUT_R4, INPUT_R3, INPUT_R2));
++	tc35876x_regw(i2c, LVMX0407,
++		INPUT_MUX(INPUT_G2, INPUT_R7, INPUT_R1, INPUT_R6));
++	tc35876x_regw(i2c, LVMX0811,
++		INPUT_MUX(INPUT_G1, INPUT_G0, INPUT_G4, INPUT_G3));
++	tc35876x_regw(i2c, LVMX1215,
++		INPUT_MUX(INPUT_B2, INPUT_G7, INPUT_G6, INPUT_G5));
++	tc35876x_regw(i2c, LVMX1619,
++		INPUT_MUX(INPUT_B4, INPUT_B3, INPUT_B1, INPUT_B0));
++	tc35876x_regw(i2c, LVMX2023,
++		INPUT_MUX(LOGIC_0,  INPUT_B7, INPUT_B6, INPUT_B5));
++	tc35876x_regw(i2c, LVMX2427,
++		INPUT_MUX(INPUT_R0, INPUT_DE, INPUT_VSYNC, INPUT_HSYNC));
++
++	/* Enable LVDS transmitter. */
++	tc35876x_regw(i2c, LVCFG, BIT(0));
++
++	/* Clear notifications. Don't write reserved bits. Was write 0xffffffff
++	 * to 0x0288, must be in error?! */
++	tc35876x_regw(i2c, DSI_INTCLR, FLD_MASK(31, 30) | FLD_MASK(22, 0));
++}
++
++#define GPIOPWMCTRL	0x38F
++#define PWM0CLKDIV0	0x62 /* low byte */
++#define PWM0CLKDIV1	0x61 /* high byte */
++
++#define SYSTEMCLK	19200000UL /* 19.2 MHz */
++#define PWM_FREQUENCY	9600 /* Hz */
++
++/* f = baseclk / (clkdiv + 1) => clkdiv = (baseclk - f) / f */
++static inline u16 calc_clkdiv(unsigned long baseclk, unsigned int f)
++{
++	return (baseclk - f) / f;
++}
++
++static void tc35876x_brightness_init(struct drm_device *dev)
++{
++	int ret;
++	u8 pwmctrl;
++	u16 clkdiv;
++
++	/* Make sure the PWM reference is the 19.2 MHz system clock. Read first
++	 * instead of setting directly to catch potential conflicts between PWM
++	 * users. */
++	ret = intel_scu_ipc_ioread8(GPIOPWMCTRL, &pwmctrl);
++	if (ret || pwmctrl != 0x01) {
++		if (ret)
++			dev_err(&dev->pdev->dev, "GPIOPWMCTRL read failed\n");
++		else
++			dev_warn(&dev->pdev->dev, "GPIOPWMCTRL was not set to system clock (pwmctrl = 0x%02x)\n", pwmctrl);
++
++		ret = intel_scu_ipc_iowrite8(GPIOPWMCTRL, 0x01);
++		if (ret)
++			dev_err(&dev->pdev->dev, "GPIOPWMCTRL set failed\n");
++	}
++
++	clkdiv = calc_clkdiv(SYSTEMCLK, PWM_FREQUENCY);
++
++	ret = intel_scu_ipc_iowrite8(PWM0CLKDIV1, (clkdiv >> 8) & 0xff);
++	if (!ret)
++		ret = intel_scu_ipc_iowrite8(PWM0CLKDIV0, clkdiv & 0xff);
++
++	if (ret)
++		dev_err(&dev->pdev->dev, "PWM0CLKDIV set failed\n");
++	else
++		dev_dbg(&dev->pdev->dev, "PWM0CLKDIV set to 0x%04x (%d Hz)\n",
++			clkdiv, PWM_FREQUENCY);
++}
++
++#define PWM0DUTYCYCLE			0x67
++
++void tc35876x_brightness_control(struct drm_device *dev, int level)
++{
++	int ret;
++	u8 duty_val;
++	u8 panel_duty_val;
++
++	level = clamp(level, 0, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
++
++	/* PWM duty cycle 0x00...0x63 corresponds to 0...99% */
++	duty_val = level * 0x63 / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL;
++
++	/* I won't pretend to understand this formula. The panel spec is quite
++	 * bad engrish.
++	 */
++	panel_duty_val = (2 * level - 100) * 0xA9 /
++			 MDFLD_DSI_BRIGHTNESS_MAX_LEVEL + 0x56;
++
++	ret = intel_scu_ipc_iowrite8(PWM0DUTYCYCLE, duty_val);
++	if (ret)
++		dev_err(&tc35876x_client->dev, "%s: ipc write fail\n",
++			__func__);
++
++	if (cmi_lcd_i2c_client) {
++		ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
++						PANEL_PWM_MAX, panel_duty_val);
++		if (ret < 0)
++			dev_err(&cmi_lcd_i2c_client->dev, "%s: i2c write failed\n",
++				__func__);
++	}
++}
++
++void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev)
++{
++	struct tc35876x_platform_data *pdata;
++
++	if (WARN(!tc35876x_client, "%s called before probe", __func__))
++		return;
++
++	dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
++
++	pdata = dev_get_platdata(&tc35876x_client->dev);
++
++	if (pdata->gpio_panel_bl_en != -1)
++		gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 0);
++
++	if (pdata->gpio_panel_vadd != -1)
++		gpio_set_value_cansleep(pdata->gpio_panel_vadd, 0);
++}
++
++void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev)
++{
++	struct tc35876x_platform_data *pdata;
++	struct drm_psb_private *dev_priv = dev->dev_private;
++
++	if (WARN(!tc35876x_client, "%s called before probe", __func__))
++		return;
++
++	dev_dbg(&tc35876x_client->dev, "%s\n", __func__);
++
++	pdata = dev_get_platdata(&tc35876x_client->dev);
++
++	if (pdata->gpio_panel_vadd != -1) {
++		gpio_set_value_cansleep(pdata->gpio_panel_vadd, 1);
++		msleep(260);
++	}
++
++	if (cmi_lcd_i2c_client) {
++		int ret;
++		dev_dbg(&cmi_lcd_i2c_client->dev, "setting TCON\n");
++		/* Bit 4 is average_saving. Setting it to 1, the brightness is
++		 * referenced to the average of the frame content. 0 means
++		 * reference to the maximum of frame contents. Bits 3:0 are
++		 * allow_distort. When set to a nonzero value, all color values
++		 * between 255-allow_distort*2 and 255 are mapped to the
++		 * 255-allow_distort*2 value.
++		 */
++		ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
++						PANEL_ALLOW_DISTORT, 0x10);
++		if (ret < 0)
++			dev_err(&cmi_lcd_i2c_client->dev,
++				"i2c write failed (%d)\n", ret);
++		ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
++						PANEL_BYPASS_PWMI, 0);
++		if (ret < 0)
++			dev_err(&cmi_lcd_i2c_client->dev,
++				"i2c write failed (%d)\n", ret);
++		/* Set minimum brightness value - this is tunable */
++		ret = i2c_smbus_write_byte_data(cmi_lcd_i2c_client,
++						PANEL_PWM_MIN, 0x35);
++		if (ret < 0)
++			dev_err(&cmi_lcd_i2c_client->dev,
++				"i2c write failed (%d)\n", ret);
++	}
++
++	if (pdata->gpio_panel_bl_en != -1)
++		gpio_set_value_cansleep(pdata->gpio_panel_bl_en, 1);
++
++	tc35876x_brightness_control(dev, dev_priv->brightness_adjusted);
++}
++
++static struct drm_display_mode *tc35876x_get_config_mode(struct drm_device *dev)
++{
++	struct drm_display_mode *mode;
++
++	dev_dbg(&dev->pdev->dev, "%s\n", __func__);
++
++	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
++	if (!mode)
++		return NULL;
++
++	/* FIXME: do this properly. */
++	mode->hdisplay = 1280;
++	mode->vdisplay = 800;
++	mode->hsync_start = 1360;
++	mode->hsync_end = 1400;
++	mode->htotal = 1440;
++	mode->vsync_start = 814;
++	mode->vsync_end = 824;
++	mode->vtotal = 838;
++	mode->clock = 33324 << 1;
++
++	dev_info(&dev->pdev->dev, "hdisplay(w) = %d\n", mode->hdisplay);
++	dev_info(&dev->pdev->dev, "vdisplay(h) = %d\n", mode->vdisplay);
++	dev_info(&dev->pdev->dev, "HSS = %d\n", mode->hsync_start);
++	dev_info(&dev->pdev->dev, "HSE = %d\n", mode->hsync_end);
++	dev_info(&dev->pdev->dev, "htotal = %d\n", mode->htotal);
++	dev_info(&dev->pdev->dev, "VSS = %d\n", mode->vsync_start);
++	dev_info(&dev->pdev->dev, "VSE = %d\n", mode->vsync_end);
++	dev_info(&dev->pdev->dev, "vtotal = %d\n", mode->vtotal);
++	dev_info(&dev->pdev->dev, "clock = %d\n", mode->clock);
++
++	drm_mode_set_name(mode);
++	drm_mode_set_crtcinfo(mode, 0);
++
++	mode->type |= DRM_MODE_TYPE_PREFERRED;
++
++	return mode;
++}
++
++/* DV1 Active area 216.96 x 135.6 mm */
++#define DV1_PANEL_WIDTH 217
++#define DV1_PANEL_HEIGHT 136
++
++static int tc35876x_get_panel_info(struct drm_device *dev, int pipe,
++				struct panel_info *pi)
++{
++	if (!dev || !pi)
++		return -EINVAL;
++
++	pi->width_mm = DV1_PANEL_WIDTH;
++	pi->height_mm = DV1_PANEL_HEIGHT;
++
++	return 0;
++}
++
++static int tc35876x_bridge_probe(struct i2c_client *client,
++				const struct i2c_device_id *id)
++{
++	struct tc35876x_platform_data *pdata;
++
++	dev_info(&client->dev, "%s\n", __func__);
++
++	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
++		dev_err(&client->dev, "%s: i2c_check_functionality() failed\n",
++			__func__);
++		return -ENODEV;
++	}
++
++	pdata = dev_get_platdata(&client->dev);
++	if (!pdata) {
++		dev_err(&client->dev, "%s: no platform data\n", __func__);
++		return -ENODEV;
++	}
++
++	if (pdata->gpio_bridge_reset != -1) {
++		gpio_request(pdata->gpio_bridge_reset, "tc35876x bridge reset");
++		gpio_direction_output(pdata->gpio_bridge_reset, 0);
++	}
++
++	if (pdata->gpio_panel_bl_en != -1) {
++		gpio_request(pdata->gpio_panel_bl_en, "tc35876x panel bl en");
++		gpio_direction_output(pdata->gpio_panel_bl_en, 0);
++	}
++
++	if (pdata->gpio_panel_vadd != -1) {
++		gpio_request(pdata->gpio_panel_vadd, "tc35876x panel vadd");
++		gpio_direction_output(pdata->gpio_panel_vadd, 0);
++	}
++
++	tc35876x_client = client;
++
++	return 0;
++}
++
++static int tc35876x_bridge_remove(struct i2c_client *client)
++{
++	struct tc35876x_platform_data *pdata = dev_get_platdata(&client->dev);
++
++	dev_dbg(&client->dev, "%s\n", __func__);
++
++	if (pdata->gpio_bridge_reset != -1)
++		gpio_free(pdata->gpio_bridge_reset);
++
++	if (pdata->gpio_panel_bl_en != -1)
++		gpio_free(pdata->gpio_panel_bl_en);
++
++	if (pdata->gpio_panel_vadd != -1)
++		gpio_free(pdata->gpio_panel_vadd);
++
++	tc35876x_client = NULL;
++
++	return 0;
++}
++
++static const struct i2c_device_id tc35876x_bridge_id[] = {
++	{ "i2c_disp_brig", 0 },
++	{ }
++};
++MODULE_DEVICE_TABLE(i2c, tc35876x_bridge_id);
++
++static struct i2c_driver tc35876x_bridge_i2c_driver = {
++	.driver = {
++		.name = "i2c_disp_brig",
++	},
++	.id_table = tc35876x_bridge_id,
++	.probe = tc35876x_bridge_probe,
++	.remove = __devexit_p(tc35876x_bridge_remove),
++};
++
++/* LCD panel I2C */
++static int cmi_lcd_i2c_probe(struct i2c_client *client,
++			     const struct i2c_device_id *id)
++{
++	dev_info(&client->dev, "%s\n", __func__);
++
++	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
++		dev_err(&client->dev, "%s: i2c_check_functionality() failed\n",
++			__func__);
++		return -ENODEV;
++	}
++
++	cmi_lcd_i2c_client = client;
++
++	return 0;
++}
++
++static int cmi_lcd_i2c_remove(struct i2c_client *client)
++{
++	dev_dbg(&client->dev, "%s\n", __func__);
++
++	cmi_lcd_i2c_client = NULL;
++
++	return 0;
++}
++
++static const struct i2c_device_id cmi_lcd_i2c_id[] = {
++	{ "cmi-lcd", 0 },
++	{ }
++};
++MODULE_DEVICE_TABLE(i2c, cmi_lcd_i2c_id);
++
++static struct i2c_driver cmi_lcd_i2c_driver = {
++	.driver = {
++		.name = "cmi-lcd",
++	},
++	.id_table = cmi_lcd_i2c_id,
++	.probe = cmi_lcd_i2c_probe,
++	.remove = __devexit_p(cmi_lcd_i2c_remove),
++};
++
++/* HACK to create I2C device while it's not created by platform code */
++#define CMI_LCD_I2C_ADAPTER	2
++#define CMI_LCD_I2C_ADDR	0x60
++
++static int cmi_lcd_hack_create_device(void)
++{
++	struct i2c_adapter *adapter;
++	struct i2c_client *client;
++	struct i2c_board_info info = {
++		.type = "cmi-lcd",
++		.addr = CMI_LCD_I2C_ADDR,
++	};
++
++	pr_debug("%s\n", __func__);
++
++	adapter = i2c_get_adapter(CMI_LCD_I2C_ADAPTER);
++	if (!adapter) {
++		pr_err("%s: i2c_get_adapter(%d) failed\n", __func__,
++			CMI_LCD_I2C_ADAPTER);
++		return -EINVAL;
++	}
++
++	client = i2c_new_device(adapter, &info);
++	if (!client) {
++		pr_err("%s: i2c_new_device() failed\n", __func__);
++		i2c_put_adapter(adapter);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++static const struct drm_encoder_helper_funcs tc35876x_encoder_helper_funcs = {
++	.dpms = mdfld_dsi_dpi_dpms,
++	.mode_fixup = mdfld_dsi_dpi_mode_fixup,
++	.prepare = mdfld_dsi_dpi_prepare,
++	.mode_set = mdfld_dsi_dpi_mode_set,
++	.commit = mdfld_dsi_dpi_commit,
++};
++
++static const struct drm_encoder_funcs tc35876x_encoder_funcs = {
++	.destroy = drm_encoder_cleanup,
++};
++
++const struct panel_funcs mdfld_tc35876x_funcs = {
++	.encoder_funcs = &tc35876x_encoder_funcs,
++	.encoder_helper_funcs = &tc35876x_encoder_helper_funcs,
++	.get_config_mode = tc35876x_get_config_mode,
++	.get_panel_info = tc35876x_get_panel_info,
++};
++
++void tc35876x_init(struct drm_device *dev)
++{
++	int r;
++
++	dev_dbg(&dev->pdev->dev, "%s\n", __func__);
++
++	cmi_lcd_hack_create_device();
++
++	r = i2c_add_driver(&cmi_lcd_i2c_driver);
++	if (r < 0)
++		dev_err(&dev->pdev->dev,
++			"%s: i2c_add_driver() for %s failed (%d)\n",
++			__func__, cmi_lcd_i2c_driver.driver.name, r);
++
++	r = i2c_add_driver(&tc35876x_bridge_i2c_driver);
++	if (r < 0)
++		dev_err(&dev->pdev->dev,
++			"%s: i2c_add_driver() for %s failed (%d)\n",
++			__func__, tc35876x_bridge_i2c_driver.driver.name, r);
++
++	tc35876x_brightness_init(dev);
++}
++
++void tc35876x_exit(void)
++{
++	pr_debug("%s\n", __func__);
++
++	i2c_del_driver(&tc35876x_bridge_i2c_driver);
++
++	if (cmi_lcd_i2c_client)
++		i2c_del_driver(&cmi_lcd_i2c_driver);
++}
+diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h
+new file mode 100644
+index 0000000..b14b7f9
+--- /dev/null
++++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.h
+@@ -0,0 +1,38 @@
++/*
++ * Copyright © 2011 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __MDFLD_DSI_LVDS_BRIDGE_H__
++#define __MDFLD_DSI_LVDS_BRIDGE_H__
++
++void tc35876x_set_bridge_reset_state(struct drm_device *dev, int state);
++void tc35876x_configure_lvds_bridge(struct drm_device *dev);
++void tc35876x_brightness_control(struct drm_device *dev, int level);
++void tc35876x_toshiba_bridge_panel_off(struct drm_device *dev);
++void tc35876x_toshiba_bridge_panel_on(struct drm_device *dev);
++void tc35876x_init(struct drm_device *dev);
++void tc35876x_exit(void);
++
++extern const struct panel_funcs mdfld_tc35876x_funcs;
++
++#endif /*__MDFLD_DSI_LVDS_BRIDGE_H__*/
+diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
+index 07d55df..d3f2e87 100644
+--- a/drivers/gpu/drm/i2c/ch7006_drv.c
++++ b/drivers/gpu/drm/i2c/ch7006_drv.c
+@@ -252,10 +252,7 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
+ 
+ 	drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
+ 
+-	priv->scale_property = drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-						   "scale", 2);
+-	priv->scale_property->values[0] = 0;
+-	priv->scale_property->values[1] = 2;
++	priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
+ 
+ 	drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
+ 				      priv->select_subconnector);
+diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
+index 8f371e8..f920fb5 100644
+--- a/drivers/gpu/drm/i810/i810_dma.c
++++ b/drivers/gpu/drm/i810/i810_dma.c
+@@ -99,7 +99,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
+ 	buf_priv = buf->dev_private;
+ 
+ 	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
+-	vma->vm_file = filp;
+ 
+ 	buf_priv->currently_mapped = I810_BUF_MAPPED;
+ 
+@@ -130,6 +129,7 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
+ 	if (buf_priv->currently_mapped == I810_BUF_MAPPED)
+ 		return -EINVAL;
+ 
++	/* This is all entirely broken */
+ 	down_write(&current->mm->mmap_sem);
+ 	old_fops = file_priv->filp->f_op;
+ 	file_priv->filp->f_op = &i810_buffer_fops;
+@@ -158,11 +158,8 @@ static int i810_unmap_buffer(struct drm_buf *buf)
+ 	if (buf_priv->currently_mapped != I810_BUF_MAPPED)
+ 		return -EINVAL;
+ 
+-	down_write(&current->mm->mmap_sem);
+-	retcode = do_munmap(current->mm,
+-			    (unsigned long)buf_priv->virtual,
++	retcode = vm_munmap((unsigned long)buf_priv->virtual,
+ 			    (size_t) buf->total);
+-	up_write(&current->mm->mmap_sem);
+ 
+ 	buf_priv->currently_mapped = I810_BUF_UNMAPPED;
+ 	buf_priv->virtual = NULL;
+@@ -222,8 +219,6 @@ static int i810_dma_cleanup(struct drm_device *dev)
+ 			pci_free_consistent(dev->pdev, PAGE_SIZE,
+ 					    dev_priv->hw_status_page,
+ 					    dev_priv->dma_status_page);
+-			/* Need to rewrite hardware status page */
+-			I810_WRITE(0x02080, 0x1ffff000);
+ 		}
+ 		kfree(dev->dev_private);
+ 		dev->dev_private = NULL;
+@@ -1210,6 +1205,8 @@ int i810_driver_load(struct drm_device *dev, unsigned long flags)
+ 	dev->types[8] = _DRM_STAT_SECONDARY;
+ 	dev->types[9] = _DRM_STAT_DMA;
+ 
++	pci_set_master(dev->pdev);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
+index d4266bd..ec12f7d 100644
+--- a/drivers/gpu/drm/i810/i810_drv.c
++++ b/drivers/gpu/drm/i810/i810_drv.c
+@@ -43,6 +43,17 @@ static struct pci_device_id pciidlist[] = {
+ 	i810_PCI_IDS
+ };
+ 
++static const struct file_operations i810_driver_fops = {
++	.owner = THIS_MODULE,
++	.open = drm_open,
++	.release = drm_release,
++	.unlocked_ioctl = drm_ioctl,
++	.mmap = drm_mmap,
++	.poll = drm_poll,
++	.fasync = drm_fasync,
++	.llseek = noop_llseek,
++};
++
+ static struct drm_driver driver = {
+ 	.driver_features =
+ 	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
+@@ -55,17 +66,7 @@ static struct drm_driver driver = {
+ 	.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
+ 	.dma_quiescent = i810_driver_dma_quiescent,
+ 	.ioctls = i810_ioctls,
+-	.fops = {
+-		 .owner = THIS_MODULE,
+-		 .open = drm_open,
+-		 .release = drm_release,
+-		 .unlocked_ioctl = drm_ioctl,
+-		 .mmap = drm_mmap,
+-		 .poll = drm_poll,
+-		 .fasync = drm_fasync,
+-		 .llseek = noop_llseek,
+-	},
+-
++	.fops = &i810_driver_fops,
+ 	.name = DRIVER_NAME,
+ 	.desc = DRIVER_DESC,
+ 	.date = DRIVER_DATE,
+diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
+index 0ae6a7c..ce7fc77 100644
+--- a/drivers/gpu/drm/i915/Makefile
++++ b/drivers/gpu/drm/i915/Makefile
+@@ -3,7 +3,7 @@
+ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+ 
+ ccflags-y := -Iinclude/drm
+-i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
++i915-y := i915_drv.o i915_dma.o i915_irq.o \
+ 	  i915_debugfs.o \
+           i915_suspend.o \
+ 	  i915_gem.o \
+@@ -28,6 +28,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
+ 	  intel_dvo.o \
+ 	  intel_ringbuffer.o \
+ 	  intel_overlay.o \
++	  intel_sprite.o \
+ 	  intel_opregion.o \
+ 	  dvo_ch7xxx.o \
+ 	  dvo_ch7017.o \
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index d00f905..e6162a1 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -83,6 +83,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
+ 	B(supports_tv);
+ 	B(has_bsd_ring);
+ 	B(has_blt_ring);
++	B(has_llc);
+ #undef B
+ 
+ 	return 0;
+@@ -121,11 +122,11 @@ static const char *cache_level_str(int type)
+ static void
+ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+ {
+-	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
++	seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
+ 		   &obj->base,
+ 		   get_pin_flag(obj),
+ 		   get_tiling_flag(obj),
+-		   obj->base.size,
++		   obj->base.size / 1024,
+ 		   obj->base.read_domains,
+ 		   obj->base.write_domain,
+ 		   obj->last_rendering_seqno,
+@@ -339,7 +340,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+ 			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
+ 				   pipe, plane);
+ 		} else {
+-			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
++			if (!work->pending) {
+ 				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
+ 					   pipe, plane);
+ 			} else {
+@@ -350,7 +351,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
+ 				seq_printf(m, "Stall check enabled, ");
+ 			else
+ 				seq_printf(m, "Stall check waiting for page flip ioctl, ");
+-			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
++			seq_printf(m, "%d prepares\n", work->pending);
+ 
+ 			if (work->old_fb_obj) {
+ 				struct drm_i915_gem_object *obj = work->old_fb_obj;
+@@ -563,45 +564,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
+ 	return 0;
+ }
+ 
+-static void i915_dump_object(struct seq_file *m,
+-			     struct io_mapping *mapping,
+-			     struct drm_i915_gem_object *obj)
+-{
+-	int page, page_count, i;
+-
+-	page_count = obj->base.size / PAGE_SIZE;
+-	for (page = 0; page < page_count; page++) {
+-		u32 *mem = io_mapping_map_wc(mapping,
+-					     obj->gtt_offset + page * PAGE_SIZE);
+-		for (i = 0; i < PAGE_SIZE; i += 4)
+-			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
+-		io_mapping_unmap(mem);
+-	}
+-}
+-
+-static int i915_batchbuffer_info(struct seq_file *m, void *data)
+-{
+-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+-	struct drm_device *dev = node->minor->dev;
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	struct drm_i915_gem_object *obj;
+-	int ret;
+-
+-	ret = mutex_lock_interruptible(&dev->struct_mutex);
+-	if (ret)
+-		return ret;
+-
+-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+-		if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
+-		    seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+-		    i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
+-		}
+-	}
+-
+-	mutex_unlock(&dev->struct_mutex);
+-	return 0;
+-}
+-
+ static int i915_ringbuffer_data(struct seq_file *m, void *data)
+ {
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+@@ -653,7 +615,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
+ 	seq_printf(m, "  Size :    %08x\n", ring->size);
+ 	seq_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
+ 	seq_printf(m, "  NOPID :   %08x\n", I915_READ_NOPID(ring));
+-	if (IS_GEN6(dev)) {
++	if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ 		seq_printf(m, "  Sync 0 :   %08x\n", I915_READ_SYNC_0(ring));
+ 		seq_printf(m, "  Sync 1 :   %08x\n", I915_READ_SYNC_1(ring));
+ 	}
+@@ -668,9 +630,9 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
+ static const char *ring_str(int ring)
+ {
+ 	switch (ring) {
+-	case RING_RENDER: return " render";
+-	case RING_BSD: return " bsd";
+-	case RING_BLT: return " blt";
++	case RCS: return "render";
++	case VCS: return "bsd";
++	case BCS: return "blt";
+ 	default: return "";
+ 	}
+ }
+@@ -713,7 +675,7 @@ static void print_error_buffers(struct seq_file *m,
+ 	seq_printf(m, "%s [%d]:\n", name, count);
+ 
+ 	while (count--) {
+-		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s",
++		seq_printf(m, "  %08x %8u %04x %04x %08x%s%s%s%s%s%s%s",
+ 			   err->gtt_offset,
+ 			   err->size,
+ 			   err->read_domains,
+@@ -723,6 +685,7 @@ static void print_error_buffers(struct seq_file *m,
+ 			   tiling_flag(err->tiling),
+ 			   dirty_flag(err->dirty),
+ 			   purgeable_flag(err->purgeable),
++			   err->ring != -1 ? " " : "",
+ 			   ring_str(err->ring),
+ 			   cache_level_str(err->cache_level));
+ 
+@@ -736,6 +699,38 @@ static void print_error_buffers(struct seq_file *m,
+ 	}
+ }
+ 
++static void i915_ring_error_state(struct seq_file *m,
++				  struct drm_device *dev,
++				  struct drm_i915_error_state *error,
++				  unsigned ring)
++{
++	seq_printf(m, "%s command stream:\n", ring_str(ring));
++	seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
++	seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
++	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
++	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
++	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
++	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
++	if (ring == RCS && INTEL_INFO(dev)->gen >= 4) {
++		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
++		seq_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);
++	}
++	if (INTEL_INFO(dev)->gen >= 4)
++		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
++	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
++	if (INTEL_INFO(dev)->gen >= 6) {
++		seq_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
++		seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
++		seq_printf(m, "  SYNC_0: 0x%08x\n",
++			   error->semaphore_mboxes[ring][0]);
++		seq_printf(m, "  SYNC_1: 0x%08x\n",
++			   error->semaphore_mboxes[ring][1]);
++	}
++	seq_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
++	seq_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
++	seq_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
++}
++
+ static int i915_error_state(struct seq_file *m, void *unused)
+ {
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+@@ -743,7 +738,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct drm_i915_error_state *error;
+ 	unsigned long flags;
+-	int i, page, offset, elt;
++	int i, j, page, offset, elt;
+ 
+ 	spin_lock_irqsave(&dev_priv->error_lock, flags);
+ 	if (!dev_priv->first_error) {
+@@ -758,35 +753,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
+ 	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+ 	seq_printf(m, "EIR: 0x%08x\n", error->eir);
+ 	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
++
++	for (i = 0; i < dev_priv->num_fence_regs; i++)
++		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
++
+ 	if (INTEL_INFO(dev)->gen >= 6) {
+ 		seq_printf(m, "ERROR: 0x%08x\n", error->error);
+-		seq_printf(m, "Blitter command stream:\n");
+-		seq_printf(m, "  ACTHD:    0x%08x\n", error->bcs_acthd);
+-		seq_printf(m, "  IPEIR:    0x%08x\n", error->bcs_ipeir);
+-		seq_printf(m, "  IPEHR:    0x%08x\n", error->bcs_ipehr);
+-		seq_printf(m, "  INSTDONE: 0x%08x\n", error->bcs_instdone);
+-		seq_printf(m, "  seqno:    0x%08x\n", error->bcs_seqno);
+-		seq_printf(m, "Video (BSD) command stream:\n");
+-		seq_printf(m, "  ACTHD:    0x%08x\n", error->vcs_acthd);
+-		seq_printf(m, "  IPEIR:    0x%08x\n", error->vcs_ipeir);
+-		seq_printf(m, "  IPEHR:    0x%08x\n", error->vcs_ipehr);
+-		seq_printf(m, "  INSTDONE: 0x%08x\n", error->vcs_instdone);
+-		seq_printf(m, "  seqno:    0x%08x\n", error->vcs_seqno);
++		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+ 	}
+-	seq_printf(m, "Render command stream:\n");
+-	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
+-	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
+-	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
+-	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
+-	if (INTEL_INFO(dev)->gen >= 4) {
+-		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
+-		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
+-	}
+-	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
+-	seq_printf(m, "  seqno: 0x%08x\n", error->seqno);
+ 
+-	for (i = 0; i < dev_priv->num_fence_regs; i++)
+-		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
++	i915_ring_error_state(m, dev, error, RCS);
++	if (HAS_BLT(dev))
++		i915_ring_error_state(m, dev, error, BCS);
++	if (HAS_BSD(dev))
++		i915_ring_error_state(m, dev, error, VCS);
+ 
+ 	if (error->active_bo)
+ 		print_error_buffers(m, "Active",
+@@ -798,10 +778,10 @@ static int i915_error_state(struct seq_file *m, void *unused)
+ 				    error->pinned_bo,
+ 				    error->pinned_bo_count);
+ 
+-	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
+-		if (error->batchbuffer[i]) {
+-			struct drm_i915_error_object *obj = error->batchbuffer[i];
++	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
++		struct drm_i915_error_object *obj;
+ 
++		if ((obj = error->ring[i].batchbuffer)) {
+ 			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
+ 				   dev_priv->ring[i].name,
+ 				   obj->gtt_offset);
+@@ -813,11 +793,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
+ 				}
+ 			}
+ 		}
+-	}
+ 
+-	for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
+-		if (error->ringbuffer[i]) {
+-			struct drm_i915_error_object *obj = error->ringbuffer[i];
++		if (error->ring[i].num_requests) {
++			seq_printf(m, "%s --- %d requests\n",
++				   dev_priv->ring[i].name,
++				   error->ring[i].num_requests);
++			for (j = 0; j < error->ring[i].num_requests; j++) {
++				seq_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
++					   error->ring[i].requests[j].seqno,
++					   error->ring[i].requests[j].jiffies,
++					   error->ring[i].requests[j].tail);
++			}
++		}
++
++		if ((obj = error->ring[i].ringbuffer)) {
+ 			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
+ 				   dev_priv->ring[i].name,
+ 				   obj->gtt_offset);
+@@ -1001,7 +990,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
+ 	return 0;
+ }
+ 
+-static int i915_drpc_info(struct seq_file *m, void *unused)
++static int ironlake_drpc_info(struct seq_file *m)
+ {
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+ 	struct drm_device *dev = node->minor->dev;
+@@ -1068,6 +1057,95 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
+ 	return 0;
+ }
+ 
++static int gen6_drpc_info(struct seq_file *m)
++{
++
++	struct drm_info_node *node = (struct drm_info_node *) m->private;
++	struct drm_device *dev = node->minor->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	u32 rpmodectl1, gt_core_status, rcctl1;
++	unsigned forcewake_count;
++	int count=0, ret;
++
++
++	ret = mutex_lock_interruptible(&dev->struct_mutex);
++	if (ret)
++		return ret;
++
++	spin_lock_irq(&dev_priv->gt_lock);
++	forcewake_count = dev_priv->forcewake_count;
++	spin_unlock_irq(&dev_priv->gt_lock);
++
++	if (forcewake_count) {
++		seq_printf(m, "RC information inaccurate because somebody "
++			      "holds a forcewake reference \n");
++	} else {
++		/* NB: we cannot use forcewake, else we read the wrong values */
++		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
++			udelay(10);
++		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
++	}
++
++	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
++	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
++
++	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
++	rcctl1 = I915_READ(GEN6_RC_CONTROL);
++	mutex_unlock(&dev->struct_mutex);
++
++	seq_printf(m, "Video Turbo Mode: %s\n",
++		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
++	seq_printf(m, "HW control enabled: %s\n",
++		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
++	seq_printf(m, "SW control enabled: %s\n",
++		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
++			  GEN6_RP_MEDIA_SW_MODE));
++	seq_printf(m, "RC1e Enabled: %s\n",
++		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
++	seq_printf(m, "RC6 Enabled: %s\n",
++		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
++	seq_printf(m, "Deep RC6 Enabled: %s\n",
++		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
++	seq_printf(m, "Deepest RC6 Enabled: %s\n",
++		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
++	seq_printf(m, "Current RC state: ");
++	switch (gt_core_status & GEN6_RCn_MASK) {
++	case GEN6_RC0:
++		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
++			seq_printf(m, "Core Power Down\n");
++		else
++			seq_printf(m, "on\n");
++		break;
++	case GEN6_RC3:
++		seq_printf(m, "RC3\n");
++		break;
++	case GEN6_RC6:
++		seq_printf(m, "RC6\n");
++		break;
++	case GEN6_RC7:
++		seq_printf(m, "RC7\n");
++		break;
++	default:
++		seq_printf(m, "Unknown\n");
++		break;
++	}
++
++	seq_printf(m, "Core Power Down: %s\n",
++		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
++	return 0;
++}
++
++static int i915_drpc_info(struct seq_file *m, void *unused)
++{
++	struct drm_info_node *node = (struct drm_info_node *) m->private;
++	struct drm_device *dev = node->minor->dev;
++
++	if (IS_GEN6(dev) || IS_GEN7(dev))
++		return gen6_drpc_info(m);
++	else
++		return ironlake_drpc_info(m);
++}
++
+ static int i915_fbc_status(struct seq_file *m, void *unused)
+ {
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+@@ -1146,6 +1224,9 @@ static int i915_emon_status(struct seq_file *m, void *unused)
+ 	unsigned long temp, chipset, gfx;
+ 	int ret;
+ 
++	if (!IS_GEN5(dev))
++		return -ENODEV;
++
+ 	ret = mutex_lock_interruptible(&dev->struct_mutex);
+ 	if (ret)
+ 		return ret;
+@@ -1325,11 +1406,102 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
+ 	return 0;
+ }
+ 
+-static int
+-i915_wedged_open(struct inode *inode,
+-		 struct file *filp)
++static const char *swizzle_string(unsigned swizzle)
+ {
+-	filp->private_data = inode->i_private;
++	switch(swizzle) {
++	case I915_BIT_6_SWIZZLE_NONE:
++		return "none";
++	case I915_BIT_6_SWIZZLE_9:
++		return "bit9";
++	case I915_BIT_6_SWIZZLE_9_10:
++		return "bit9/bit10";
++	case I915_BIT_6_SWIZZLE_9_11:
++		return "bit9/bit11";
++	case I915_BIT_6_SWIZZLE_9_10_11:
++		return "bit9/bit10/bit11";
++	case I915_BIT_6_SWIZZLE_9_17:
++		return "bit9/bit17";
++	case I915_BIT_6_SWIZZLE_9_10_17:
++		return "bit9/bit10/bit17";
++	case I915_BIT_6_SWIZZLE_UNKNOWN:
++		return "unkown";
++	}
++
++	return "bug";
++}
++
++static int i915_swizzle_info(struct seq_file *m, void *data)
++{
++	struct drm_info_node *node = (struct drm_info_node *) m->private;
++	struct drm_device *dev = node->minor->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++
++	mutex_lock(&dev->struct_mutex);
++	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
++		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
++	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
++		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
++
++	if (IS_GEN3(dev) || IS_GEN4(dev)) {
++		seq_printf(m, "DDC = 0x%08x\n",
++			   I915_READ(DCC));
++		seq_printf(m, "C0DRB3 = 0x%04x\n",
++			   I915_READ16(C0DRB3));
++		seq_printf(m, "C1DRB3 = 0x%04x\n",
++			   I915_READ16(C1DRB3));
++	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
++		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
++			   I915_READ(MAD_DIMM_C0));
++		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
++			   I915_READ(MAD_DIMM_C1));
++		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
++			   I915_READ(MAD_DIMM_C2));
++		seq_printf(m, "TILECTL = 0x%08x\n",
++			   I915_READ(TILECTL));
++		seq_printf(m, "ARB_MODE = 0x%08x\n",
++			   I915_READ(ARB_MODE));
++		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
++			   I915_READ(DISP_ARB_CTL));
++	}
++	mutex_unlock(&dev->struct_mutex);
++
++	return 0;
++}
++
++static int i915_ppgtt_info(struct seq_file *m, void *data)
++{
++	struct drm_info_node *node = (struct drm_info_node *) m->private;
++	struct drm_device *dev = node->minor->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_ring_buffer *ring;
++	int i, ret;
++
++
++	ret = mutex_lock_interruptible(&dev->struct_mutex);
++	if (ret)
++		return ret;
++	if (INTEL_INFO(dev)->gen == 6)
++		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
++
++	for (i = 0; i < I915_NUM_RINGS; i++) {
++		ring = &dev_priv->ring[i];
++
++		seq_printf(m, "%s\n", ring->name);
++		if (INTEL_INFO(dev)->gen == 7)
++			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
++		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
++		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
++		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
++	}
++	if (dev_priv->mm.aliasing_ppgtt) {
++		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
++
++		seq_printf(m, "aliasing PPGTT:\n");
++		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
++	}
++	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
++	mutex_unlock(&dev->struct_mutex);
++
+ 	return 0;
+ }
+ 
+@@ -1383,20 +1555,12 @@ i915_wedged_write(struct file *filp,
+ 
+ static const struct file_operations i915_wedged_fops = {
+ 	.owner = THIS_MODULE,
+-	.open = i915_wedged_open,
++	.open = simple_open,
+ 	.read = i915_wedged_read,
+ 	.write = i915_wedged_write,
+ 	.llseek = default_llseek,
+ };
+ 
+-static int
+-i915_max_freq_open(struct inode *inode,
+-		   struct file *filp)
+-{
+-	filp->private_data = inode->i_private;
+-	return 0;
+-}
+-
+ static ssize_t
+ i915_max_freq_read(struct file *filp,
+ 		   char __user *ubuf,
+@@ -1453,20 +1617,12 @@ i915_max_freq_write(struct file *filp,
+ 
+ static const struct file_operations i915_max_freq_fops = {
+ 	.owner = THIS_MODULE,
+-	.open = i915_max_freq_open,
++	.open = simple_open,
+ 	.read = i915_max_freq_read,
+ 	.write = i915_max_freq_write,
+ 	.llseek = default_llseek,
+ };
+ 
+-static int
+-i915_cache_sharing_open(struct inode *inode,
+-		   struct file *filp)
+-{
+-	filp->private_data = inode->i_private;
+-	return 0;
+-}
+-
+ static ssize_t
+ i915_cache_sharing_read(struct file *filp,
+ 		   char __user *ubuf,
+@@ -1532,7 +1688,7 @@ i915_cache_sharing_write(struct file *filp,
+ 
+ static const struct file_operations i915_cache_sharing_fops = {
+ 	.owner = THIS_MODULE,
+-	.open = i915_cache_sharing_open,
++	.open = simple_open,
+ 	.read = i915_cache_sharing_read,
+ 	.write = i915_cache_sharing_write,
+ 	.llseek = default_llseek,
+@@ -1564,28 +1720,13 @@ drm_add_fake_info_node(struct drm_minor *minor,
+ 	return 0;
+ }
+ 
+-static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
+-{
+-	struct drm_device *dev = minor->dev;
+-	struct dentry *ent;
+-
+-	ent = debugfs_create_file("i915_wedged",
+-				  S_IRUGO | S_IWUSR,
+-				  root, dev,
+-				  &i915_wedged_fops);
+-	if (IS_ERR(ent))
+-		return PTR_ERR(ent);
+-
+-	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
+-}
+-
+ static int i915_forcewake_open(struct inode *inode, struct file *file)
+ {
+ 	struct drm_device *dev = inode->i_private;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	int ret;
+ 
+-	if (!IS_GEN6(dev))
++	if (INTEL_INFO(dev)->gen < 6)
+ 		return 0;
+ 
+ 	ret = mutex_lock_interruptible(&dev->struct_mutex);
+@@ -1602,7 +1743,7 @@ int i915_forcewake_release(struct inode *inode, struct file *file)
+ 	struct drm_device *dev = inode->i_private;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
+-	if (!IS_GEN6(dev))
++	if (INTEL_INFO(dev)->gen < 6)
+ 		return 0;
+ 
+ 	/*
+@@ -1640,34 +1781,22 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
+ 	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
+ }
+ 
+-static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor)
+-{
+-	struct drm_device *dev = minor->dev;
+-	struct dentry *ent;
+-
+-	ent = debugfs_create_file("i915_max_freq",
+-				  S_IRUGO | S_IWUSR,
+-				  root, dev,
+-				  &i915_max_freq_fops);
+-	if (IS_ERR(ent))
+-		return PTR_ERR(ent);
+-
+-	return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops);
+-}
+-
+-static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor)
++static int i915_debugfs_create(struct dentry *root,
++			       struct drm_minor *minor,
++			       const char *name,
++			       const struct file_operations *fops)
+ {
+ 	struct drm_device *dev = minor->dev;
+ 	struct dentry *ent;
+ 
+-	ent = debugfs_create_file("i915_cache_sharing",
++	ent = debugfs_create_file(name,
+ 				  S_IRUGO | S_IWUSR,
+ 				  root, dev,
+-				  &i915_cache_sharing_fops);
++				  fops);
+ 	if (IS_ERR(ent))
+ 		return PTR_ERR(ent);
+ 
+-	return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops);
++	return drm_add_fake_info_node(minor, ent, fops);
+ }
+ 
+ static struct drm_info_list i915_debugfs_list[] = {
+@@ -1693,7 +1822,6 @@ static struct drm_info_list i915_debugfs_list[] = {
+ 	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
+ 	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
+ 	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
+-	{"i915_batchbuffers", i915_batchbuffer_info, 0},
+ 	{"i915_error_state", i915_error_state, 0},
+ 	{"i915_rstdby_delays", i915_rstdby_delays, 0},
+ 	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
+@@ -1709,6 +1837,8 @@ static struct drm_info_list i915_debugfs_list[] = {
+ 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
+ 	{"i915_context_status", i915_context_status, 0},
+ 	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
++	{"i915_swizzle_info", i915_swizzle_info, 0},
++	{"i915_ppgtt_info", i915_ppgtt_info, 0},
+ };
+ #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
+ 
+@@ -1716,17 +1846,25 @@ int i915_debugfs_init(struct drm_minor *minor)
+ {
+ 	int ret;
+ 
+-	ret = i915_wedged_create(minor->debugfs_root, minor);
++	ret = i915_debugfs_create(minor->debugfs_root, minor,
++				  "i915_wedged",
++				  &i915_wedged_fops);
+ 	if (ret)
+ 		return ret;
+ 
+ 	ret = i915_forcewake_create(minor->debugfs_root, minor);
+ 	if (ret)
+ 		return ret;
+-	ret = i915_max_freq_create(minor->debugfs_root, minor);
++
++	ret = i915_debugfs_create(minor->debugfs_root, minor,
++				  "i915_max_freq",
++				  &i915_max_freq_fops);
+ 	if (ret)
+ 		return ret;
+-	ret = i915_cache_sharing_create(minor->debugfs_root, minor);
++
++	ret = i915_debugfs_create(minor->debugfs_root, minor,
++				  "i915_cache_sharing",
++				  &i915_cache_sharing_fops);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index ca67338..ba60f3c 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -781,6 +781,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
+ 	case I915_PARAM_HAS_RELAXED_DELTA:
+ 		value = 1;
+ 		break;
++	case I915_PARAM_HAS_GEN7_SOL_RESET:
++		value = 1;
++		break;
++	case I915_PARAM_HAS_LLC:
++		value = HAS_LLC(dev);
++		break;
+ 	default:
+ 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
+ 				 param->param);
+@@ -1177,6 +1183,21 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+ 	return can_switch;
+ }
+ 
++static bool
++intel_enable_ppgtt(struct drm_device *dev)
++{
++	if (i915_enable_ppgtt >= 0)
++		return i915_enable_ppgtt;
++
++#ifdef CONFIG_INTEL_IOMMU
++	/* Disable ppgtt on SNB if VT-d is on. */
++	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
++		return false;
++#endif
++
++	return true;
++}
++
+ static int i915_load_gem_init(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -1190,22 +1211,41 @@ static int i915_load_gem_init(struct drm_device *dev)
+ 	/* Basic memrange allocator for stolen space */
+ 	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+ 
+-	/* Let GEM Manage all of the aperture.
+-	 *
+-	 * However, leave one page at the end still bound to the scratch page.
+-	 * There are a number of places where the hardware apparently
+-	 * prefetches past the end of the object, and we've seen multiple
+-	 * hangs with the GPU head pointer stuck in a batchbuffer bound
+-	 * at the last page of the aperture.  One page should be enough to
+-	 * keep any prefetching inside of the aperture.
+-	 */
+-	i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
+-
+ 	mutex_lock(&dev->struct_mutex);
+-	ret = i915_gem_init_ringbuffer(dev);
++	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
++		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
++		 * aperture accordingly when using aliasing ppgtt. */
++		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
++		/* For paranoia keep the guard page in between. */
++		gtt_size -= PAGE_SIZE;
++
++		i915_gem_do_init(dev, 0, mappable_size, gtt_size);
++
++		ret = i915_gem_init_aliasing_ppgtt(dev);
++		if (ret) {
++			mutex_unlock(&dev->struct_mutex);
++			return ret;
++		}
++	} else {
++		/* Let GEM Manage all of the aperture.
++		 *
++		 * However, leave one page at the end still bound to the scratch
++		 * page.  There are a number of places where the hardware
++		 * apparently prefetches past the end of the object, and we've
++		 * seen multiple hangs with the GPU head pointer stuck in a
++		 * batchbuffer bound at the last page of the aperture.  One page
++		 * should be enough to keep any prefetching inside of the
++		 * aperture.
++		 */
++		i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
++	}
++
++	ret = i915_gem_init_hw(dev);
+ 	mutex_unlock(&dev->struct_mutex);
+-	if (ret)
++	if (ret) {
++		i915_gem_cleanup_aliasing_ppgtt(dev);
+ 		return ret;
++	}
+ 
+ 	/* Try to set up FBC with a reasonable compressed buffer size */
+ 	if (I915_HAS_FBC(dev) && i915_powersave) {
+@@ -1292,6 +1332,7 @@ cleanup_gem:
+ 	mutex_lock(&dev->struct_mutex);
+ 	i915_gem_cleanup_ringbuffer(dev);
+ 	mutex_unlock(&dev->struct_mutex);
++	i915_gem_cleanup_aliasing_ppgtt(dev);
+ cleanup_vga_switcheroo:
+ 	vga_switcheroo_unregister_client(dev->pdev);
+ cleanup_vga_client:
+@@ -1660,6 +1701,9 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
+ 	unsigned long diffms;
+ 	u32 count;
+ 
++	if (dev_priv->info->gen != 5)
++		return;
++
+ 	getrawmonotonic(&now);
+ 	diff1 = timespec_sub(now, dev_priv->last_time2);
+ 
+@@ -1890,27 +1934,6 @@ ips_ping_for_i915_load(void)
+ 	}
+ }
+ 
+-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+-{
+-	struct apertures_struct *ap;
+-	struct pci_dev *pdev = dev_priv->dev->pdev;
+-	bool primary;
+-
+-	ap = alloc_apertures(1);
+-	if (!ap)
+-		return;
+-
+-	ap->ranges[0].base = dev_priv->dev->agp->base;
+-	ap->ranges[0].size =
+-		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+-	primary =
+-		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+-
+-	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+-
+-	kfree(ap);
+-}
+-
+ /**
+  * i915_driver_load - setup chip and create an initial config
+  * @dev: DRM device
+@@ -1948,14 +1971,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 		goto free_priv;
+ 	}
+ 
+-	dev_priv->mm.gtt = intel_gtt_get();
+-	if (!dev_priv->mm.gtt) {
+-		DRM_ERROR("Failed to initialize GTT\n");
+-		ret = -ENODEV;
+-		goto put_bridge;
+-	}
+-
+-	i915_kick_out_firmware_fb(dev_priv);
++	pci_set_master(dev->pdev);
+ 
+ 	/* overlay on gen2 is broken and can't address above 1G */
+ 	if (IS_GEN2(dev))
+@@ -1980,6 +1996,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 		goto put_bridge;
+ 	}
+ 
++	dev_priv->mm.gtt = intel_gtt_get();
++	if (!dev_priv->mm.gtt) {
++		DRM_ERROR("Failed to initialize GTT\n");
++		ret = -ENODEV;
++		goto out_rmmap;
++	}
++
+ 	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ 
+ 	dev_priv->mm.gtt_mapping =
+@@ -2101,12 +2124,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
+ 		    (unsigned long) dev);
+ 
+-	spin_lock(&mchdev_lock);
+-	i915_mch_dev = dev_priv;
+-	dev_priv->mchdev_lock = &mchdev_lock;
+-	spin_unlock(&mchdev_lock);
++	if (IS_GEN5(dev)) {
++		spin_lock(&mchdev_lock);
++		i915_mch_dev = dev_priv;
++		dev_priv->mchdev_lock = &mchdev_lock;
++		spin_unlock(&mchdev_lock);
+ 
+-	ips_ping_for_i915_load();
++		ips_ping_for_i915_load();
++	}
+ 
+ 	return 0;
+ 
+@@ -2149,7 +2174,7 @@ int i915_driver_unload(struct drm_device *dev)
+ 		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+ 
+ 	mutex_lock(&dev->struct_mutex);
+-	ret = i915_gpu_idle(dev);
++	ret = i915_gpu_idle(dev, true);
+ 	if (ret)
+ 		DRM_ERROR("failed to idle hardware: %d\n", ret);
+ 	mutex_unlock(&dev->struct_mutex);
+@@ -2202,6 +2227,7 @@ int i915_driver_unload(struct drm_device *dev)
+ 		i915_gem_free_all_phys_object(dev);
+ 		i915_gem_cleanup_ringbuffer(dev);
+ 		mutex_unlock(&dev->struct_mutex);
++		i915_gem_cleanup_aliasing_ppgtt(dev);
+ 		if (I915_HAS_FBC(dev) && i915_powersave)
+ 			i915_cleanup_compression(dev);
+ 		drm_mm_takedown(&dev_priv->mm.stolen);
+@@ -2267,18 +2293,12 @@ void i915_driver_lastclose(struct drm_device * dev)
+ 
+ 	i915_gem_lastclose(dev);
+ 
+-	if (dev_priv->agp_heap)
+-		i915_mem_takedown(&(dev_priv->agp_heap));
+-
+ 	i915_dma_cleanup(dev);
+ }
+ 
+ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+ {
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	i915_gem_release(dev, file_priv);
+-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+-		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+ }
+ 
+ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+@@ -2297,11 +2317,11 @@ struct drm_ioctl_desc i915_ioctls[] = {
+ 	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+ 	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
+ 	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+-	DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
+-	DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
+-	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
++	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
++	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ 	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+-	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ 	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ 	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
+ 	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+@@ -2329,6 +2349,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
+ 	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
++	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ };
+ 
+ int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 452bc51..89f3d4a 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -66,7 +66,11 @@ MODULE_PARM_DESC(semaphores,
+ int i915_enable_rc6 __read_mostly = -1;
+ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
+ MODULE_PARM_DESC(i915_enable_rc6,
+-		"Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
++		"Enable power-saving render C-state 6. "
++		"Different stages can be selected via bitmask values "
++		"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
++		"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
++		"default: -1 (use per-chip default)");
+ 
+ int i915_enable_fbc __read_mostly = -1;
+ module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
+@@ -103,6 +107,11 @@ MODULE_PARM_DESC(enable_hangcheck,
+ 		"WARNING: Disabling this can cause system wide hangs. "
+ 		"(default: true)");
+ 
++int i915_enable_ppgtt __read_mostly = -1;
++module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
++MODULE_PARM_DESC(i915_enable_ppgtt,
++		"Enable PPGTT (default: true)");
++
+ static struct drm_driver driver;
+ extern int intel_agp_enabled;
+ 
+@@ -198,7 +207,7 @@ static const struct intel_device_info intel_pineview_info = {
+ 
+ static const struct intel_device_info intel_ironlake_d_info = {
+ 	.gen = 5,
+-	.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
++	.need_gfx_hws = 1, .has_hotplug = 1,
+ 	.has_bsd_ring = 1,
+ };
+ 
+@@ -214,6 +223,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
+ 	.need_gfx_hws = 1, .has_hotplug = 1,
+ 	.has_bsd_ring = 1,
+ 	.has_blt_ring = 1,
++	.has_llc = 1,
+ 	.has_force_wake = 1,
+ };
+ 
+@@ -223,6 +233,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
+ 	.has_fbc = 1,
+ 	.has_bsd_ring = 1,
+ 	.has_blt_ring = 1,
++	.has_llc = 1,
+ 	.has_force_wake = 1,
+ };
+ 
+@@ -231,6 +242,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
+ 	.need_gfx_hws = 1, .has_hotplug = 1,
+ 	.has_bsd_ring = 1,
+ 	.has_blt_ring = 1,
++	.has_llc = 1,
+ 	.has_force_wake = 1,
+ };
+ 
+@@ -240,6 +252,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
+ 	.has_fbc = 0,	/* FBC is not enabled on Ivybridge mobile yet */
+ 	.has_bsd_ring = 1,
+ 	.has_blt_ring = 1,
++	.has_llc = 1,
+ 	.has_force_wake = 1,
+ };
+ 
+@@ -381,16 +394,27 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+ 	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+ }
+ 
++static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
++{
++	u32 gtfifodbg;
++	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
++	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
++	     "MMIO read or write has been dropped %x\n", gtfifodbg))
++		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
++}
++
+ void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+ {
+ 	I915_WRITE_NOTRACE(FORCEWAKE, 0);
+-	POSTING_READ(FORCEWAKE);
++	/* The below doubles as a POSTING_READ */
++	gen6_gt_check_fifodbg(dev_priv);
+ }
+ 
+ void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+ {
+ 	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+-	POSTING_READ(FORCEWAKE_MT);
++	/* The below doubles as a POSTING_READ */
++	gen6_gt_check_fifodbg(dev_priv);
+ }
+ 
+ /*
+@@ -406,8 +430,10 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+ 	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+ }
+ 
+-void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
++int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+ {
++	int ret = 0;
++
+ 	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+ 		int loop = 500;
+ 		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+@@ -415,10 +441,13 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+ 			udelay(10);
+ 			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ 		}
+-		WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
++		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++			++ret;
+ 		dev_priv->gt_fifo_count = fifo;
+ 	}
+ 	dev_priv->gt_fifo_count--;
++
++	return ret;
+ }
+ 
+ static int i915_drm_freeze(struct drm_device *dev)
+@@ -503,7 +532,7 @@ static int i915_drm_thaw(struct drm_device *dev)
+ 		mutex_lock(&dev->struct_mutex);
+ 		dev_priv->mm.suspended = 0;
+ 
+-		error = i915_gem_init_ringbuffer(dev);
++		error = i915_gem_init_hw(dev);
+ 		mutex_unlock(&dev->struct_mutex);
+ 
+ 		if (HAS_PCH_SPLIT(dev))
+@@ -614,13 +643,40 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
+ static int gen6_do_reset(struct drm_device *dev, u8 flags)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
++	int	ret;
++	unsigned long irqflags;
++
++	/* Hold gt_lock across reset to prevent any register access
++	 * with forcewake not set correctly
++	 */
++	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
++
++	/* Reset the chip */
++
++	/* GEN6_GDRST is not in the gt power well, no need to check
++	 * for fifo space for the write or forcewake the chip for
++	 * the read
++	 */
++	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
++
++	/* Spin waiting for the device to ack the reset request */
++	ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+ 
+-	I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
+-	return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
++	/* If reset with a user forcewake, try to restore, otherwise turn it off */
++	if (dev_priv->forcewake_count)
++		dev_priv->display.force_wake_get(dev_priv);
++	else
++		dev_priv->display.force_wake_put(dev_priv);
++
++	/* Restore fifo count */
++	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
++
++	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
++	return ret;
+ }
+ 
+ /**
+- * i965_reset - reset chip after a hang
++ * i915_reset - reset chip after a hang
+  * @dev: drm device to reset
+  * @flags: reset domains
+  *
+@@ -643,7 +699,6 @@ int i915_reset(struct drm_device *dev, u8 flags)
+ 	 * need to
+ 	 */
+ 	bool need_display = true;
+-	unsigned long irqflags;
+ 	int ret;
+ 
+ 	if (!i915_try_reset)
+@@ -661,11 +716,6 @@ int i915_reset(struct drm_device *dev, u8 flags)
+ 	case 7:
+ 	case 6:
+ 		ret = gen6_do_reset(dev, flags);
+-		/* If reset with a user forcewake, try to restore */
+-		spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+-		if (dev_priv->forcewake_count)
+-			dev_priv->display.force_wake_get(dev_priv);
+-		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+ 		break;
+ 	case 5:
+ 		ret = ironlake_do_reset(dev, flags);
+@@ -702,12 +752,16 @@ int i915_reset(struct drm_device *dev, u8 flags)
+ 			!dev_priv->mm.suspended) {
+ 		dev_priv->mm.suspended = 0;
+ 
++		i915_gem_init_swizzling(dev);
++
+ 		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
+ 		if (HAS_BSD(dev))
+ 		    dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
+ 		if (HAS_BLT(dev))
+ 		    dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+ 
++		i915_gem_init_ppgtt(dev);
++
+ 		mutex_unlock(&dev->struct_mutex);
+ 		drm_irq_uninstall(dev);
+ 		drm_mode_config_reset(dev);
+@@ -830,6 +884,21 @@ static struct vm_operations_struct i915_gem_vm_ops = {
+ 	.close = drm_gem_vm_close,
+ };
+ 
++static const struct file_operations i915_driver_fops = {
++	.owner = THIS_MODULE,
++	.open = drm_open,
++	.release = drm_release,
++	.unlocked_ioctl = drm_ioctl,
++	.mmap = drm_gem_mmap,
++	.poll = drm_poll,
++	.fasync = drm_fasync,
++	.read = drm_read,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl = i915_compat_ioctl,
++#endif
++	.llseek = noop_llseek,
++};
++
+ static struct drm_driver driver = {
+ 	/* Don't use MTRRs here; the Xserver or userspace app should
+ 	 * deal with them for Intel hardware.
+@@ -863,21 +932,7 @@ static struct drm_driver driver = {
+ 	.dumb_map_offset = i915_gem_mmap_gtt,
+ 	.dumb_destroy = i915_gem_dumb_destroy,
+ 	.ioctls = i915_ioctls,
+-	.fops = {
+-		 .owner = THIS_MODULE,
+-		 .open = drm_open,
+-		 .release = drm_release,
+-		 .unlocked_ioctl = drm_ioctl,
+-		 .mmap = drm_gem_mmap,
+-		 .poll = drm_poll,
+-		 .fasync = drm_fasync,
+-		 .read = drm_read,
+-#ifdef CONFIG_COMPAT
+-		 .compat_ioctl = i915_compat_ioctl,
+-#endif
+-		 .llseek = noop_llseek,
+-	},
+-
++	.fops = &i915_driver_fops,
+ 	.name = DRIVER_NAME,
+ 	.desc = DRIVER_DESC,
+ 	.date = DRIVER_DATE,
+@@ -945,17 +1000,21 @@ MODULE_LICENSE("GPL and additional rights");
+ /* We give fast paths for the really cool registers */
+ #define NEEDS_FORCE_WAKE(dev_priv, reg) \
+ 	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+-	 ((reg) < 0x40000) &&		 \
+-	 ((reg) != FORCEWAKE) &&	 \
+-	 ((reg) != ECOBUS))
++        ((reg) < 0x40000) &&            \
++        ((reg) != FORCEWAKE))
+ 
+ #define __i915_read(x, y) \
+ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+ 	u##x val = 0; \
+ 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+-		gen6_gt_force_wake_get(dev_priv); \
++		unsigned long irqflags; \
++		spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
++		if (dev_priv->forcewake_count == 0) \
++			dev_priv->display.force_wake_get(dev_priv); \
+ 		val = read##y(dev_priv->regs + reg); \
+-		gen6_gt_force_wake_put(dev_priv); \
++		if (dev_priv->forcewake_count == 0) \
++			dev_priv->display.force_wake_put(dev_priv); \
++		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
+ 	} else { \
+ 		val = read##y(dev_priv->regs + reg); \
+ 	} \
+@@ -971,11 +1030,15 @@ __i915_read(64, q)
+ 
+ #define __i915_write(x, y) \
+ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
++	u32 __fifo_ret = 0; \
+ 	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+ 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+-		__gen6_gt_wait_for_fifo(dev_priv); \
++		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+ 	} \
+ 	write##y(val, dev_priv->regs + reg); \
++	if (unlikely(__fifo_ret)) { \
++		gen6_gt_check_fifodbg(dev_priv); \
++	} \
+ }
+ __i915_write(8, b)
+ __i915_write(16, w)
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 012a9d2..45c5cf8 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -35,6 +35,7 @@
+ #include "intel_ringbuffer.h"
+ #include <linux/io-mapping.h>
+ #include <linux/i2c.h>
++#include <linux/i2c-algo-bit.h>
+ #include <drm/intel-gtt.h>
+ #include <linux/backlight.h>
+ 
+@@ -135,6 +136,7 @@ struct drm_i915_fence_reg {
+ 	struct list_head lru_list;
+ 	struct drm_i915_gem_object *obj;
+ 	uint32_t setup_seqno;
++	int pin_count;
+ };
+ 
+ struct sdvo_device_mapping {
+@@ -152,33 +154,40 @@ struct drm_i915_error_state {
+ 	u32 eir;
+ 	u32 pgtbl_er;
+ 	u32 pipestat[I915_MAX_PIPES];
+-	u32 ipeir;
+-	u32 ipehr;
+-	u32 instdone;
+-	u32 acthd;
++	u32 tail[I915_NUM_RINGS];
++	u32 head[I915_NUM_RINGS];
++	u32 ipeir[I915_NUM_RINGS];
++	u32 ipehr[I915_NUM_RINGS];
++	u32 instdone[I915_NUM_RINGS];
++	u32 acthd[I915_NUM_RINGS];
++	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
++	/* our own tracking of ring head and tail */
++	u32 cpu_ring_head[I915_NUM_RINGS];
++	u32 cpu_ring_tail[I915_NUM_RINGS];
+ 	u32 error; /* gen6+ */
+-	u32 bcs_acthd; /* gen6+ blt engine */
+-	u32 bcs_ipehr;
+-	u32 bcs_ipeir;
+-	u32 bcs_instdone;
+-	u32 bcs_seqno;
+-	u32 vcs_acthd; /* gen6+ bsd engine */
+-	u32 vcs_ipehr;
+-	u32 vcs_ipeir;
+-	u32 vcs_instdone;
+-	u32 vcs_seqno;
+-	u32 instpm;
+-	u32 instps;
++	u32 instpm[I915_NUM_RINGS];
++	u32 instps[I915_NUM_RINGS];
+ 	u32 instdone1;
+-	u32 seqno;
++	u32 seqno[I915_NUM_RINGS];
+ 	u64 bbaddr;
++	u32 fault_reg[I915_NUM_RINGS];
++	u32 done_reg;
++	u32 faddr[I915_NUM_RINGS];
+ 	u64 fence[I915_MAX_NUM_FENCES];
+ 	struct timeval time;
+-	struct drm_i915_error_object {
+-		int page_count;
+-		u32 gtt_offset;
+-		u32 *pages[0];
+-	} *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS];
++	struct drm_i915_error_ring {
++		struct drm_i915_error_object {
++			int page_count;
++			u32 gtt_offset;
++			u32 *pages[0];
++		} *ringbuffer, *batchbuffer;
++		struct drm_i915_error_request {
++			long jiffies;
++			u32 seqno;
++			u32 tail;
++		} *requests;
++		int num_requests;
++	} ring[I915_NUM_RINGS];
+ 	struct drm_i915_error_buffer {
+ 		u32 size;
+ 		u32 name;
+@@ -191,7 +200,7 @@ struct drm_i915_error_state {
+ 		u32 tiling:2;
+ 		u32 dirty:1;
+ 		u32 purgeable:1;
+-		u32 ring:4;
++		s32 ring:4;
+ 		u32 cache_level:2;
+ 	} *active_bo, *pinned_bo;
+ 	u32 active_bo_count, pinned_bo_count;
+@@ -207,6 +216,8 @@ struct drm_i915_display_funcs {
+ 	int (*get_display_clock_speed)(struct drm_device *dev);
+ 	int (*get_fifo_size)(struct drm_device *dev, int plane);
+ 	void (*update_wm)(struct drm_device *dev);
++	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
++				 uint32_t sprite_width, int pixel_size);
+ 	int (*crtc_mode_set)(struct drm_crtc *crtc,
+ 			     struct drm_display_mode *mode,
+ 			     struct drm_display_mode *adjusted_mode,
+@@ -254,6 +265,17 @@ struct intel_device_info {
+ 	u8 supports_tv:1;
+ 	u8 has_bsd_ring:1;
+ 	u8 has_blt_ring:1;
++	u8 has_llc:1;
++};
++
++#define I915_PPGTT_PD_ENTRIES 512
++#define I915_PPGTT_PT_ENTRIES 1024
++struct i915_hw_ppgtt {
++	unsigned num_pd_entries;
++	struct page **pt_pages;
++	uint32_t pd_offset;
++	dma_addr_t *pt_dma_addr;
++	dma_addr_t scratch_page_dma_addr;
+ };
+ 
+ enum no_fbc_reason {
+@@ -278,6 +300,16 @@ enum intel_pch {
+ struct intel_fbdev;
+ struct intel_fbc_work;
+ 
++struct intel_gmbus {
++	struct i2c_adapter adapter;
++	bool force_bit;
++	bool has_gpio;
++	u32 reg0;
++	u32 gpio_reg;
++	struct i2c_algo_bit_data bit_algo;
++	struct drm_i915_private *dev_priv;
++};
++
+ typedef struct drm_i915_private {
+ 	struct drm_device *dev;
+ 
+@@ -295,11 +327,11 @@ typedef struct drm_i915_private {
+ 	/** gt_lock is also taken in irq contexts. */
+ 	struct spinlock gt_lock;
+ 
+-	struct intel_gmbus {
+-		struct i2c_adapter adapter;
+-		struct i2c_adapter *force_bit;
+-		u32 reg0;
+-	} *gmbus;
++	struct intel_gmbus *gmbus;
++
++	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
++	 * controller on different i2c buses. */
++	struct mutex gmbus_mutex;
+ 
+ 	struct pci_dev *bridge_dev;
+ 	struct intel_ring_buffer ring[I915_NUM_RINGS];
+@@ -334,7 +366,6 @@ typedef struct drm_i915_private {
+ 
+ 	int tex_lru_log_granularity;
+ 	int allow_batchbuffer;
+-	struct mem_block *agp_heap;
+ 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
+ 	int vblank_pipe;
+ 	int num_pipe;
+@@ -359,6 +390,7 @@ typedef struct drm_i915_private {
+ 
+ 	/* overlay */
+ 	struct intel_overlay *overlay;
++	bool sprite_scaling_enabled;
+ 
+ 	/* LVDS info */
+ 	int backlight_level;  /* restore backlight to this value */
+@@ -584,6 +616,9 @@ typedef struct drm_i915_private {
+ 		struct io_mapping *gtt_mapping;
+ 		int gtt_mtrr;
+ 
++		/** PPGTT used for aliasing the PPGTT with the GTT */
++		struct i915_hw_ppgtt *aliasing_ppgtt;
++
+ 		struct shrinker inactive_shrinker;
+ 
+ 		/**
+@@ -749,6 +784,13 @@ typedef struct drm_i915_private {
+ 	struct drm_property *force_audio_property;
+ } drm_i915_private_t;
+ 
++enum hdmi_force_audio {
++	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
++	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
++	HDMI_AUDIO_AUTO,		/* trust EDID */
++	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
++};
++
+ enum i915_cache_level {
+ 	I915_CACHE_NONE,
+ 	I915_CACHE_LLC,
+@@ -841,6 +883,8 @@ struct drm_i915_gem_object {
+ 
+ 	unsigned int cache_level:2;
+ 
++	unsigned int has_aliasing_ppgtt_mapping:1;
++
+ 	struct page **pages;
+ 
+ 	/**
+@@ -918,6 +962,9 @@ struct drm_i915_gem_request {
+ 	/** GEM sequence number associated with this request. */
+ 	uint32_t seqno;
+ 
++	/** Postion in the ringbuffer of the end of the request */
++	u32 tail;
++
+ 	/** Time at which this request was emitted, in jiffies. */
+ 	unsigned long emitted_jiffies;
+ 
+@@ -974,8 +1021,11 @@ struct drm_i915_file_private {
+ 
+ #define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
+ #define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
++#define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
+ #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
+ 
++#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >=6)
++
+ #define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
+ #define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
+ 
+@@ -1008,6 +1058,27 @@ struct drm_i915_file_private {
+ 
+ #include "i915_trace.h"
+ 
++/**
++ * RC6 is a special power stage which allows the GPU to enter an very
++ * low-voltage mode when idle, using down to 0V while at this stage.  This
++ * stage is entered automatically when the GPU is idle when RC6 support is
++ * enabled, and as soon as new workload arises GPU wakes up automatically as well.
++ *
++ * There are different RC6 modes available in Intel GPU, which differentiate
++ * among each other with the latency required to enter and leave RC6 and
++ * voltage consumed by the GPU in different states.
++ *
++ * The combination of the following flags define which states GPU is allowed
++ * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
++ * RC6pp is deepest RC6. Their support by hardware varies according to the
++ * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
++ * which brings the most power savings; deeper states save more power, but
++ * require higher latency to switch to and wake up.
++ */
++#define INTEL_RC6_ENABLE			(1<<0)
++#define INTEL_RC6p_ENABLE			(1<<1)
++#define INTEL_RC6pp_ENABLE			(1<<2)
++
+ extern struct drm_ioctl_desc i915_ioctls[];
+ extern int i915_max_ioctl;
+ extern unsigned int i915_fbpercrtc __always_unused;
+@@ -1020,6 +1091,7 @@ extern int i915_vbt_sdvo_panel_type __read_mostly;
+ extern int i915_enable_rc6 __read_mostly;
+ extern int i915_enable_fbc __read_mostly;
+ extern bool i915_enable_hangcheck __read_mostly;
++extern int i915_enable_ppgtt __read_mostly;
+ 
+ extern int i915_suspend(struct drm_device *dev, pm_message_t state);
+ extern int i915_resume(struct drm_device *dev);
+@@ -1081,18 +1153,6 @@ extern void i915_destroy_error_state(struct drm_device *dev);
+ #endif
+ 
+ 
+-/* i915_mem.c */
+-extern int i915_mem_alloc(struct drm_device *dev, void *data,
+-			  struct drm_file *file_priv);
+-extern int i915_mem_free(struct drm_device *dev, void *data,
+-			 struct drm_file *file_priv);
+-extern int i915_mem_init_heap(struct drm_device *dev, void *data,
+-			      struct drm_file *file_priv);
+-extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
+-				 struct drm_file *file_priv);
+-extern void i915_mem_takedown(struct mem_block **heap);
+-extern void i915_mem_release(struct drm_device * dev,
+-			     struct drm_file *file_priv, struct mem_block *heap);
+ /* i915_gem.c */
+ int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ 			struct drm_file *file_priv);
+@@ -1178,26 +1238,49 @@ int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ 					   struct intel_ring_buffer *pipelined);
+ int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+ 
++static inline void
++i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
++{
++	if (obj->fence_reg != I915_FENCE_REG_NONE) {
++		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
++		dev_priv->fence_regs[obj->fence_reg].pin_count++;
++	}
++}
++
++static inline void
++i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
++{
++	if (obj->fence_reg != I915_FENCE_REG_NONE) {
++		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
++		dev_priv->fence_regs[obj->fence_reg].pin_count--;
++	}
++}
++
+ void i915_gem_retire_requests(struct drm_device *dev);
++void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
++
+ void i915_gem_reset(struct drm_device *dev);
+ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
+ 					    uint32_t read_domains,
+ 					    uint32_t write_domain);
+ int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
+-int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
++int __must_check i915_gem_init_hw(struct drm_device *dev);
++void i915_gem_init_swizzling(struct drm_device *dev);
++void i915_gem_init_ppgtt(struct drm_device *dev);
+ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+ void i915_gem_do_init(struct drm_device *dev,
+ 		      unsigned long start,
+ 		      unsigned long mappable_end,
+ 		      unsigned long end);
+-int __must_check i915_gpu_idle(struct drm_device *dev);
++int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
+ int __must_check i915_gem_idle(struct drm_device *dev);
+ int __must_check i915_add_request(struct intel_ring_buffer *ring,
+ 				  struct drm_file *file,
+ 				  struct drm_i915_gem_request *request);
+ int __must_check i915_wait_request(struct intel_ring_buffer *ring,
+-				   uint32_t seqno);
++				   uint32_t seqno,
++				   bool do_retire);
+ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int __must_check
+ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+@@ -1224,6 +1307,14 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ 				    enum i915_cache_level cache_level);
+ 
+ /* i915_gem_gtt.c */
++int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
++void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
++void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
++			    struct drm_i915_gem_object *obj,
++			    enum i915_cache_level cache_level);
++void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
++			      struct drm_i915_gem_object *obj);
++
+ void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+ int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+@@ -1305,7 +1396,6 @@ static inline void intel_unregister_dsm_handler(void) { return; }
+ #endif /* CONFIG_ACPI */
+ 
+ /* modesetting */
+-extern void i915_redisable_vga(struct drm_device *dev);
+ extern void intel_modeset_init(struct drm_device *dev);
+ extern void intel_modeset_gem_init(struct drm_device *dev);
+ extern void intel_modeset_cleanup(struct drm_device *dev);
+@@ -1363,14 +1453,7 @@ extern void intel_display_print_error_state(struct seq_file *m,
+  */
+ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+-void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+-
+-/* We give fast paths for the really cool registers */
+-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+-	(((dev_priv)->info->gen >= 6) && \
+-	 ((reg) < 0x40000) &&		 \
+-	 ((reg) != FORCEWAKE) &&	 \
+-	 ((reg) != ECOBUS))
++int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+ 
+ #define __i915_read(x, y) \
+ 	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 5950ba3..a230a93 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -58,6 +58,7 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
+ 
+ static int i915_gem_inactive_shrink(struct shrinker *shrinker,
+ 				    struct shrink_control *sc);
++static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+ 
+ /* some bookkeeping */
+ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
+@@ -258,73 +259,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+ 		obj->tiling_mode != I915_TILING_NONE;
+ }
+ 
+-static inline void
+-slow_shmem_copy(struct page *dst_page,
+-		int dst_offset,
+-		struct page *src_page,
+-		int src_offset,
+-		int length)
+-{
+-	char *dst_vaddr, *src_vaddr;
+-
+-	dst_vaddr = kmap(dst_page);
+-	src_vaddr = kmap(src_page);
+-
+-	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
+-
+-	kunmap(src_page);
+-	kunmap(dst_page);
+-}
+-
+-static inline void
+-slow_shmem_bit17_copy(struct page *gpu_page,
+-		      int gpu_offset,
+-		      struct page *cpu_page,
+-		      int cpu_offset,
+-		      int length,
+-		      int is_read)
+-{
+-	char *gpu_vaddr, *cpu_vaddr;
+-
+-	/* Use the unswizzled path if this page isn't affected. */
+-	if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
+-		if (is_read)
+-			return slow_shmem_copy(cpu_page, cpu_offset,
+-					       gpu_page, gpu_offset, length);
+-		else
+-			return slow_shmem_copy(gpu_page, gpu_offset,
+-					       cpu_page, cpu_offset, length);
+-	}
+-
+-	gpu_vaddr = kmap(gpu_page);
+-	cpu_vaddr = kmap(cpu_page);
+-
+-	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
+-	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
+-	 */
+-	while (length > 0) {
+-		int cacheline_end = ALIGN(gpu_offset + 1, 64);
+-		int this_length = min(cacheline_end - gpu_offset, length);
+-		int swizzled_gpu_offset = gpu_offset ^ 64;
+-
+-		if (is_read) {
+-			memcpy(cpu_vaddr + cpu_offset,
+-			       gpu_vaddr + swizzled_gpu_offset,
+-			       this_length);
+-		} else {
+-			memcpy(gpu_vaddr + swizzled_gpu_offset,
+-			       cpu_vaddr + cpu_offset,
+-			       this_length);
+-		}
+-		cpu_offset += this_length;
+-		gpu_offset += this_length;
+-		length -= this_length;
+-	}
+-
+-	kunmap(cpu_page);
+-	kunmap(gpu_page);
+-}
+-
+ /**
+  * This is the fast shmem pread path, which attempts to copy_from_user directly
+  * from the backing pages of the object to the user's address space.  On a
+@@ -385,6 +319,58 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
+ 	return 0;
+ }
+ 
++static inline int
++__copy_to_user_swizzled(char __user *cpu_vaddr,
++			const char *gpu_vaddr, int gpu_offset,
++			int length)
++{
++	int ret, cpu_offset = 0;
++
++	while (length > 0) {
++		int cacheline_end = ALIGN(gpu_offset + 1, 64);
++		int this_length = min(cacheline_end - gpu_offset, length);
++		int swizzled_gpu_offset = gpu_offset ^ 64;
++
++		ret = __copy_to_user(cpu_vaddr + cpu_offset,
++				     gpu_vaddr + swizzled_gpu_offset,
++				     this_length);
++		if (ret)
++			return ret + length;
++
++		cpu_offset += this_length;
++		gpu_offset += this_length;
++		length -= this_length;
++	}
++
++	return 0;
++}
++
++static inline int
++__copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
++			  const char *cpu_vaddr,
++			  int length)
++{
++	int ret, cpu_offset = 0;
++
++	while (length > 0) {
++		int cacheline_end = ALIGN(gpu_offset + 1, 64);
++		int this_length = min(cacheline_end - gpu_offset, length);
++		int swizzled_gpu_offset = gpu_offset ^ 64;
++
++		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
++				       cpu_vaddr + cpu_offset,
++				       this_length);
++		if (ret)
++			return ret + length;
++
++		cpu_offset += this_length;
++		gpu_offset += this_length;
++		length -= this_length;
++	}
++
++	return 0;
++}
++
+ /**
+  * This is the fallback shmem pread path, which allocates temporary storage
+  * in kernel space to copy_to_user into outside of the struct_mutex, so we
+@@ -398,72 +384,34 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
+ 			  struct drm_file *file)
+ {
+ 	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+-	struct mm_struct *mm = current->mm;
+-	struct page **user_pages;
++	char __user *user_data;
+ 	ssize_t remain;
+-	loff_t offset, pinned_pages, i;
+-	loff_t first_data_page, last_data_page, num_pages;
+-	int shmem_page_offset;
+-	int data_page_index, data_page_offset;
+-	int page_length;
+-	int ret;
+-	uint64_t data_ptr = args->data_ptr;
+-	int do_bit17_swizzling;
++	loff_t offset;
++	int shmem_page_offset, page_length, ret;
++	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ 
++	user_data = (char __user *) (uintptr_t) args->data_ptr;
+ 	remain = args->size;
+ 
+-	/* Pin the user pages containing the data.  We can't fault while
+-	 * holding the struct mutex, yet we want to hold it while
+-	 * dereferencing the user data.
+-	 */
+-	first_data_page = data_ptr / PAGE_SIZE;
+-	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+-	num_pages = last_data_page - first_data_page + 1;
++	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ 
+-	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
+-	if (user_pages == NULL)
+-		return -ENOMEM;
++	offset = args->offset;
+ 
+ 	mutex_unlock(&dev->struct_mutex);
+-	down_read(&mm->mmap_sem);
+-	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+-				      num_pages, 1, 0, user_pages, NULL);
+-	up_read(&mm->mmap_sem);
+-	mutex_lock(&dev->struct_mutex);
+-	if (pinned_pages < num_pages) {
+-		ret = -EFAULT;
+-		goto out;
+-	}
+-
+-	ret = i915_gem_object_set_cpu_read_domain_range(obj,
+-							args->offset,
+-							args->size);
+-	if (ret)
+-		goto out;
+-
+-	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+-
+-	offset = args->offset;
+ 
+ 	while (remain > 0) {
+ 		struct page *page;
++		char *vaddr;
+ 
+ 		/* Operation in this page
+ 		 *
+ 		 * shmem_page_offset = offset within page in shmem file
+-		 * data_page_index = page number in get_user_pages return
+-		 * data_page_offset = offset with data_page_index page.
+ 		 * page_length = bytes to copy for this page
+ 		 */
+ 		shmem_page_offset = offset_in_page(offset);
+-		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+-		data_page_offset = offset_in_page(data_ptr);
+-
+ 		page_length = remain;
+ 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
+ 			page_length = PAGE_SIZE - shmem_page_offset;
+-		if ((data_page_offset + page_length) > PAGE_SIZE)
+-			page_length = PAGE_SIZE - data_page_offset;
+ 
+ 		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+ 		if (IS_ERR(page)) {
+@@ -471,36 +419,38 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
+ 			goto out;
+ 		}
+ 
+-		if (do_bit17_swizzling) {
+-			slow_shmem_bit17_copy(page,
+-					      shmem_page_offset,
+-					      user_pages[data_page_index],
+-					      data_page_offset,
+-					      page_length,
+-					      1);
+-		} else {
+-			slow_shmem_copy(user_pages[data_page_index],
+-					data_page_offset,
+-					page,
+-					shmem_page_offset,
+-					page_length);
+-		}
++		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
++			(page_to_phys(page) & (1 << 17)) != 0;
++
++		vaddr = kmap(page);
++		if (page_do_bit17_swizzling)
++			ret = __copy_to_user_swizzled(user_data,
++						      vaddr, shmem_page_offset,
++						      page_length);
++		else
++			ret = __copy_to_user(user_data,
++					     vaddr + shmem_page_offset,
++					     page_length);
++		kunmap(page);
+ 
+ 		mark_page_accessed(page);
+ 		page_cache_release(page);
+ 
++		if (ret) {
++			ret = -EFAULT;
++			goto out;
++		}
++
+ 		remain -= page_length;
+-		data_ptr += page_length;
++		user_data += page_length;
+ 		offset += page_length;
+ 	}
+ 
+ out:
+-	for (i = 0; i < pinned_pages; i++) {
+-		SetPageDirty(user_pages[i]);
+-		mark_page_accessed(user_pages[i]);
+-		page_cache_release(user_pages[i]);
+-	}
+-	drm_free_large(user_pages);
++	mutex_lock(&dev->struct_mutex);
++	/* Fixup: Kill any reinstated backing storage pages */
++	if (obj->madv == __I915_MADV_PURGED)
++		i915_gem_object_truncate(obj);
+ 
+ 	return ret;
+ }
+@@ -841,71 +791,36 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
+ 			   struct drm_file *file)
+ {
+ 	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+-	struct mm_struct *mm = current->mm;
+-	struct page **user_pages;
+ 	ssize_t remain;
+-	loff_t offset, pinned_pages, i;
+-	loff_t first_data_page, last_data_page, num_pages;
+-	int shmem_page_offset;
+-	int data_page_index,  data_page_offset;
+-	int page_length;
+-	int ret;
+-	uint64_t data_ptr = args->data_ptr;
+-	int do_bit17_swizzling;
++	loff_t offset;
++	char __user *user_data;
++	int shmem_page_offset, page_length, ret;
++	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
+ 
++	user_data = (char __user *) (uintptr_t) args->data_ptr;
+ 	remain = args->size;
+ 
+-	/* Pin the user pages containing the data.  We can't fault while
+-	 * holding the struct mutex, and all of the pwrite implementations
+-	 * want to hold it while dereferencing the user data.
+-	 */
+-	first_data_page = data_ptr / PAGE_SIZE;
+-	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+-	num_pages = last_data_page - first_data_page + 1;
+-
+-	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
+-	if (user_pages == NULL)
+-		return -ENOMEM;
+-
+-	mutex_unlock(&dev->struct_mutex);
+-	down_read(&mm->mmap_sem);
+-	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
+-				      num_pages, 0, 0, user_pages, NULL);
+-	up_read(&mm->mmap_sem);
+-	mutex_lock(&dev->struct_mutex);
+-	if (pinned_pages < num_pages) {
+-		ret = -EFAULT;
+-		goto out;
+-	}
+-
+-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+-	if (ret)
+-		goto out;
+-
+-	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
++	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+ 
+ 	offset = args->offset;
+ 	obj->dirty = 1;
+ 
++	mutex_unlock(&dev->struct_mutex);
++
+ 	while (remain > 0) {
+ 		struct page *page;
++		char *vaddr;
+ 
+ 		/* Operation in this page
+ 		 *
+ 		 * shmem_page_offset = offset within page in shmem file
+-		 * data_page_index = page number in get_user_pages return
+-		 * data_page_offset = offset with data_page_index page.
+ 		 * page_length = bytes to copy for this page
+ 		 */
+ 		shmem_page_offset = offset_in_page(offset);
+-		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
+-		data_page_offset = offset_in_page(data_ptr);
+ 
+ 		page_length = remain;
+ 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
+ 			page_length = PAGE_SIZE - shmem_page_offset;
+-		if ((data_page_offset + page_length) > PAGE_SIZE)
+-			page_length = PAGE_SIZE - data_page_offset;
+ 
+ 		page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
+ 		if (IS_ERR(page)) {
+@@ -913,34 +828,45 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
+ 			goto out;
+ 		}
+ 
+-		if (do_bit17_swizzling) {
+-			slow_shmem_bit17_copy(page,
+-					      shmem_page_offset,
+-					      user_pages[data_page_index],
+-					      data_page_offset,
+-					      page_length,
+-					      0);
+-		} else {
+-			slow_shmem_copy(page,
+-					shmem_page_offset,
+-					user_pages[data_page_index],
+-					data_page_offset,
+-					page_length);
+-		}
++		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
++			(page_to_phys(page) & (1 << 17)) != 0;
++
++		vaddr = kmap(page);
++		if (page_do_bit17_swizzling)
++			ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
++							user_data,
++							page_length);
++		else
++			ret = __copy_from_user(vaddr + shmem_page_offset,
++					       user_data,
++					       page_length);
++		kunmap(page);
+ 
+ 		set_page_dirty(page);
+ 		mark_page_accessed(page);
+ 		page_cache_release(page);
+ 
++		if (ret) {
++			ret = -EFAULT;
++			goto out;
++		}
++
+ 		remain -= page_length;
+-		data_ptr += page_length;
++		user_data += page_length;
+ 		offset += page_length;
+ 	}
+ 
+ out:
+-	for (i = 0; i < pinned_pages; i++)
+-		page_cache_release(user_pages[i]);
+-	drm_free_large(user_pages);
++	mutex_lock(&dev->struct_mutex);
++	/* Fixup: Kill any reinstated backing storage pages */
++	if (obj->madv == __I915_MADV_PURGED)
++		i915_gem_object_truncate(obj);
++	/* and flush dirty cachelines in case the object isn't in the cpu write
++	 * domain anymore. */
++	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
++		i915_gem_clflush_object(obj);
++		intel_gtt_chipset_flush();
++	}
+ 
+ 	return ret;
+ }
+@@ -996,11 +922,14 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ 	 * pread/pwrite currently are reading and writing from the CPU
+ 	 * perspective, requiring manual detiling by the client.
+ 	 */
+-	if (obj->phys_obj)
++	if (obj->phys_obj) {
+ 		ret = i915_gem_phys_pwrite(dev, obj, args, file);
+-	else if (obj->gtt_space &&
+-		 obj->tiling_mode == I915_TILING_NONE &&
+-		 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
++		goto out;
++	}
++
++	if (obj->gtt_space &&
++	    obj->tiling_mode == I915_TILING_NONE &&
++	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ 		ret = i915_gem_object_pin(obj, 0, true);
+ 		if (ret)
+ 			goto out;
+@@ -1019,18 +948,24 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ 
+ out_unpin:
+ 		i915_gem_object_unpin(obj);
+-	} else {
+-		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+-		if (ret)
+-			goto out;
+ 
+-		ret = -EFAULT;
+-		if (!i915_gem_object_needs_bit17_swizzle(obj))
+-			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
+-		if (ret == -EFAULT)
+-			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
++		if (ret != -EFAULT)
++			goto out;
++		/* Fall through to the shmfs paths because the gtt paths might
++		 * fail with non-page-backed user pointers (e.g. gtt mappings
++		 * when moving data between textures). */
+ 	}
+ 
++	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
++	if (ret)
++		goto out;
++
++	ret = -EFAULT;
++	if (!i915_gem_object_needs_bit17_swizzle(obj))
++		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
++	if (ret == -EFAULT)
++		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
++
+ out:
+ 	drm_gem_object_unreference(&obj->base);
+ unlock:
+@@ -1142,7 +1077,6 @@ int
+ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ 		    struct drm_file *file)
+ {
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_i915_gem_mmap *args = data;
+ 	struct drm_gem_object *obj;
+ 	unsigned long addr;
+@@ -1154,16 +1088,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ 	if (obj == NULL)
+ 		return -ENOENT;
+ 
+-	if (obj->size > dev_priv->mm.gtt_mappable_end) {
+-		drm_gem_object_unreference_unlocked(obj);
+-		return -E2BIG;
+-	}
+-
+-	down_write(&current->mm->mmap_sem);
+-	addr = do_mmap(obj->filp, 0, args->size,
++	addr = vm_mmap(obj->filp, 0, args->size,
+ 		       PROT_READ | PROT_WRITE, MAP_SHARED,
+ 		       args->offset);
+-	up_write(&current->mm->mmap_sem);
+ 	drm_gem_object_unreference_unlocked(obj);
+ 	if (IS_ERR((void *)addr))
+ 		return addr;
+@@ -1259,11 +1186,6 @@ out:
+ 	case 0:
+ 	case -ERESTARTSYS:
+ 	case -EINTR:
+-	case -EBUSY:
+-		/*
+-		 * EBUSY is ok: this just means that another thread
+-		 * already did the job.
+-		 */
+ 		return VM_FAULT_NOPAGE;
+ 	case -ENOMEM:
+ 		return VM_FAULT_OOM;
+@@ -1687,12 +1609,20 @@ i915_add_request(struct intel_ring_buffer *ring,
+ {
+ 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ 	uint32_t seqno;
++	u32 request_ring_position;
+ 	int was_empty;
+ 	int ret;
+ 
+ 	BUG_ON(request == NULL);
+ 	seqno = i915_gem_next_request_seqno(ring);
+ 
++	/* Record the position of the start of the request so that
++	 * should we detect the updated seqno part-way through the
++	 * GPU processing the request, we never over-estimate the
++	 * position of the head.
++	 */
++	request_ring_position = intel_ring_get_tail(ring);
++
+ 	ret = ring->add_request(ring, &seqno);
+ 	if (ret)
+ 	    return ret;
+@@ -1701,6 +1631,7 @@ i915_add_request(struct intel_ring_buffer *ring,
+ 
+ 	request->seqno = seqno;
+ 	request->ring = ring;
++	request->tail = request_ring_position;
+ 	request->emitted_jiffies = jiffies;
+ 	was_empty = list_empty(&ring->request_list);
+ 	list_add_tail(&request->list, &ring->request_list);
+@@ -1715,7 +1646,7 @@ i915_add_request(struct intel_ring_buffer *ring,
+ 		spin_unlock(&file_priv->mm.lock);
+ 	}
+ 
+-	ring->outstanding_lazy_request = false;
++	ring->outstanding_lazy_request = 0;
+ 
+ 	if (!dev_priv->mm.suspended) {
+ 		if (i915_enable_hangcheck) {
+@@ -1837,7 +1768,7 @@ void i915_gem_reset(struct drm_device *dev)
+ /**
+  * This function clears the request list as sequence numbers are passed.
+  */
+-static void
++void
+ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
+ {
+ 	uint32_t seqno;
+@@ -1865,6 +1796,12 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
+ 			break;
+ 
+ 		trace_i915_gem_request_retire(ring, request->seqno);
++		/* We know the GPU must have read the request to have
++		 * sent us the seqno + interrupt, so use the position
++		 * of tail of the request to update the last known position
++		 * of the GPU head.
++		 */
++		ring->last_retired_head = request->tail;
+ 
+ 		list_del(&request->list);
+ 		i915_gem_request_remove_from_client(request);
+@@ -1977,7 +1914,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
+  */
+ int
+ i915_wait_request(struct intel_ring_buffer *ring,
+-		  uint32_t seqno)
++		  uint32_t seqno,
++		  bool do_retire)
+ {
+ 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ 	u32 ier;
+@@ -2040,9 +1978,9 @@ i915_wait_request(struct intel_ring_buffer *ring,
+ 					   || atomic_read(&dev_priv->mm.wedged));
+ 
+ 			ring->irq_put(ring);
+-		} else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
+-						      seqno) ||
+-				    atomic_read(&dev_priv->mm.wedged), 3000))
++		} else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
++							     seqno) ||
++					   atomic_read(&dev_priv->mm.wedged), 3000))
+ 			ret = -EBUSY;
+ 		ring->waiting_seqno = 0;
+ 
+@@ -2051,17 +1989,12 @@ i915_wait_request(struct intel_ring_buffer *ring,
+ 	if (atomic_read(&dev_priv->mm.wedged))
+ 		ret = -EAGAIN;
+ 
+-	if (ret && ret != -ERESTARTSYS)
+-		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
+-			  __func__, ret, seqno, ring->get_seqno(ring),
+-			  dev_priv->next_seqno);
+-
+ 	/* Directly dispatch request retiring.  While we have the work queue
+ 	 * to handle this, the waiter on a request often wants an associated
+ 	 * buffer to have made it to the inactive list, and we would need
+ 	 * a separate wait queue to handle that.
+ 	 */
+-	if (ret == 0)
++	if (ret == 0 && do_retire)
+ 		i915_gem_retire_requests_ring(ring);
+ 
+ 	return ret;
+@@ -2085,7 +2018,8 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
+ 	 * it.
+ 	 */
+ 	if (obj->active) {
+-		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
++		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
++					true);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -2123,6 +2057,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+ int
+ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+ {
++	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ 	int ret = 0;
+ 
+ 	if (obj->gtt_space == NULL)
+@@ -2167,6 +2102,11 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+ 	trace_i915_gem_object_unbind(obj);
+ 
+ 	i915_gem_gtt_unbind_object(obj);
++	if (obj->has_aliasing_ppgtt_mapping) {
++		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
++		obj->has_aliasing_ppgtt_mapping = 0;
++	}
++
+ 	i915_gem_object_put_pages_gtt(obj);
+ 
+ 	list_del_init(&obj->gtt_list);
+@@ -2206,7 +2146,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
+ 	return 0;
+ }
+ 
+-static int i915_ring_idle(struct intel_ring_buffer *ring)
++static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
+ {
+ 	int ret;
+ 
+@@ -2220,18 +2160,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
+ 			return ret;
+ 	}
+ 
+-	return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
++	return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
++				 do_retire);
+ }
+ 
+-int
+-i915_gpu_idle(struct drm_device *dev)
++int i915_gpu_idle(struct drm_device *dev, bool do_retire)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	int ret, i;
+ 
+ 	/* Flush everything onto the inactive list. */
+ 	for (i = 0; i < I915_NUM_RINGS; i++) {
+-		ret = i915_ring_idle(&dev_priv->ring[i]);
++		ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -2434,7 +2374,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
+ 		if (!ring_passed_seqno(obj->last_fenced_ring,
+ 				       obj->last_fenced_seqno)) {
+ 			ret = i915_wait_request(obj->last_fenced_ring,
+-						obj->last_fenced_seqno);
++						obj->last_fenced_seqno,
++						true);
+ 			if (ret)
+ 				return ret;
+ 		}
+@@ -2466,6 +2407,8 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+ 
+ 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ 		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
++
++		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
+ 		i915_gem_clear_fence_reg(obj->base.dev,
+ 					 &dev_priv->fence_regs[obj->fence_reg]);
+ 
+@@ -2490,7 +2433,7 @@ i915_find_fence_reg(struct drm_device *dev,
+ 		if (!reg->obj)
+ 			return reg;
+ 
+-		if (!reg->obj->pin_count)
++		if (!reg->pin_count)
+ 			avail = reg;
+ 	}
+ 
+@@ -2500,7 +2443,7 @@ i915_find_fence_reg(struct drm_device *dev,
+ 	/* None available, try to steal one or wait for a user to finish */
+ 	avail = first = NULL;
+ 	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+-		if (reg->obj->pin_count)
++		if (reg->pin_count)
+ 			continue;
+ 
+ 		if (first == NULL)
+@@ -2575,7 +2518,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ 				if (!ring_passed_seqno(obj->last_fenced_ring,
+ 						       reg->setup_seqno)) {
+ 					ret = i915_wait_request(obj->last_fenced_ring,
+-								reg->setup_seqno);
++								reg->setup_seqno,
++								true);
+ 					if (ret)
+ 						return ret;
+ 				}
+@@ -2594,7 +2538,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ 
+ 	reg = i915_find_fence_reg(dev, pipelined);
+ 	if (reg == NULL)
+-		return -ENOSPC;
++		return -EDEADLK;
+ 
+ 	ret = i915_gem_object_flush_fence(obj, pipelined);
+ 	if (ret)
+@@ -2694,6 +2638,7 @@ i915_gem_clear_fence_reg(struct drm_device *dev,
+ 	list_del_init(&reg->lru_list);
+ 	reg->obj = NULL;
+ 	reg->setup_seqno = 0;
++	reg->pin_count = 0;
+ }
+ 
+ /**
+@@ -2980,6 +2925,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
+ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ 				    enum i915_cache_level cache_level)
+ {
++	struct drm_device *dev = obj->base.dev;
++	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	int ret;
+ 
+ 	if (obj->cache_level == cache_level)
+@@ -3008,6 +2955,9 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ 		}
+ 
+ 		i915_gem_gtt_rebind_object(obj, cache_level);
++		if (obj->has_aliasing_ppgtt_mapping)
++			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
++					       obj, cache_level);
+ 	}
+ 
+ 	if (cache_level == I915_CACHE_NONE) {
+@@ -3346,8 +3296,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
+ 
+ 			if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+ 				ret = -EIO;
+-		} else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
+-						      seqno) ||
++		} else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
++							     seqno) ||
+ 				    atomic_read(&dev_priv->mm.wedged), 3000)) {
+ 			ret = -EBUSY;
+ 		}
+@@ -3657,8 +3607,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ 
+-	if (IS_GEN6(dev) || IS_GEN7(dev)) {
+-		/* On Gen6, we can have the GPU use the LLC (the CPU
++	if (HAS_LLC(dev)) {
++		/* On some devices, we can have the GPU use the LLC (the CPU
+ 		 * cache) for about a 10% performance improvement
+ 		 * compared to uncached.  Graphics requests other than
+ 		 * display scanout are coherent with the CPU in
+@@ -3748,7 +3698,7 @@ i915_gem_idle(struct drm_device *dev)
+ 		return 0;
+ 	}
+ 
+-	ret = i915_gpu_idle(dev);
++	ret = i915_gpu_idle(dev, true);
+ 	if (ret) {
+ 		mutex_unlock(&dev->struct_mutex);
+ 		return ret;
+@@ -3783,12 +3733,91 @@ i915_gem_idle(struct drm_device *dev)
+ 	return 0;
+ }
+ 
++void i915_gem_init_swizzling(struct drm_device *dev)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++
++	if (INTEL_INFO(dev)->gen < 5 ||
++	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
++		return;
++
++	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
++				 DISP_TILE_SURFACE_SWIZZLING);
++
++	if (IS_GEN5(dev))
++		return;
++
++	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
++	if (IS_GEN6(dev))
++		I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_SNB));
++	else
++		I915_WRITE(ARB_MODE, ARB_MODE_ENABLE(ARB_MODE_SWIZZLE_IVB));
++}
++
++void i915_gem_init_ppgtt(struct drm_device *dev)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	uint32_t pd_offset;
++	struct intel_ring_buffer *ring;
++	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
++	uint32_t __iomem *pd_addr;
++	uint32_t pd_entry;
++	int i;
++
++	if (!dev_priv->mm.aliasing_ppgtt)
++		return;
++
++
++	pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
++	for (i = 0; i < ppgtt->num_pd_entries; i++) {
++		dma_addr_t pt_addr;
++
++		if (dev_priv->mm.gtt->needs_dmar)
++			pt_addr = ppgtt->pt_dma_addr[i];
++		else
++			pt_addr = page_to_phys(ppgtt->pt_pages[i]);
++
++		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
++		pd_entry |= GEN6_PDE_VALID;
++
++		writel(pd_entry, pd_addr + i);
++	}
++	readl(pd_addr);
++
++	pd_offset = ppgtt->pd_offset;
++	pd_offset /= 64; /* in cachelines, */
++	pd_offset <<= 16;
++
++	if (INTEL_INFO(dev)->gen == 6) {
++		uint32_t ecochk = I915_READ(GAM_ECOCHK);
++		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
++				       ECOCHK_PPGTT_CACHE64B);
++		I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
++	} else if (INTEL_INFO(dev)->gen >= 7) {
++		I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
++		/* GFX_MODE is per-ring on gen7+ */
++	}
++
++	for (i = 0; i < I915_NUM_RINGS; i++) {
++		ring = &dev_priv->ring[i];
++
++		if (INTEL_INFO(dev)->gen >= 7)
++			I915_WRITE(RING_MODE_GEN7(ring),
++				   GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
++
++		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
++		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
++	}
++}
++
+ int
+-i915_gem_init_ringbuffer(struct drm_device *dev)
++i915_gem_init_hw(struct drm_device *dev)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	int ret;
+ 
++	i915_gem_init_swizzling(dev);
++
+ 	ret = intel_init_render_ring_buffer(dev);
+ 	if (ret)
+ 		return ret;
+@@ -3807,6 +3836,8 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
+ 
+ 	dev_priv->next_seqno = 1;
+ 
++	i915_gem_init_ppgtt(dev);
++
+ 	return 0;
+ 
+ cleanup_bsd_ring:
+@@ -3844,7 +3875,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ 	mutex_lock(&dev->struct_mutex);
+ 	dev_priv->mm.suspended = 0;
+ 
+-	ret = i915_gem_init_ringbuffer(dev);
++	ret = i915_gem_init_hw(dev);
+ 	if (ret != 0) {
+ 		mutex_unlock(&dev->struct_mutex);
+ 		return ret;
+@@ -4239,7 +4270,7 @@ rescan:
+ 		 * This has a dramatic impact to reduce the number of
+ 		 * OOM-killer events whilst running the GPU aggressively.
+ 		 */
+-		if (i915_gpu_idle(dev) == 0)
++		if (i915_gpu_idle(dev, true) == 0)
+ 			goto rescan;
+ 	}
+ 	mutex_unlock(&dev->struct_mutex);
+diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
+index ead5d00..21a8271 100644
+--- a/drivers/gpu/drm/i915/i915_gem_evict.c
++++ b/drivers/gpu/drm/i915/i915_gem_evict.c
+@@ -36,7 +36,6 @@ static bool
+ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
+ {
+ 	list_add(&obj->exec_list, unwind);
+-	drm_gem_object_reference(&obj->base);
+ 	return drm_mm_scan_add_block(obj->gtt_space);
+ }
+ 
+@@ -49,21 +48,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
+ 	struct drm_i915_gem_object *obj;
+ 	int ret = 0;
+ 
+-	i915_gem_retire_requests(dev);
+-
+-	/* Re-check for free space after retiring requests */
+-	if (mappable) {
+-		if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+-						min_size, alignment, 0,
+-						dev_priv->mm.gtt_mappable_end,
+-						0))
+-			return 0;
+-	} else {
+-		if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+-				       min_size, alignment, 0))
+-			return 0;
+-	}
+-
+ 	trace_i915_gem_evict(dev, min_size, alignment, mappable);
+ 
+ 	/*
+@@ -139,7 +123,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
+ 		BUG_ON(ret);
+ 
+ 		list_del_init(&obj->exec_list);
+-		drm_gem_object_unreference(&obj->base);
+ 	}
+ 
+ 	/* We expect the caller to unpin, evict all and try again, or give up.
+@@ -158,10 +141,10 @@ found:
+ 				       exec_list);
+ 		if (drm_mm_scan_remove_block(obj->gtt_space)) {
+ 			list_move(&obj->exec_list, &eviction_list);
++			drm_gem_object_reference(&obj->base);
+ 			continue;
+ 		}
+ 		list_del_init(&obj->exec_list);
+-		drm_gem_object_unreference(&obj->base);
+ 	}
+ 
+ 	/* Unbinding will emit any required flushes */
+@@ -195,7 +178,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
+ 	trace_i915_gem_evict_everything(dev, purgeable_only);
+ 
+ 	/* Flush everything (on to the inactive lists) and evict */
+-	ret = i915_gpu_idle(dev);
++	ret = i915_gpu_idle(dev, true);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index 1202198..d4417e3 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -203,9 +203,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
+ 	cd->invalidate_domains |= invalidate_domains;
+ 	cd->flush_domains |= flush_domains;
+ 	if (flush_domains & I915_GEM_GPU_DOMAINS)
+-		cd->flush_rings |= obj->ring->id;
++		cd->flush_rings |= intel_ring_flag(obj->ring);
+ 	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+-		cd->flush_rings |= ring->id;
++		cd->flush_rings |= intel_ring_flag(ring);
+ }
+ 
+ struct eb_objects {
+@@ -287,14 +287,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ 	 * exec_object list, so it should have a GTT space bound by now.
+ 	 */
+ 	if (unlikely(target_offset == 0)) {
+-		DRM_ERROR("No GTT space found for object %d\n",
++		DRM_DEBUG("No GTT space found for object %d\n",
+ 			  reloc->target_handle);
+ 		return ret;
+ 	}
+ 
+ 	/* Validate that the target is in a valid r/w GPU domain */
+ 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
+-		DRM_ERROR("reloc with multiple write domains: "
++		DRM_DEBUG("reloc with multiple write domains: "
+ 			  "obj %p target %d offset %d "
+ 			  "read %08x write %08x",
+ 			  obj, reloc->target_handle,
+@@ -303,8 +303,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ 			  reloc->write_domain);
+ 		return ret;
+ 	}
+-	if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
+-		DRM_ERROR("reloc with read/write CPU domains: "
++	if (unlikely((reloc->write_domain | reloc->read_domains)
++		     & ~I915_GEM_GPU_DOMAINS)) {
++		DRM_DEBUG("reloc with read/write non-GPU domains: "
+ 			  "obj %p target %d offset %d "
+ 			  "read %08x write %08x",
+ 			  obj, reloc->target_handle,
+@@ -315,7 +316,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ 	}
+ 	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
+ 		     reloc->write_domain != target_obj->pending_write_domain)) {
+-		DRM_ERROR("Write domain conflict: "
++		DRM_DEBUG("Write domain conflict: "
+ 			  "obj %p target %d offset %d "
+ 			  "new %08x old %08x\n",
+ 			  obj, reloc->target_handle,
+@@ -336,7 +337,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ 
+ 	/* Check that the relocation address is valid... */
+ 	if (unlikely(reloc->offset > obj->base.size - 4)) {
+-		DRM_ERROR("Relocation beyond object bounds: "
++		DRM_DEBUG("Relocation beyond object bounds: "
+ 			  "obj %p target %d offset %d size %d.\n",
+ 			  obj, reloc->target_handle,
+ 			  (int) reloc->offset,
+@@ -344,7 +345,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ 		return ret;
+ 	}
+ 	if (unlikely(reloc->offset & 3)) {
+-		DRM_ERROR("Relocation not 4-byte aligned: "
++		DRM_DEBUG("Relocation not 4-byte aligned: "
+ 			  "obj %p target %d offset %d.\n",
+ 			  obj, reloc->target_handle,
+ 			  (int) reloc->offset);
+@@ -461,11 +462,60 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
+ 	return ret;
+ }
+ 
++#define  __EXEC_OBJECT_HAS_FENCE (1<<31)
++
++static int
++pin_and_fence_object(struct drm_i915_gem_object *obj,
++		     struct intel_ring_buffer *ring)
++{
++	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
++	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
++	bool need_fence, need_mappable;
++	int ret;
++
++	need_fence =
++		has_fenced_gpu_access &&
++		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
++		obj->tiling_mode != I915_TILING_NONE;
++	need_mappable =
++		entry->relocation_count ? true : need_fence;
++
++	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
++	if (ret)
++		return ret;
++
++	if (has_fenced_gpu_access) {
++		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
++			if (obj->tiling_mode) {
++				ret = i915_gem_object_get_fence(obj, ring);
++				if (ret)
++					goto err_unpin;
++
++				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
++				i915_gem_object_pin_fence(obj);
++			} else {
++				ret = i915_gem_object_put_fence(obj);
++				if (ret)
++					goto err_unpin;
++			}
++			obj->pending_fenced_gpu_access = true;
++		}
++	}
++
++	entry->offset = obj->gtt_offset;
++	return 0;
++
++err_unpin:
++	i915_gem_object_unpin(obj);
++	return ret;
++}
++
+ static int
+ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+ 			    struct drm_file *file,
+ 			    struct list_head *objects)
+ {
++	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ 	struct drm_i915_gem_object *obj;
+ 	int ret, retry;
+ 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+@@ -518,6 +568,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+ 		list_for_each_entry(obj, objects, exec_list) {
+ 			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+ 			bool need_fence, need_mappable;
++
+ 			if (!obj->gtt_space)
+ 				continue;
+ 
+@@ -532,59 +583,55 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+ 			    (need_mappable && !obj->map_and_fenceable))
+ 				ret = i915_gem_object_unbind(obj);
+ 			else
+-				ret = i915_gem_object_pin(obj,
+-							  entry->alignment,
+-							  need_mappable);
++				ret = pin_and_fence_object(obj, ring);
+ 			if (ret)
+ 				goto err;
+-
+-			entry++;
+ 		}
+ 
+ 		/* Bind fresh objects */
+ 		list_for_each_entry(obj, objects, exec_list) {
+-			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+-			bool need_fence;
++			if (obj->gtt_space)
++				continue;
+ 
+-			need_fence =
+-				has_fenced_gpu_access &&
+-				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+-				obj->tiling_mode != I915_TILING_NONE;
++			ret = pin_and_fence_object(obj, ring);
++			if (ret) {
++				int ret_ignore;
++
++				/* This can potentially raise a harmless
++				 * -EINVAL if we failed to bind in the above
++				 * call. It cannot raise -EINTR since we know
++				 * that the bo is freshly bound and so will
++				 * not need to be flushed or waited upon.
++				 */
++				ret_ignore = i915_gem_object_unbind(obj);
++				(void)ret_ignore;
++				WARN_ON(obj->gtt_space);
++				break;
++			}
++		}
+ 
+-			if (!obj->gtt_space) {
+-				bool need_mappable =
+-					entry->relocation_count ? true : need_fence;
++		/* Decrement pin count for bound objects */
++		list_for_each_entry(obj, objects, exec_list) {
++			struct drm_i915_gem_exec_object2 *entry;
+ 
+-				ret = i915_gem_object_pin(obj,
+-							  entry->alignment,
+-							  need_mappable);
+-				if (ret)
+-					break;
+-			}
++			if (!obj->gtt_space)
++				continue;
+ 
+-			if (has_fenced_gpu_access) {
+-				if (need_fence) {
+-					ret = i915_gem_object_get_fence(obj, ring);
+-					if (ret)
+-						break;
+-				} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+-					   obj->tiling_mode == I915_TILING_NONE) {
+-					/* XXX pipelined! */
+-					ret = i915_gem_object_put_fence(obj);
+-					if (ret)
+-						break;
+-				}
+-				obj->pending_fenced_gpu_access =
+-					!!(entry->flags & EXEC_OBJECT_NEEDS_FENCE);
++			entry = obj->exec_entry;
++			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
++				i915_gem_object_unpin_fence(obj);
++				entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
+ 			}
+ 
+-			entry->offset = obj->gtt_offset;
+-		}
++			i915_gem_object_unpin(obj);
+ 
+-		/* Decrement pin count for bound objects */
+-		list_for_each_entry(obj, objects, exec_list) {
+-			if (obj->gtt_space)
+-				i915_gem_object_unpin(obj);
++			/* ... and ensure ppgtt mapping exist if needed. */
++			if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
++				i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
++						       obj, obj->cache_level);
++
++				obj->has_aliasing_ppgtt_mapping = 1;
++			}
+ 		}
+ 
+ 		if (ret != -ENOSPC || retry > 1)
+@@ -601,16 +648,19 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+ 	} while (1);
+ 
+ err:
+-	obj = list_entry(obj->exec_list.prev,
+-			 struct drm_i915_gem_object,
+-			 exec_list);
+-	while (objects != &obj->exec_list) {
+-		if (obj->gtt_space)
+-			i915_gem_object_unpin(obj);
++	list_for_each_entry_continue_reverse(obj, objects, exec_list) {
++		struct drm_i915_gem_exec_object2 *entry;
++
++		if (!obj->gtt_space)
++			continue;
+ 
+-		obj = list_entry(obj->exec_list.prev,
+-				 struct drm_i915_gem_object,
+-				 exec_list);
++		entry = obj->exec_entry;
++		if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
++			i915_gem_object_unpin_fence(obj);
++			entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
++		}
++
++		i915_gem_object_unpin(obj);
+ 	}
+ 
+ 	return ret;
+@@ -657,6 +707,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ 	total = 0;
+ 	for (i = 0; i < count; i++) {
+ 		struct drm_i915_gem_relocation_entry __user *user_relocs;
++		u64 invalid_offset = (u64)-1;
++		int j;
+ 
+ 		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
+ 
+@@ -667,6 +719,25 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ 			goto err;
+ 		}
+ 
++		/* As we do not update the known relocation offsets after
++		 * relocating (due to the complexities in lock handling),
++		 * we need to mark them as invalid now so that we force the
++		 * relocation processing next time. Just in case the target
++		 * object is evicted and then rebound into its old
++		 * presumed_offset before the next execbuffer - if that
++		 * happened we would make the mistake of assuming that the
++		 * relocations were valid.
++		 */
++		for (j = 0; j < exec[i].relocation_count; j++) {
++			if (copy_to_user(&user_relocs[j].presumed_offset,
++					 &invalid_offset,
++					 sizeof(invalid_offset))) {
++				ret = -EFAULT;
++				mutex_lock(&dev->struct_mutex);
++				goto err;
++			}
++		}
++
+ 		reloc_offset[i] = total;
+ 		total += exec[i].relocation_count;
+ 	}
+@@ -683,7 +754,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ 		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ 							exec[i].handle));
+ 		if (&obj->base == NULL) {
+-			DRM_ERROR("Invalid object handle %d at index %d\n",
++			DRM_DEBUG("Invalid object handle %d at index %d\n",
+ 				   exec[i].handle, i);
+ 			ret = -ENOENT;
+ 			goto err;
+@@ -972,6 +1043,31 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+ }
+ 
+ static int
++i915_reset_gen7_sol_offsets(struct drm_device *dev,
++			    struct intel_ring_buffer *ring)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	int ret, i;
++
++	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
++		return 0;
++
++	ret = intel_ring_begin(ring, 4 * 3);
++	if (ret)
++		return ret;
++
++	for (i = 0; i < 4; i++) {
++		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
++		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
++		intel_ring_emit(ring, 0);
++	}
++
++	intel_ring_advance(ring);
++
++	return 0;
++}
++
++static int
+ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 		       struct drm_file *file,
+ 		       struct drm_i915_gem_execbuffer2 *args,
+@@ -989,7 +1085,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 	int ret, mode, i;
+ 
+ 	if (!i915_gem_check_execbuffer(args)) {
+-		DRM_ERROR("execbuf with invalid offset/length\n");
++		DRM_DEBUG("execbuf with invalid offset/length\n");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1004,20 +1100,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 		break;
+ 	case I915_EXEC_BSD:
+ 		if (!HAS_BSD(dev)) {
+-			DRM_ERROR("execbuf with invalid ring (BSD)\n");
++			DRM_DEBUG("execbuf with invalid ring (BSD)\n");
+ 			return -EINVAL;
+ 		}
+ 		ring = &dev_priv->ring[VCS];
+ 		break;
+ 	case I915_EXEC_BLT:
+ 		if (!HAS_BLT(dev)) {
+-			DRM_ERROR("execbuf with invalid ring (BLT)\n");
++			DRM_DEBUG("execbuf with invalid ring (BLT)\n");
+ 			return -EINVAL;
+ 		}
+ 		ring = &dev_priv->ring[BCS];
+ 		break;
+ 	default:
+-		DRM_ERROR("execbuf with unknown ring: %d\n",
++		DRM_DEBUG("execbuf with unknown ring: %d\n",
+ 			  (int)(args->flags & I915_EXEC_RING_MASK));
+ 		return -EINVAL;
+ 	}
+@@ -1043,18 +1139,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 		}
+ 		break;
+ 	default:
+-		DRM_ERROR("execbuf with unknown constants: %d\n", mode);
++		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (args->buffer_count < 1) {
+-		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
++		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (args->num_cliprects != 0) {
+ 		if (ring != &dev_priv->ring[RCS]) {
+-			DRM_ERROR("clip rectangles are only valid with the render ring\n");
++			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+ 			return -EINVAL;
+ 		}
+ 
+@@ -1104,7 +1200,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ 							exec[i].handle));
+ 		if (&obj->base == NULL) {
+-			DRM_ERROR("Invalid object handle %d at index %d\n",
++			DRM_DEBUG("Invalid object handle %d at index %d\n",
+ 				   exec[i].handle, i);
+ 			/* prevent error path from reading uninitialized data */
+ 			ret = -ENOENT;
+@@ -1112,7 +1208,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 		}
+ 
+ 		if (!list_empty(&obj->exec_list)) {
+-			DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
++			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
+ 				   obj, exec[i].handle, i);
+ 			ret = -EINVAL;
+ 			goto err;
+@@ -1150,7 +1246,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 
+ 	/* Set the pending read domains for the batch buffer to COMMAND */
+ 	if (batch_obj->base.pending_write_domain) {
+-		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
++		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
+ 		ret = -EINVAL;
+ 		goto err;
+ 	}
+@@ -1167,7 +1263,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 			 * so every billion or so execbuffers, we need to stall
+ 			 * the GPU in order to reset the counters.
+ 			 */
+-			ret = i915_gpu_idle(dev);
++			ret = i915_gpu_idle(dev, true);
+ 			if (ret)
+ 				goto err;
+ 
+@@ -1190,6 +1286,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ 		dev_priv->relative_constants_mode = mode;
+ 	}
+ 
++	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
++		ret = i915_reset_gen7_sol_offsets(dev, ring);
++		if (ret)
++			goto err;
++	}
++
+ 	trace_i915_gem_ring_dispatch(ring, seqno);
+ 
+ 	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+@@ -1249,7 +1351,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ 	int ret, i;
+ 
+ 	if (args->buffer_count < 1) {
+-		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
++		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1257,7 +1359,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ 	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ 	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ 	if (exec_list == NULL || exec2_list == NULL) {
+-		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
++		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+ 			  args->buffer_count);
+ 		drm_free_large(exec_list);
+ 		drm_free_large(exec2_list);
+@@ -1268,7 +1370,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ 			     (uintptr_t) args->buffers_ptr,
+ 			     sizeof(*exec_list) * args->buffer_count);
+ 	if (ret != 0) {
+-		DRM_ERROR("copy %d exec entries failed %d\n",
++		DRM_DEBUG("copy %d exec entries failed %d\n",
+ 			  args->buffer_count, ret);
+ 		drm_free_large(exec_list);
+ 		drm_free_large(exec2_list);
+@@ -1309,7 +1411,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ 				   sizeof(*exec_list) * args->buffer_count);
+ 		if (ret) {
+ 			ret = -EFAULT;
+-			DRM_ERROR("failed to copy %d exec entries "
++			DRM_DEBUG("failed to copy %d exec entries "
+ 				  "back to user (%d)\n",
+ 				  args->buffer_count, ret);
+ 		}
+@@ -1330,7 +1432,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ 
+ 	if (args->buffer_count < 1 ||
+ 	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
+-		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
++		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1340,7 +1442,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ 		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
+ 					   args->buffer_count);
+ 	if (exec2_list == NULL) {
+-		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
++		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
+ 			  args->buffer_count);
+ 		return -ENOMEM;
+ 	}
+@@ -1349,7 +1451,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ 			     (uintptr_t) args->buffers_ptr,
+ 			     sizeof(*exec2_list) * args->buffer_count);
+ 	if (ret != 0) {
+-		DRM_ERROR("copy %d exec entries failed %d\n",
++		DRM_DEBUG("copy %d exec entries failed %d\n",
+ 			  args->buffer_count, ret);
+ 		drm_free_large(exec2_list);
+ 		return -EFAULT;
+@@ -1364,7 +1466,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ 				   sizeof(*exec2_list) * args->buffer_count);
+ 		if (ret) {
+ 			ret = -EFAULT;
+-			DRM_ERROR("failed to copy %d exec entries "
++			DRM_DEBUG("failed to copy %d exec entries "
+ 				  "back to user (%d)\n",
+ 				  args->buffer_count, ret);
+ 		}
+diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
+index 6042c5e..a135c61 100644
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -29,6 +29,270 @@
+ #include "i915_trace.h"
+ #include "intel_drv.h"
+ 
++/* PPGTT support for Sandybdrige/Gen6 and later */
++static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
++				   unsigned first_entry,
++				   unsigned num_entries)
++{
++	uint32_t *pt_vaddr;
++	uint32_t scratch_pte;
++	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
++	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
++	unsigned last_pte, i;
++
++	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
++	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
++
++	while (num_entries) {
++		last_pte = first_pte + num_entries;
++		if (last_pte > I915_PPGTT_PT_ENTRIES)
++			last_pte = I915_PPGTT_PT_ENTRIES;
++
++		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
++
++		for (i = first_pte; i < last_pte; i++)
++			pt_vaddr[i] = scratch_pte;
++
++		kunmap_atomic(pt_vaddr);
++
++		num_entries -= last_pte - first_pte;
++		first_pte = 0;
++		act_pd++;
++	}
++}
++
++int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct i915_hw_ppgtt *ppgtt;
++	unsigned first_pd_entry_in_global_pt;
++	int i;
++	int ret = -ENOMEM;
++
++	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
++	 * entries. For aliasing ppgtt support we just steal them at the end for
++	 * now. */
++	first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;
++
++	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
++	if (!ppgtt)
++		return ret;
++
++	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
++	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
++				  GFP_KERNEL);
++	if (!ppgtt->pt_pages)
++		goto err_ppgtt;
++
++	for (i = 0; i < ppgtt->num_pd_entries; i++) {
++		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
++		if (!ppgtt->pt_pages[i])
++			goto err_pt_alloc;
++	}
++
++	if (dev_priv->mm.gtt->needs_dmar) {
++		ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
++						*ppgtt->num_pd_entries,
++					     GFP_KERNEL);
++		if (!ppgtt->pt_dma_addr)
++			goto err_pt_alloc;
++	}
++
++	for (i = 0; i < ppgtt->num_pd_entries; i++) {
++		dma_addr_t pt_addr;
++		if (dev_priv->mm.gtt->needs_dmar) {
++			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
++					       0, 4096,
++					       PCI_DMA_BIDIRECTIONAL);
++
++			if (pci_dma_mapping_error(dev->pdev,
++						  pt_addr)) {
++				ret = -EIO;
++				goto err_pd_pin;
++
++			}
++			ppgtt->pt_dma_addr[i] = pt_addr;
++		} else
++			pt_addr = page_to_phys(ppgtt->pt_pages[i]);
++	}
++
++	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
++
++	i915_ppgtt_clear_range(ppgtt, 0,
++			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
++
++	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
++
++	dev_priv->mm.aliasing_ppgtt = ppgtt;
++
++	return 0;
++
++err_pd_pin:
++	if (ppgtt->pt_dma_addr) {
++		for (i--; i >= 0; i--)
++			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
++				       4096, PCI_DMA_BIDIRECTIONAL);
++	}
++err_pt_alloc:
++	kfree(ppgtt->pt_dma_addr);
++	for (i = 0; i < ppgtt->num_pd_entries; i++) {
++		if (ppgtt->pt_pages[i])
++			__free_page(ppgtt->pt_pages[i]);
++	}
++	kfree(ppgtt->pt_pages);
++err_ppgtt:
++	kfree(ppgtt);
++
++	return ret;
++}
++
++void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
++	int i;
++
++	if (!ppgtt)
++		return;
++
++	if (ppgtt->pt_dma_addr) {
++		for (i = 0; i < ppgtt->num_pd_entries; i++)
++			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
++				       4096, PCI_DMA_BIDIRECTIONAL);
++	}
++
++	kfree(ppgtt->pt_dma_addr);
++	for (i = 0; i < ppgtt->num_pd_entries; i++)
++		__free_page(ppgtt->pt_pages[i]);
++	kfree(ppgtt->pt_pages);
++	kfree(ppgtt);
++}
++
++static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
++					 struct scatterlist *sg_list,
++					 unsigned sg_len,
++					 unsigned first_entry,
++					 uint32_t pte_flags)
++{
++	uint32_t *pt_vaddr, pte;
++	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
++	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
++	unsigned i, j, m, segment_len;
++	dma_addr_t page_addr;
++	struct scatterlist *sg;
++
++	/* init sg walking */
++	sg = sg_list;
++	i = 0;
++	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
++	m = 0;
++
++	while (i < sg_len) {
++		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
++
++		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
++			page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
++			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
++			pt_vaddr[j] = pte | pte_flags;
++
++			/* grab the next page */
++			m++;
++			if (m == segment_len) {
++				sg = sg_next(sg);
++				i++;
++				if (i == sg_len)
++					break;
++
++				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
++				m = 0;
++			}
++		}
++
++		kunmap_atomic(pt_vaddr);
++
++		first_pte = 0;
++		act_pd++;
++	}
++}
++
++static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
++				    unsigned first_entry, unsigned num_entries,
++				    struct page **pages, uint32_t pte_flags)
++{
++	uint32_t *pt_vaddr, pte;
++	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
++	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
++	unsigned last_pte, i;
++	dma_addr_t page_addr;
++
++	while (num_entries) {
++		last_pte = first_pte + num_entries;
++		last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
++
++		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
++
++		for (i = first_pte; i < last_pte; i++) {
++			page_addr = page_to_phys(*pages);
++			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
++			pt_vaddr[i] = pte | pte_flags;
++
++			pages++;
++		}
++
++		kunmap_atomic(pt_vaddr);
++
++		num_entries -= last_pte - first_pte;
++		first_pte = 0;
++		act_pd++;
++	}
++}
++
++void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
++			    struct drm_i915_gem_object *obj,
++			    enum i915_cache_level cache_level)
++{
++	struct drm_device *dev = obj->base.dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	uint32_t pte_flags = GEN6_PTE_VALID;
++
++	switch (cache_level) {
++	case I915_CACHE_LLC_MLC:
++		pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
++		break;
++	case I915_CACHE_LLC:
++		pte_flags |= GEN6_PTE_CACHE_LLC;
++		break;
++	case I915_CACHE_NONE:
++		pte_flags |= GEN6_PTE_UNCACHED;
++		break;
++	default:
++		BUG();
++	}
++
++	if (dev_priv->mm.gtt->needs_dmar) {
++		BUG_ON(!obj->sg_list);
++
++		i915_ppgtt_insert_sg_entries(ppgtt,
++					     obj->sg_list,
++					     obj->num_sg,
++					     obj->gtt_space->start >> PAGE_SHIFT,
++					     pte_flags);
++	} else
++		i915_ppgtt_insert_pages(ppgtt,
++					obj->gtt_space->start >> PAGE_SHIFT,
++					obj->base.size >> PAGE_SHIFT,
++					obj->pages,
++					pte_flags);
++}
++
++void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
++			      struct drm_i915_gem_object *obj)
++{
++	i915_ppgtt_clear_range(ppgtt,
++			       obj->gtt_space->start >> PAGE_SHIFT,
++			       obj->base.size >> PAGE_SHIFT);
++}
++
+ /* XXX kill agp_type! */
+ static unsigned int cache_level_to_agp_type(struct drm_device *dev,
+ 					    enum i915_cache_level cache_level)
+@@ -55,7 +319,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
+ 
+ 	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
+ 		dev_priv->mm.interruptible = false;
+-		if (i915_gpu_idle(dev_priv->dev)) {
++		if (i915_gpu_idle(dev_priv->dev, false)) {
+ 			DRM_ERROR("Couldn't idle GPU\n");
+ 			/* Wait a bit, in hopes it avoids the hang */
+ 			udelay(10);
+diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
+index 861223b..1a93066 100644
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -93,8 +93,23 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+ 	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ 
+ 	if (INTEL_INFO(dev)->gen >= 6) {
+-		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+-		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
++		uint32_t dimm_c0, dimm_c1;
++		dimm_c0 = I915_READ(MAD_DIMM_C0);
++		dimm_c1 = I915_READ(MAD_DIMM_C1);
++		dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
++		dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
++		/* Enable swizzling when the channels are populated with
++		 * identically sized dimms. We don't need to check the 3rd
++		 * channel because no cpu with gpu attached ships in that
++		 * configuration. Also, swizzling only makes sense for 2
++		 * channels anyway. */
++		if (dimm_c0 == dimm_c1) {
++			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
++			swizzle_y = I915_BIT_6_SWIZZLE_9;
++		} else {
++			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
++			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
++		}
+ 	} else if (IS_GEN5(dev)) {
+ 		/* On Ironlake whatever DRAM config, GPU always do
+ 		 * same swizzling setup.
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 93e74fb..8bca2d2 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -716,7 +716,6 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
+ 	reloc_offset = src->gtt_offset;
+ 	for (page = 0; page < page_count; page++) {
+ 		unsigned long flags;
+-		void __iomem *s;
+ 		void *d;
+ 
+ 		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+@@ -724,10 +723,29 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
+ 			goto unwind;
+ 
+ 		local_irq_save(flags);
+-		s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+-					     reloc_offset);
+-		memcpy_fromio(d, s, PAGE_SIZE);
+-		io_mapping_unmap_atomic(s);
++		if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
++			void __iomem *s;
++
++			/* Simply ignore tiling or any overlapping fence.
++			 * It's part of the error state, and this hopefully
++			 * captures what the GPU read.
++			 */
++
++			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
++						     reloc_offset);
++			memcpy_fromio(d, s, PAGE_SIZE);
++			io_mapping_unmap_atomic(s);
++		} else {
++			void *s;
++
++			drm_clflush_pages(&src->pages[page], 1);
++
++			s = kmap_atomic(src->pages[page]);
++			memcpy(d, s, PAGE_SIZE);
++			kunmap_atomic(s);
++
++			drm_clflush_pages(&src->pages[page], 1);
++		}
+ 		local_irq_restore(flags);
+ 
+ 		dst->pages[page] = d;
+@@ -766,11 +784,11 @@ i915_error_state_free(struct drm_device *dev,
+ {
+ 	int i;
+ 
+-	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++)
+-		i915_error_object_free(error->batchbuffer[i]);
+-
+-	for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++)
+-		i915_error_object_free(error->ringbuffer[i]);
++	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
++		i915_error_object_free(error->ring[i].batchbuffer);
++		i915_error_object_free(error->ring[i].ringbuffer);
++		kfree(error->ring[i].requests);
++	}
+ 
+ 	kfree(error->active_bo);
+ 	kfree(error->overlay);
+@@ -800,7 +818,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
+ 		err->tiling = obj->tiling_mode;
+ 		err->dirty = obj->dirty;
+ 		err->purgeable = obj->madv != I915_MADV_WILLNEED;
+-		err->ring = obj->ring ? obj->ring->id : 0;
++		err->ring = obj->ring ? obj->ring->id : -1;
+ 		err->cache_level = obj->cache_level;
+ 
+ 		if (++i == count)
+@@ -872,6 +890,92 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+ 	return NULL;
+ }
+ 
++static void i915_record_ring_state(struct drm_device *dev,
++				   struct drm_i915_error_state *error,
++				   struct intel_ring_buffer *ring)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++
++	if (INTEL_INFO(dev)->gen >= 6) {
++		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
++		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
++		error->semaphore_mboxes[ring->id][0]
++			= I915_READ(RING_SYNC_0(ring->mmio_base));
++		error->semaphore_mboxes[ring->id][1]
++			= I915_READ(RING_SYNC_1(ring->mmio_base));
++	}
++
++	if (INTEL_INFO(dev)->gen >= 4) {
++		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
++		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
++		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
++		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
++		if (ring->id == RCS) {
++			error->instdone1 = I915_READ(INSTDONE1);
++			error->bbaddr = I915_READ64(BB_ADDR);
++		}
++	} else {
++		error->ipeir[ring->id] = I915_READ(IPEIR);
++		error->ipehr[ring->id] = I915_READ(IPEHR);
++		error->instdone[ring->id] = I915_READ(INSTDONE);
++	}
++
++	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
++	error->seqno[ring->id] = ring->get_seqno(ring);
++	error->acthd[ring->id] = intel_ring_get_active_head(ring);
++	error->head[ring->id] = I915_READ_HEAD(ring);
++	error->tail[ring->id] = I915_READ_TAIL(ring);
++
++	error->cpu_ring_head[ring->id] = ring->head;
++	error->cpu_ring_tail[ring->id] = ring->tail;
++}
++
++static void i915_gem_record_rings(struct drm_device *dev,
++				  struct drm_i915_error_state *error)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct drm_i915_gem_request *request;
++	int i, count;
++
++	for (i = 0; i < I915_NUM_RINGS; i++) {
++		struct intel_ring_buffer *ring = &dev_priv->ring[i];
++
++		if (ring->obj == NULL)
++			continue;
++
++		i915_record_ring_state(dev, error, ring);
++
++		error->ring[i].batchbuffer =
++			i915_error_first_batchbuffer(dev_priv, ring);
++
++		error->ring[i].ringbuffer =
++			i915_error_object_create(dev_priv, ring->obj);
++
++		count = 0;
++		list_for_each_entry(request, &ring->request_list, list)
++			count++;
++
++		error->ring[i].num_requests = count;
++		error->ring[i].requests =
++			kmalloc(count*sizeof(struct drm_i915_error_request),
++				GFP_ATOMIC);
++		if (error->ring[i].requests == NULL) {
++			error->ring[i].num_requests = 0;
++			continue;
++		}
++
++		count = 0;
++		list_for_each_entry(request, &ring->request_list, list) {
++			struct drm_i915_error_request *erq;
++
++			erq = &error->ring[i].requests[count++];
++			erq->seqno = request->seqno;
++			erq->jiffies = request->emitted_jiffies;
++			erq->tail = request->tail;
++		}
++	}
++}
++
+ /**
+  * i915_capture_error_state - capture an error record for later analysis
+  * @dev: drm device
+@@ -896,7 +1000,7 @@ static void i915_capture_error_state(struct drm_device *dev)
+ 		return;
+ 
+ 	/* Account for pipe specific data like PIPE*STAT */
+-	error = kmalloc(sizeof(*error), GFP_ATOMIC);
++	error = kzalloc(sizeof(*error), GFP_ATOMIC);
+ 	if (!error) {
+ 		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+ 		return;
+@@ -905,59 +1009,18 @@ static void i915_capture_error_state(struct drm_device *dev)
+ 	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
+ 		 dev->primary->index);
+ 
+-	error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
+ 	error->eir = I915_READ(EIR);
+ 	error->pgtbl_er = I915_READ(PGTBL_ER);
+ 	for_each_pipe(pipe)
+ 		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+-	error->instpm = I915_READ(INSTPM);
+-	error->error = 0;
++
+ 	if (INTEL_INFO(dev)->gen >= 6) {
+ 		error->error = I915_READ(ERROR_GEN6);
+-
+-		error->bcs_acthd = I915_READ(BCS_ACTHD);
+-		error->bcs_ipehr = I915_READ(BCS_IPEHR);
+-		error->bcs_ipeir = I915_READ(BCS_IPEIR);
+-		error->bcs_instdone = I915_READ(BCS_INSTDONE);
+-		error->bcs_seqno = 0;
+-		if (dev_priv->ring[BCS].get_seqno)
+-			error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
+-
+-		error->vcs_acthd = I915_READ(VCS_ACTHD);
+-		error->vcs_ipehr = I915_READ(VCS_IPEHR);
+-		error->vcs_ipeir = I915_READ(VCS_IPEIR);
+-		error->vcs_instdone = I915_READ(VCS_INSTDONE);
+-		error->vcs_seqno = 0;
+-		if (dev_priv->ring[VCS].get_seqno)
+-			error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
++		error->done_reg = I915_READ(DONE_REG);
+ 	}
+-	if (INTEL_INFO(dev)->gen >= 4) {
+-		error->ipeir = I915_READ(IPEIR_I965);
+-		error->ipehr = I915_READ(IPEHR_I965);
+-		error->instdone = I915_READ(INSTDONE_I965);
+-		error->instps = I915_READ(INSTPS);
+-		error->instdone1 = I915_READ(INSTDONE1);
+-		error->acthd = I915_READ(ACTHD_I965);
+-		error->bbaddr = I915_READ64(BB_ADDR);
+-	} else {
+-		error->ipeir = I915_READ(IPEIR);
+-		error->ipehr = I915_READ(IPEHR);
+-		error->instdone = I915_READ(INSTDONE);
+-		error->acthd = I915_READ(ACTHD);
+-		error->bbaddr = 0;
+-	}
+-	i915_gem_record_fences(dev, error);
+ 
+-	/* Record the active batch and ring buffers */
+-	for (i = 0; i < I915_NUM_RINGS; i++) {
+-		error->batchbuffer[i] =
+-			i915_error_first_batchbuffer(dev_priv,
+-						     &dev_priv->ring[i]);
+-
+-		error->ringbuffer[i] =
+-			i915_error_object_create(dev_priv,
+-						 dev_priv->ring[i].obj);
+-	}
++	i915_gem_record_fences(dev, error);
++	i915_gem_record_rings(dev, error);
+ 
+ 	/* Record buffers on the active and pinned lists. */
+ 	error->active_bo = NULL;
+@@ -1013,11 +1076,12 @@ void i915_destroy_error_state(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_i915_error_state *error;
++	unsigned long flags;
+ 
+-	spin_lock(&dev_priv->error_lock);
++	spin_lock_irqsave(&dev_priv->error_lock, flags);
+ 	error = dev_priv->first_error;
+ 	dev_priv->first_error = NULL;
+-	spin_unlock(&dev_priv->error_lock);
++	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ 
+ 	if (error)
+ 		i915_error_state_free(dev, error);
+@@ -1187,9 +1251,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+ 	spin_lock_irqsave(&dev->event_lock, flags);
+ 	work = intel_crtc->unpin_work;
+ 
+-	if (work == NULL ||
+-	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
+-	    !work->enable_stall_check) {
++	if (work == NULL || work->pending || !work->enable_stall_check) {
+ 		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
+ 		spin_unlock_irqrestore(&dev->event_lock, flags);
+ 		return;
+@@ -1203,7 +1265,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
+ 	} else {
+ 		int dspaddr = DSPADDR(intel_crtc->plane);
+ 		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
+-							crtc->y * crtc->fb->pitch +
++							crtc->y * crtc->fb->pitches[0] +
+ 							crtc->x * crtc->fb->bits_per_pixel/8);
+ 	}
+ 
+@@ -1647,13 +1709,6 @@ static bool kick_ring(struct intel_ring_buffer *ring)
+ 		I915_WRITE_CTL(ring, tmp);
+ 		return true;
+ 	}
+-	if (IS_GEN6(dev) &&
+-	    (tmp & RING_WAIT_SEMAPHORE)) {
+-		DRM_ERROR("Kicking stuck semaphore on %s\n",
+-			  ring->name);
+-		I915_WRITE_CTL(ring, tmp);
+-		return true;
+-	}
+ 	return false;
+ }
+ 
+@@ -1703,6 +1758,7 @@ void i915_hangcheck_elapsed(unsigned long data)
+ 	    dev_priv->last_instdone1 == instdone1) {
+ 		if (dev_priv->hangcheck_count++ > 1) {
+ 			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
++			i915_handle_error(dev, true);
+ 
+ 			if (!IS_GEN2(dev)) {
+ 				/* Is the chip hanging on a WAIT_FOR_EVENT?
+@@ -1710,7 +1766,6 @@ void i915_hangcheck_elapsed(unsigned long data)
+ 				 * and break the hang. This should work on
+ 				 * all but the second generation chipsets.
+ 				 */
+-
+ 				if (kick_ring(&dev_priv->ring[RCS]))
+ 					goto repeat;
+ 
+@@ -1723,7 +1778,6 @@ void i915_hangcheck_elapsed(unsigned long data)
+ 					goto repeat;
+ 			}
+ 
+-			i915_handle_error(dev, true);
+ 			return;
+ 		}
+ 	} else {
+@@ -1756,17 +1810,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
+ 		INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+ 
+ 	I915_WRITE(HWSTAM, 0xeffe);
+-	if (IS_GEN6(dev) || IS_GEN7(dev)) {
+-		/* Workaround stalls observed on Sandy Bridge GPUs by
+-		 * making the blitter command streamer generate a
+-		 * write to the Hardware Status Page for
+-		 * MI_USER_INTERRUPT.  This appears to serialize the
+-		 * previous seqno write out before the interrupt
+-		 * happens.
+-		 */
+-		I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
+-		I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
+-	}
+ 
+ 	/* XXX hotplug from PCH */
+ 
+diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
+deleted file mode 100644
+index cc8f6d4..0000000
+--- a/drivers/gpu/drm/i915/i915_mem.c
++++ /dev/null
+@@ -1,387 +0,0 @@
+-/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
+- */
+-/*
+- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+- * All Rights Reserved.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the
+- * "Software"), to deal in the Software without restriction, including
+- * without limitation the rights to use, copy, modify, merge, publish,
+- * distribute, sub license, and/or sell copies of the Software, and to
+- * permit persons to whom the Software is furnished to do so, subject to
+- * the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the
+- * next paragraph) shall be included in all copies or substantial portions
+- * of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+- *
+- */
+-
+-#include "drmP.h"
+-#include "drm.h"
+-#include "i915_drm.h"
+-#include "i915_drv.h"
+-
+-/* This memory manager is integrated into the global/local lru
+- * mechanisms used by the clients.  Specifically, it operates by
+- * setting the 'in_use' fields of the global LRU to indicate whether
+- * this region is privately allocated to a client.
+- *
+- * This does require the client to actually respect that field.
+- *
+- * Currently no effort is made to allocate 'private' memory in any
+- * clever way - the LRU information isn't used to determine which
+- * block to allocate, and the ring is drained prior to allocations --
+- * in other words allocation is expensive.
+- */
+-static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+-	drm_i915_sarea_t *sarea_priv = master_priv->sarea_priv;
+-	struct drm_tex_region *list;
+-	unsigned shift, nr;
+-	unsigned start;
+-	unsigned end;
+-	unsigned i;
+-	int age;
+-
+-	shift = dev_priv->tex_lru_log_granularity;
+-	nr = I915_NR_TEX_REGIONS;
+-
+-	start = p->start >> shift;
+-	end = (p->start + p->size - 1) >> shift;
+-
+-	age = ++sarea_priv->texAge;
+-	list = sarea_priv->texList;
+-
+-	/* Mark the regions with the new flag and update their age.  Move
+-	 * them to head of list to preserve LRU semantics.
+-	 */
+-	for (i = start; i <= end; i++) {
+-		list[i].in_use = in_use;
+-		list[i].age = age;
+-
+-		/* remove_from_list(i)
+-		 */
+-		list[(unsigned)list[i].next].prev = list[i].prev;
+-		list[(unsigned)list[i].prev].next = list[i].next;
+-
+-		/* insert_at_head(list, i)
+-		 */
+-		list[i].prev = nr;
+-		list[i].next = list[nr].next;
+-		list[(unsigned)list[nr].next].prev = i;
+-		list[nr].next = i;
+-	}
+-}
+-
+-/* Very simple allocator for agp memory, working on a static range
+- * already mapped into each client's address space.
+- */
+-
+-static struct mem_block *split_block(struct mem_block *p, int start, int size,
+-				     struct drm_file *file_priv)
+-{
+-	/* Maybe cut off the start of an existing block */
+-	if (start > p->start) {
+-		struct mem_block *newblock = kmalloc(sizeof(*newblock),
+-						     GFP_KERNEL);
+-		if (!newblock)
+-			goto out;
+-		newblock->start = start;
+-		newblock->size = p->size - (start - p->start);
+-		newblock->file_priv = NULL;
+-		newblock->next = p->next;
+-		newblock->prev = p;
+-		p->next->prev = newblock;
+-		p->next = newblock;
+-		p->size -= newblock->size;
+-		p = newblock;
+-	}
+-
+-	/* Maybe cut off the end of an existing block */
+-	if (size < p->size) {
+-		struct mem_block *newblock = kmalloc(sizeof(*newblock),
+-						     GFP_KERNEL);
+-		if (!newblock)
+-			goto out;
+-		newblock->start = start + size;
+-		newblock->size = p->size - size;
+-		newblock->file_priv = NULL;
+-		newblock->next = p->next;
+-		newblock->prev = p;
+-		p->next->prev = newblock;
+-		p->next = newblock;
+-		p->size = size;
+-	}
+-
+-      out:
+-	/* Our block is in the middle */
+-	p->file_priv = file_priv;
+-	return p;
+-}
+-
+-static struct mem_block *alloc_block(struct mem_block *heap, int size,
+-				     int align2, struct drm_file *file_priv)
+-{
+-	struct mem_block *p;
+-	int mask = (1 << align2) - 1;
+-
+-	for (p = heap->next; p != heap; p = p->next) {
+-		int start = (p->start + mask) & ~mask;
+-		if (p->file_priv == NULL && start + size <= p->start + p->size)
+-			return split_block(p, start, size, file_priv);
+-	}
+-
+-	return NULL;
+-}
+-
+-static struct mem_block *find_block(struct mem_block *heap, int start)
+-{
+-	struct mem_block *p;
+-
+-	for (p = heap->next; p != heap; p = p->next)
+-		if (p->start == start)
+-			return p;
+-
+-	return NULL;
+-}
+-
+-static void free_block(struct mem_block *p)
+-{
+-	p->file_priv = NULL;
+-
+-	/* Assumes a single contiguous range.  Needs a special file_priv in
+-	 * 'heap' to stop it being subsumed.
+-	 */
+-	if (p->next->file_priv == NULL) {
+-		struct mem_block *q = p->next;
+-		p->size += q->size;
+-		p->next = q->next;
+-		p->next->prev = p;
+-		kfree(q);
+-	}
+-
+-	if (p->prev->file_priv == NULL) {
+-		struct mem_block *q = p->prev;
+-		q->size += p->size;
+-		q->next = p->next;
+-		q->next->prev = q;
+-		kfree(p);
+-	}
+-}
+-
+-/* Initialize.  How to check for an uninitialized heap?
+- */
+-static int init_heap(struct mem_block **heap, int start, int size)
+-{
+-	struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
+-
+-	if (!blocks)
+-		return -ENOMEM;
+-
+-	*heap = kmalloc(sizeof(**heap), GFP_KERNEL);
+-	if (!*heap) {
+-		kfree(blocks);
+-		return -ENOMEM;
+-	}
+-
+-	blocks->start = start;
+-	blocks->size = size;
+-	blocks->file_priv = NULL;
+-	blocks->next = blocks->prev = *heap;
+-
+-	memset(*heap, 0, sizeof(**heap));
+-	(*heap)->file_priv = (struct drm_file *) -1;
+-	(*heap)->next = (*heap)->prev = blocks;
+-	return 0;
+-}
+-
+-/* Free all blocks associated with the releasing file.
+- */
+-void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
+-		      struct mem_block *heap)
+-{
+-	struct mem_block *p;
+-
+-	if (!heap || !heap->next)
+-		return;
+-
+-	for (p = heap->next; p != heap; p = p->next) {
+-		if (p->file_priv == file_priv) {
+-			p->file_priv = NULL;
+-			mark_block(dev, p, 0);
+-		}
+-	}
+-
+-	/* Assumes a single contiguous range.  Needs a special file_priv in
+-	 * 'heap' to stop it being subsumed.
+-	 */
+-	for (p = heap->next; p != heap; p = p->next) {
+-		while (p->file_priv == NULL && p->next->file_priv == NULL) {
+-			struct mem_block *q = p->next;
+-			p->size += q->size;
+-			p->next = q->next;
+-			p->next->prev = p;
+-			kfree(q);
+-		}
+-	}
+-}
+-
+-/* Shutdown.
+- */
+-void i915_mem_takedown(struct mem_block **heap)
+-{
+-	struct mem_block *p;
+-
+-	if (!*heap)
+-		return;
+-
+-	for (p = (*heap)->next; p != *heap;) {
+-		struct mem_block *q = p;
+-		p = p->next;
+-		kfree(q);
+-	}
+-
+-	kfree(*heap);
+-	*heap = NULL;
+-}
+-
+-static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
+-{
+-	switch (region) {
+-	case I915_MEM_REGION_AGP:
+-		return &dev_priv->agp_heap;
+-	default:
+-		return NULL;
+-	}
+-}
+-
+-/* IOCTL HANDLERS */
+-
+-int i915_mem_alloc(struct drm_device *dev, void *data,
+-		   struct drm_file *file_priv)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	drm_i915_mem_alloc_t *alloc = data;
+-	struct mem_block *block, **heap;
+-
+-	if (!dev_priv) {
+-		DRM_ERROR("called with no initialization\n");
+-		return -EINVAL;
+-	}
+-
+-	heap = get_heap(dev_priv, alloc->region);
+-	if (!heap || !*heap)
+-		return -EFAULT;
+-
+-	/* Make things easier on ourselves: all allocations at least
+-	 * 4k aligned.
+-	 */
+-	if (alloc->alignment < 12)
+-		alloc->alignment = 12;
+-
+-	block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
+-
+-	if (!block)
+-		return -ENOMEM;
+-
+-	mark_block(dev, block, 1);
+-
+-	if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
+-			     sizeof(int))) {
+-		DRM_ERROR("copy_to_user\n");
+-		return -EFAULT;
+-	}
+-
+-	return 0;
+-}
+-
+-int i915_mem_free(struct drm_device *dev, void *data,
+-		  struct drm_file *file_priv)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	drm_i915_mem_free_t *memfree = data;
+-	struct mem_block *block, **heap;
+-
+-	if (!dev_priv) {
+-		DRM_ERROR("called with no initialization\n");
+-		return -EINVAL;
+-	}
+-
+-	heap = get_heap(dev_priv, memfree->region);
+-	if (!heap || !*heap)
+-		return -EFAULT;
+-
+-	block = find_block(*heap, memfree->region_offset);
+-	if (!block)
+-		return -EFAULT;
+-
+-	if (block->file_priv != file_priv)
+-		return -EPERM;
+-
+-	mark_block(dev, block, 0);
+-	free_block(block);
+-	return 0;
+-}
+-
+-int i915_mem_init_heap(struct drm_device *dev, void *data,
+-		       struct drm_file *file_priv)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	drm_i915_mem_init_heap_t *initheap = data;
+-	struct mem_block **heap;
+-
+-	if (!dev_priv) {
+-		DRM_ERROR("called with no initialization\n");
+-		return -EINVAL;
+-	}
+-
+-	heap = get_heap(dev_priv, initheap->region);
+-	if (!heap)
+-		return -EFAULT;
+-
+-	if (*heap) {
+-		DRM_ERROR("heap already initialized?");
+-		return -EFAULT;
+-	}
+-
+-	return init_heap(heap, initheap->start, initheap->size);
+-}
+-
+-int i915_mem_destroy_heap(struct drm_device *dev, void *data,
+-			   struct drm_file *file_priv)
+-{
+-	drm_i915_private_t *dev_priv = dev->dev_private;
+-	drm_i915_mem_destroy_heap_t *destroyheap = data;
+-	struct mem_block **heap;
+-
+-	if (!dev_priv) {
+-		DRM_ERROR("called with no initialization\n");
+-		return -EINVAL;
+-	}
+-
+-	heap = get_heap(dev_priv, destroyheap->region);
+-	if (!heap) {
+-		DRM_ERROR("get_heap failed");
+-		return -EFAULT;
+-	}
+-
+-	if (!*heap) {
+-		DRM_ERROR("heap not initialized?");
+-		return -EFAULT;
+-	}
+-
+-	i915_mem_takedown(heap);
+-	return 0;
+-}
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 7a10f5f..dde62bf 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -27,6 +27,8 @@
+ 
+ #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+ 
++#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
++
+ /*
+  * The Bridge device's PCI config space has information about the
+  * fb aperture size and the amount of pre-reserved memory.
+@@ -86,12 +88,45 @@
+ #define   GEN6_MBC_SNPCR_LOW	(2<<21)
+ #define   GEN6_MBC_SNPCR_MIN	(3<<21) /* only 1/16th of the cache is shared */
+ 
++#define GEN6_MBCTL		0x0907c
++#define   GEN6_MBCTL_ENABLE_BOOT_FETCH	(1 << 4)
++#define   GEN6_MBCTL_CTX_FETCH_NEEDED	(1 << 3)
++#define   GEN6_MBCTL_BME_UPDATE_ENABLE	(1 << 2)
++#define   GEN6_MBCTL_MAE_UPDATE_ENABLE	(1 << 1)
++#define   GEN6_MBCTL_BOOT_FETCH_MECH	(1 << 0)
++
+ #define GEN6_GDRST	0x941c
+ #define  GEN6_GRDOM_FULL		(1 << 0)
+ #define  GEN6_GRDOM_RENDER		(1 << 1)
+ #define  GEN6_GRDOM_MEDIA		(1 << 2)
+ #define  GEN6_GRDOM_BLT			(1 << 3)
+ 
++/* PPGTT stuff */
++#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
++
++#define GEN6_PDE_VALID			(1 << 0)
++#define GEN6_PDE_LARGE_PAGE		(2 << 0) /* use 32kb pages */
++/* gen6+ has bit 11-4 for physical addr bit 39-32 */
++#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
++
++#define GEN6_PTE_VALID			(1 << 0)
++#define GEN6_PTE_UNCACHED		(1 << 1)
++#define GEN6_PTE_CACHE_LLC		(2 << 1)
++#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
++#define GEN6_PTE_CACHE_BITS		(3 << 1)
++#define GEN6_PTE_GFDT			(1 << 3)
++#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
++
++#define RING_PP_DIR_BASE(ring)		((ring)->mmio_base+0x228)
++#define RING_PP_DIR_BASE_READ(ring)	((ring)->mmio_base+0x518)
++#define RING_PP_DIR_DCLV(ring)		((ring)->mmio_base+0x220)
++#define   PP_DIR_DCLV_2G		0xffffffff
++
++#define GAM_ECOCHK			0x4090
++#define   ECOCHK_SNB_BIT		(1<<10)
++#define   ECOCHK_PPGTT_CACHE64B		(0x3<<3)
++#define   ECOCHK_PPGTT_CACHE4B		(0x0<<3)
++
+ /* VGA stuff */
+ 
+ #define VGA_ST01_MDA 0x3ba
+@@ -303,6 +338,12 @@
+ #define FENCE_REG_SANDYBRIDGE_0		0x100000
+ #define   SANDYBRIDGE_FENCE_PITCH_SHIFT	32
+ 
++/* control register for cpu gtt access */
++#define TILECTL				0x101000
++#define   TILECTL_SWZCTL			(1 << 0)
++#define   TILECTL_TLB_PREFETCH_DIS	(1 << 2)
++#define   TILECTL_BACKSNOOP_DIS		(1 << 3)
++
+ /*
+  * Instruction and interrupt control regs
+  */
+@@ -326,7 +367,14 @@
+ #define RING_MAX_IDLE(base)	((base)+0x54)
+ #define RING_HWS_PGA(base)	((base)+0x80)
+ #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
++#define ARB_MODE		0x04030
++#define   ARB_MODE_SWIZZLE_SNB	(1<<4)
++#define   ARB_MODE_SWIZZLE_IVB	(1<<5)
++#define   ARB_MODE_ENABLE(x)	GFX_MODE_ENABLE(x)
++#define   ARB_MODE_DISABLE(x)	GFX_MODE_DISABLE(x)
+ #define RENDER_HWS_PGA_GEN7	(0x04080)
++#define RING_FAULT_REG(ring)	(0x4094 + 0x100*(ring)->id)
++#define DONE_REG		0x40b0
+ #define BSD_HWS_PGA_GEN7	(0x04180)
+ #define BLT_HWS_PGA_GEN7	(0x04280)
+ #define RING_ACTHD(base)	((base)+0x74)
+@@ -360,6 +408,12 @@
+ #define IPEIR_I965	0x02064
+ #define IPEHR_I965	0x02068
+ #define INSTDONE_I965	0x0206c
++#define RING_IPEIR(base)	((base)+0x64)
++#define RING_IPEHR(base)	((base)+0x68)
++#define RING_INSTDONE(base)	((base)+0x6c)
++#define RING_INSTPS(base)	((base)+0x70)
++#define RING_DMA_FADD(base)	((base)+0x78)
++#define RING_INSTPM(base)	((base)+0xc0)
+ #define INSTPS		0x02070 /* 965+ only */
+ #define INSTDONE1	0x0207c /* 965+ only */
+ #define ACTHD_I965	0x02074
+@@ -373,14 +427,6 @@
+ #define INSTDONE	0x02090
+ #define NOPID		0x02094
+ #define HWSTAM		0x02098
+-#define VCS_INSTDONE	0x1206C
+-#define VCS_IPEIR	0x12064
+-#define VCS_IPEHR	0x12068
+-#define VCS_ACTHD	0x12074
+-#define BCS_INSTDONE	0x2206C
+-#define BCS_IPEIR	0x22064
+-#define BCS_IPEHR	0x22068
+-#define BCS_ACTHD	0x22074
+ 
+ #define ERROR_GEN6	0x040a0
+ 
+@@ -389,6 +435,7 @@
+  * the enables for writing to the corresponding low bit.
+  */
+ #define _3D_CHICKEN	0x02084
++#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB	(1 << 10)
+ #define _3D_CHICKEN2	0x0208c
+ /* Disables pipelining of read flushes past the SF-WIZ interface.
+  * Required on all Ironlake steppings according to the B-Spec, but the
+@@ -399,13 +446,14 @@
+ 
+ #define MI_MODE		0x0209c
+ # define VS_TIMER_DISPATCH				(1 << 6)
+-# define MI_FLUSH_ENABLE				(1 << 11)
++# define MI_FLUSH_ENABLE				(1 << 12)
+ 
+ #define GEN6_GT_MODE	0x20d0
+ #define   GEN6_GT_MODE_HI	(1 << 9)
+ 
+ #define GFX_MODE	0x02520
+ #define GFX_MODE_GEN7	0x0229c
++#define RING_MODE_GEN7(ring)	((ring)->mmio_base+0x29c)
+ #define   GFX_RUN_LIST_ENABLE		(1<<15)
+ #define   GFX_TLB_INVALIDATE_ALWAYS	(1<<13)
+ #define   GFX_SURFACE_FAULT_ENABLE	(1<<12)
+@@ -1064,6 +1112,29 @@
+ #define C0DRB3			0x10206
+ #define C1DRB3			0x10606
+ 
++/** snb MCH registers for reading the DRAM channel configuration */
++#define MAD_DIMM_C0			(MCHBAR_MIRROR_BASE_SNB + 0x5004)
++#define MAD_DIMM_C1			(MCHBAR_MIRROR_BASE_SNB + 0x5008)
++#define MAD_DIMM_C2			(MCHBAR_MIRROR_BASE_SNB + 0x500C)
++#define   MAD_DIMM_ECC_MASK		(0x3 << 24)
++#define   MAD_DIMM_ECC_OFF		(0x0 << 24)
++#define   MAD_DIMM_ECC_IO_ON_LOGIC_OFF	(0x1 << 24)
++#define   MAD_DIMM_ECC_IO_OFF_LOGIC_ON	(0x2 << 24)
++#define   MAD_DIMM_ECC_ON		(0x3 << 24)
++#define   MAD_DIMM_ENH_INTERLEAVE	(0x1 << 22)
++#define   MAD_DIMM_RANK_INTERLEAVE	(0x1 << 21)
++#define   MAD_DIMM_B_WIDTH_X16		(0x1 << 20) /* X8 chips if unset */
++#define   MAD_DIMM_A_WIDTH_X16		(0x1 << 19) /* X8 chips if unset */
++#define   MAD_DIMM_B_DUAL_RANK		(0x1 << 18)
++#define   MAD_DIMM_A_DUAL_RANK		(0x1 << 17)
++#define   MAD_DIMM_A_SELECT		(0x1 << 16)
++/* DIMM sizes are in multiples of 256mb. */
++#define   MAD_DIMM_B_SIZE_SHIFT		8
++#define   MAD_DIMM_B_SIZE_MASK		(0xff << MAD_DIMM_B_SIZE_SHIFT)
++#define   MAD_DIMM_A_SIZE_SHIFT		0
++#define   MAD_DIMM_A_SIZE_MASK		(0xff << MAD_DIMM_A_SIZE_SHIFT)
++
++
+ /* Clocking configuration register */
+ #define CLKCFG			0x10c00
+ #define CLKCFG_FSB_400					(5 << 0)	/* hrawclk 100 */
+@@ -1343,6 +1414,7 @@
+ #define _VSYNC_A		0x60014
+ #define _PIPEASRC	0x6001c
+ #define _BCLRPAT_A	0x60020
++#define _VSYNCSHIFT_A	0x60028
+ 
+ /* Pipe B timing regs */
+ #define _HTOTAL_B	0x61000
+@@ -1353,6 +1425,8 @@
+ #define _VSYNC_B		0x61014
+ #define _PIPEBSRC	0x6101c
+ #define _BCLRPAT_B	0x61020
++#define _VSYNCSHIFT_B	0x61028
++
+ 
+ #define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
+ #define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
+@@ -1361,6 +1435,7 @@
+ #define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
+ #define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
+ #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
++#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+ 
+ /* VGA port control */
+ #define ADPA			0x61100
+@@ -1560,10 +1635,6 @@
+ 
+ /* Video Data Island Packet control */
+ #define VIDEO_DIP_DATA		0x61178
+-/* Read the description of VIDEO_DIP_DATA (before Haswel) or VIDEO_DIP_ECC
+- * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
+- * of the infoframe structure specified by CEA-861. */
+-#define   VIDEO_DIP_DATA_SIZE	32
+ #define VIDEO_DIP_CTL		0x61170
+ #define   VIDEO_DIP_ENABLE		(1 << 31)
+ #define   VIDEO_DIP_PORT_B		(1 << 29)
+@@ -2357,9 +2428,21 @@
+ #define   PIPECONF_PALETTE	0
+ #define   PIPECONF_GAMMA		(1<<24)
+ #define   PIPECONF_FORCE_BORDER	(1<<25)
+-#define   PIPECONF_PROGRESSIVE	(0 << 21)
++#define   PIPECONF_INTERLACE_MASK	(7 << 21)
++/* Note that pre-gen3 does not support interlaced display directly. Panel
++ * fitting must be disabled on pre-ilk for interlaced. */
++#define   PIPECONF_PROGRESSIVE			(0 << 21)
++#define   PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL	(4 << 21) /* gen4 only */
++#define   PIPECONF_INTERLACE_W_SYNC_SHIFT	(5 << 21) /* gen4 only */
+ #define   PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
+-#define   PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)
++#define   PIPECONF_INTERLACE_FIELD_0_ONLY	(7 << 21) /* gen3 only */
++/* Ironlake and later have a complete new set of values for interlaced. PFIT
++ * means panel fitter required, PF means progressive fetch, DBL means power
++ * saving pixel doubling. */
++#define   PIPECONF_PFIT_PF_INTERLACED_ILK	(1 << 21)
++#define   PIPECONF_INTERLACED_ILK		(3 << 21)
++#define   PIPECONF_INTERLACED_DBL_ILK		(4 << 21) /* ilk/snb only */
++#define   PIPECONF_PFIT_PF_INTERLACED_DBL_ILK	(5 << 21) /* ilk/snb only */
+ #define   PIPECONF_CXSR_DOWNCLOCK	(1<<16)
+ #define   PIPECONF_BPP_MASK	(0x000000e0)
+ #define   PIPECONF_BPP_8	(0<<5)
+@@ -2498,6 +2581,8 @@
+ #define WM3_LP_ILK		0x45110
+ #define  WM3_LP_EN		(1<<31)
+ #define WM1S_LP_ILK		0x45120
++#define WM2S_LP_IVB		0x45124
++#define WM3S_LP_IVB		0x45128
+ #define  WM1S_LP_EN		(1<<31)
+ 
+ /* Memory latency timer register */
+@@ -2714,6 +2799,140 @@
+ #define _DSPBSURF		0x7119C
+ #define _DSPBTILEOFF		0x711A4
+ 
++/* Sprite A control */
++#define _DVSACNTR		0x72180
++#define   DVS_ENABLE		(1<<31)
++#define   DVS_GAMMA_ENABLE	(1<<30)
++#define   DVS_PIXFORMAT_MASK	(3<<25)
++#define   DVS_FORMAT_YUV422	(0<<25)
++#define   DVS_FORMAT_RGBX101010	(1<<25)
++#define   DVS_FORMAT_RGBX888	(2<<25)
++#define   DVS_FORMAT_RGBX161616	(3<<25)
++#define   DVS_SOURCE_KEY	(1<<22)
++#define   DVS_RGB_ORDER_XBGR	(1<<20)
++#define   DVS_YUV_BYTE_ORDER_MASK (3<<16)
++#define   DVS_YUV_ORDER_YUYV	(0<<16)
++#define   DVS_YUV_ORDER_UYVY	(1<<16)
++#define   DVS_YUV_ORDER_YVYU	(2<<16)
++#define   DVS_YUV_ORDER_VYUY	(3<<16)
++#define   DVS_DEST_KEY		(1<<2)
++#define   DVS_TRICKLE_FEED_DISABLE (1<<14)
++#define   DVS_TILED		(1<<10)
++#define _DVSALINOFF		0x72184
++#define _DVSASTRIDE		0x72188
++#define _DVSAPOS		0x7218c
++#define _DVSASIZE		0x72190
++#define _DVSAKEYVAL		0x72194
++#define _DVSAKEYMSK		0x72198
++#define _DVSASURF		0x7219c
++#define _DVSAKEYMAXVAL		0x721a0
++#define _DVSATILEOFF		0x721a4
++#define _DVSASURFLIVE		0x721ac
++#define _DVSASCALE		0x72204
++#define   DVS_SCALE_ENABLE	(1<<31)
++#define   DVS_FILTER_MASK	(3<<29)
++#define   DVS_FILTER_MEDIUM	(0<<29)
++#define   DVS_FILTER_ENHANCING	(1<<29)
++#define   DVS_FILTER_SOFTENING	(2<<29)
++#define   DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
++#define   DVS_VERTICAL_OFFSET_ENABLE (1<<27)
++#define _DVSAGAMC		0x72300
++
++#define _DVSBCNTR		0x73180
++#define _DVSBLINOFF		0x73184
++#define _DVSBSTRIDE		0x73188
++#define _DVSBPOS		0x7318c
++#define _DVSBSIZE		0x73190
++#define _DVSBKEYVAL		0x73194
++#define _DVSBKEYMSK		0x73198
++#define _DVSBSURF		0x7319c
++#define _DVSBKEYMAXVAL		0x731a0
++#define _DVSBTILEOFF		0x731a4
++#define _DVSBSURFLIVE		0x731ac
++#define _DVSBSCALE		0x73204
++#define _DVSBGAMC		0x73300
++
++#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
++#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
++#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
++#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
++#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
++#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
++#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
++#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
++#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
++#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
++#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
++
++#define _SPRA_CTL		0x70280
++#define   SPRITE_ENABLE			(1<<31)
++#define   SPRITE_GAMMA_ENABLE		(1<<30)
++#define   SPRITE_PIXFORMAT_MASK		(7<<25)
++#define   SPRITE_FORMAT_YUV422		(0<<25)
++#define   SPRITE_FORMAT_RGBX101010	(1<<25)
++#define   SPRITE_FORMAT_RGBX888		(2<<25)
++#define   SPRITE_FORMAT_RGBX161616	(3<<25)
++#define   SPRITE_FORMAT_YUV444		(4<<25)
++#define   SPRITE_FORMAT_XR_BGR101010	(5<<25) /* Extended range */
++#define   SPRITE_CSC_ENABLE		(1<<24)
++#define   SPRITE_SOURCE_KEY		(1<<22)
++#define   SPRITE_RGB_ORDER_RGBX		(1<<20) /* only for 888 and 161616 */
++#define   SPRITE_YUV_TO_RGB_CSC_DISABLE	(1<<19)
++#define   SPRITE_YUV_CSC_FORMAT_BT709	(1<<18) /* 0 is BT601 */
++#define   SPRITE_YUV_BYTE_ORDER_MASK	(3<<16)
++#define   SPRITE_YUV_ORDER_YUYV		(0<<16)
++#define   SPRITE_YUV_ORDER_UYVY		(1<<16)
++#define   SPRITE_YUV_ORDER_YVYU		(2<<16)
++#define   SPRITE_YUV_ORDER_VYUY		(3<<16)
++#define   SPRITE_TRICKLE_FEED_DISABLE	(1<<14)
++#define   SPRITE_INT_GAMMA_ENABLE	(1<<13)
++#define   SPRITE_TILED			(1<<10)
++#define   SPRITE_DEST_KEY		(1<<2)
++#define _SPRA_LINOFF		0x70284
++#define _SPRA_STRIDE		0x70288
++#define _SPRA_POS		0x7028c
++#define _SPRA_SIZE		0x70290
++#define _SPRA_KEYVAL		0x70294
++#define _SPRA_KEYMSK		0x70298
++#define _SPRA_SURF		0x7029c
++#define _SPRA_KEYMAX		0x702a0
++#define _SPRA_TILEOFF		0x702a4
++#define _SPRA_SCALE		0x70304
++#define   SPRITE_SCALE_ENABLE	(1<<31)
++#define   SPRITE_FILTER_MASK	(3<<29)
++#define   SPRITE_FILTER_MEDIUM	(0<<29)
++#define   SPRITE_FILTER_ENHANCING	(1<<29)
++#define   SPRITE_FILTER_SOFTENING	(2<<29)
++#define   SPRITE_VERTICAL_OFFSET_HALF	(1<<28) /* must be enabled below */
++#define   SPRITE_VERTICAL_OFFSET_ENABLE	(1<<27)
++#define _SPRA_GAMC		0x70400
++
++#define _SPRB_CTL		0x71280
++#define _SPRB_LINOFF		0x71284
++#define _SPRB_STRIDE		0x71288
++#define _SPRB_POS		0x7128c
++#define _SPRB_SIZE		0x71290
++#define _SPRB_KEYVAL		0x71294
++#define _SPRB_KEYMSK		0x71298
++#define _SPRB_SURF		0x7129c
++#define _SPRB_KEYMAX		0x712a0
++#define _SPRB_TILEOFF		0x712a4
++#define _SPRB_SCALE		0x71304
++#define _SPRB_GAMC		0x71400
++
++#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
++#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
++#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
++#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
++#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
++#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
++#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
++#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
++#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
++#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
++#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
++#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
++
+ /* VBIOS regs */
+ #define VGACNTRL		0x71400
+ # define VGA_DISP_DISABLE			(1 << 31)
+@@ -2923,6 +3142,10 @@
+ #define   ILK_DPFC_DIS1		(1<<8)
+ #define   ILK_DPFC_DIS2		(1<<9)
+ 
++#define IVB_CHICKEN3	0x4200c
++# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE	(1 << 5)
++# define CHICKEN3_DGMG_DONE_FIX_DISABLE		(1 << 2)
++
+ #define DISP_ARB_CTL	0x45000
+ #define  DISP_TILE_SURFACE_SWIZZLING	(1<<13)
+ #define  DISP_FBC_WM_DIS		(1<<15)
+@@ -3118,6 +3341,7 @@
+ #define _TRANS_VSYNC_A           0xe0014
+ #define  TRANS_VSYNC_END_SHIFT  16
+ #define  TRANS_VSYNC_START_SHIFT 0
++#define _TRANS_VSYNCSHIFT_A	0xe0028
+ 
+ #define _TRANSA_DATA_M1          0xe0030
+ #define _TRANSA_DATA_N1          0xe0034
+@@ -3148,6 +3372,7 @@
+ #define _TRANS_VTOTAL_B          0xe100c
+ #define _TRANS_VBLANK_B          0xe1010
+ #define _TRANS_VSYNC_B           0xe1014
++#define _TRANS_VSYNCSHIFT_B	 0xe1028
+ 
+ #define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
+ #define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
+@@ -3155,6 +3380,8 @@
+ #define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
+ #define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
+ #define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
++#define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \
++				     _TRANS_VSYNCSHIFT_B)
+ 
+ #define _TRANSB_DATA_M1          0xe1030
+ #define _TRANSB_DATA_N1          0xe1034
+@@ -3188,7 +3415,10 @@
+ #define  TRANS_FSYNC_DELAY_HB4  (3<<27)
+ #define  TRANS_DP_AUDIO_ONLY    (1<<26)
+ #define  TRANS_DP_VIDEO_AUDIO   (0<<26)
++#define  TRANS_INTERLACE_MASK   (7<<21)
+ #define  TRANS_PROGRESSIVE      (0<<21)
++#define  TRANS_INTERLACED       (3<<21)
++#define  TRANS_LEGACY_INTERLACED_ILK (2<<21)
+ #define  TRANS_8BPC             (0<<5)
+ #define  TRANS_10BPC            (1<<5)
+ #define  TRANS_6BPC             (2<<5)
+@@ -3527,9 +3757,18 @@
+ #define  ECOBUS					0xa180
+ #define    FORCEWAKE_MT_ENABLE			(1<<5)
+ 
++#define  GTFIFODBG				0x120000
++#define    GT_FIFO_CPU_ERROR_MASK		7
++#define    GT_FIFO_OVFERR			(1<<2)
++#define    GT_FIFO_IAWRERR			(1<<1)
++#define    GT_FIFO_IARDERR			(1<<0)
++
+ #define  GT_FIFO_FREE_ENTRIES			0x120008
+ #define    GT_FIFO_NUM_RESERVED_ENTRIES		20
+ 
++#define GEN6_UCGCTL1				0x9400
++# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE		(1 << 5)
++
+ #define GEN6_UCGCTL2				0x9404
+ # define GEN6_RCZUNIT_CLOCK_GATE_DISABLE		(1 << 13)
+ # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE		(1 << 12)
+@@ -3617,6 +3856,14 @@
+ #define GEN6_PCODE_DATA				0x138128
+ #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
+ 
++#define GEN6_GT_CORE_STATUS		0x138060
++#define   GEN6_CORE_CPD_STATE_MASK	(7<<4)
++#define   GEN6_RCn_MASK			7
++#define   GEN6_RC0			0
++#define   GEN6_RC3			2
++#define   GEN6_RC6			3
++#define   GEN6_RC7			4
++
+ #define G4X_AUD_VID_DID			0x62020
+ #define INTEL_AUDIO_DEVCL		0x808629FB
+ #define INTEL_AUDIO_DEVBLC		0x80862801
+@@ -3629,17 +3876,35 @@
+ #define G4X_ELD_ACK			(1 << 4)
+ #define G4X_HDMIW_HDMIEDID		0x6210C
+ 
+-#define GEN5_HDMIW_HDMIEDID_A		0xE2050
+-#define GEN5_AUD_CNTL_ST_A		0xE20B4
+-#define GEN5_ELD_BUFFER_SIZE		(0x1f << 10)
+-#define GEN5_ELD_ADDRESS		(0x1f << 5)
+-#define GEN5_ELD_ACK			(1 << 4)
+-#define GEN5_AUD_CNTL_ST2		0xE20C0
+-#define GEN5_ELD_VALIDB			(1 << 0)
+-#define GEN5_CP_READYB			(1 << 1)
+-
+-#define GEN7_HDMIW_HDMIEDID_A		0xE5050
+-#define GEN7_AUD_CNTRL_ST_A		0xE50B4
+-#define GEN7_AUD_CNTRL_ST2		0xE50C0
++#define IBX_HDMIW_HDMIEDID_A		0xE2050
++#define IBX_AUD_CNTL_ST_A		0xE20B4
++#define IBX_ELD_BUFFER_SIZE		(0x1f << 10)
++#define IBX_ELD_ADDRESS			(0x1f << 5)
++#define IBX_ELD_ACK			(1 << 4)
++#define IBX_AUD_CNTL_ST2		0xE20C0
++#define IBX_ELD_VALIDB			(1 << 0)
++#define IBX_CP_READYB			(1 << 1)
++
++#define CPT_HDMIW_HDMIEDID_A		0xE5050
++#define CPT_AUD_CNTL_ST_A		0xE50B4
++#define CPT_AUD_CNTRL_ST2		0xE50C0
++
++/* These are the 4 32-bit write offset registers for each stream
++ * output buffer.  It determines the offset from the
++ * 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
++ */
++#define GEN7_SO_WRITE_OFFSET(n)		(0x5280 + (n) * 4)
++
++#define IBX_AUD_CONFIG_A			0xe2000
++#define CPT_AUD_CONFIG_A			0xe5000
++#define   AUD_CONFIG_N_VALUE_INDEX		(1 << 29)
++#define   AUD_CONFIG_N_PROG_ENABLE		(1 << 28)
++#define   AUD_CONFIG_UPPER_N_SHIFT		20
++#define   AUD_CONFIG_UPPER_N_VALUE		(0xff << 20)
++#define   AUD_CONFIG_LOWER_N_SHIFT		4
++#define   AUD_CONFIG_LOWER_N_VALUE		(0xfff << 4)
++#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT	16
++#define   AUD_CONFIG_PIXEL_CLOCK_HDMI		(0xf << 16)
++#define   AUD_CONFIG_DISABLE_NCTS		(1 << 3)
+ 
+ #endif /* _I915_REG_H_ */
+diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
+index f38d196..0d13778 100644
+--- a/drivers/gpu/drm/i915/i915_suspend.c
++++ b/drivers/gpu/drm/i915/i915_suspend.c
+@@ -28,6 +28,7 @@
+ #include "drm.h"
+ #include "i915_drm.h"
+ #include "intel_drv.h"
++#include "i915_reg.h"
+ 
+ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+ {
+@@ -39,7 +40,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+ 		return false;
+ 
+ 	if (HAS_PCH_SPLIT(dev))
+-		dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B;
++		dpll_reg = PCH_DPLL(pipe);
+ 	else
+ 		dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
+ 
+diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
+index cb91210..bae3edf 100644
+--- a/drivers/gpu/drm/i915/intel_acpi.c
++++ b/drivers/gpu/drm/i915/intel_acpi.c
+@@ -208,7 +208,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
+ 
+ 	ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
+ 	if (ret < 0) {
+-		DRM_ERROR("failed to get supported _DSM functions\n");
++		DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
+ 		return false;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 0016fee..a2c9e56 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -605,7 +605,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
+ 		DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+ 		return;
+ 	}
+-	dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
++	dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
+ 	if (!dev_priv->child_dev) {
+ 		DRM_DEBUG_KMS("No memory space for child device\n");
+ 		return;
+@@ -719,7 +719,7 @@ intel_parse_bios(struct drm_device *dev)
+ 		}
+ 
+ 		if (!vbt) {
+-			DRM_ERROR("VBT signature missing\n");
++			DRM_DEBUG_DRIVER("VBT signature missing\n");
+ 			pci_unmap_rom(pdev, bios);
+ 			return -1;
+ 		}
+diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
+index 8af3735..dbda6e3 100644
+--- a/drivers/gpu/drm/i915/intel_bios.h
++++ b/drivers/gpu/drm/i915/intel_bios.h
+@@ -467,8 +467,12 @@ struct edp_link_params {
+ struct bdb_edp {
+ 	struct edp_power_seq power_seqs[16];
+ 	u32 color_depth;
+-	u32 sdrrs_msa_timing_delay;
+ 	struct edp_link_params link_params[16];
++	u32 sdrrs_msa_timing_delay;
++
++	/* ith bit indicates enabled/disabled for (i+1)th panel */
++	u16 edp_s3d_feature;
++	u16 edp_t3_optimization;
+ } __attribute__ ((packed));
+ 
+ void intel_setup_bios(struct drm_device *dev);
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index fee0ad0..342ffb7 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -24,6 +24,7 @@
+  *	Eric Anholt <eric at anholt.net>
+  */
+ 
++#include <linux/dmi.h>
+ #include <linux/i2c.h>
+ #include <linux/slab.h>
+ #include "drmP.h"
+@@ -265,6 +266,36 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
+ 	return ret;
+ }
+ 
++static struct edid *intel_crt_get_edid(struct drm_connector *connector,
++				struct i2c_adapter *i2c)
++{
++	struct edid *edid;
++
++	edid = drm_get_edid(connector, i2c);
++
++	if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
++		DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
++		intel_gmbus_force_bit(i2c, true);
++		edid = drm_get_edid(connector, i2c);
++		intel_gmbus_force_bit(i2c, false);
++	}
++
++	return edid;
++}
++
++/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
++static int intel_crt_ddc_get_modes(struct drm_connector *connector,
++				struct i2c_adapter *adapter)
++{
++	struct edid *edid;
++
++	edid = intel_crt_get_edid(connector, adapter);
++	if (!edid)
++		return 0;
++
++	return intel_connector_update_modes(connector, edid);
++}
++
+ static bool intel_crt_detect_ddc(struct drm_connector *connector)
+ {
+ 	struct intel_crt *crt = intel_attached_crt(connector);
+@@ -278,7 +309,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
+ 		struct edid *edid;
+ 		bool is_digital = false;
+ 
+-		edid = drm_get_edid(connector,
++		edid = intel_crt_get_edid(connector,
+ 			&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ 		/*
+ 		 * This may be a DVI-I connector with a shared DDC
+@@ -429,8 +460,8 @@ intel_crt_detect(struct drm_connector *connector, bool force)
+ {
+ 	struct drm_device *dev = connector->dev;
+ 	struct intel_crt *crt = intel_attached_crt(connector);
+-	struct drm_crtc *crtc;
+ 	enum drm_connector_status status;
++	struct intel_load_detect_pipe tmp;
+ 
+ 	if (I915_HAS_HOTPLUG(dev)) {
+ 		if (intel_crt_detect_hotplug(connector)) {
+@@ -449,23 +480,16 @@ intel_crt_detect(struct drm_connector *connector, bool force)
+ 		return connector->status;
+ 
+ 	/* for pre-945g platforms use load detect */
+-	crtc = crt->base.base.crtc;
+-	if (crtc && crtc->enabled) {
+-		status = intel_crt_load_detect(crt);
+-	} else {
+-		struct intel_load_detect_pipe tmp;
+-
+-		if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
+-					       &tmp)) {
+-			if (intel_crt_detect_ddc(connector))
+-				status = connector_status_connected;
+-			else
+-				status = intel_crt_load_detect(crt);
+-			intel_release_load_detect_pipe(&crt->base, connector,
+-						       &tmp);
+-		} else
+-			status = connector_status_unknown;
+-	}
++	if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
++				       &tmp)) {
++		if (intel_crt_detect_ddc(connector))
++			status = connector_status_connected;
++		else
++			status = intel_crt_load_detect(crt);
++		intel_release_load_detect_pipe(&crt->base, connector,
++					       &tmp);
++	} else
++		status = connector_status_unknown;
+ 
+ 	return status;
+ }
+@@ -483,13 +507,13 @@ static int intel_crt_get_modes(struct drm_connector *connector)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	int ret;
+ 
+-	ret = intel_ddc_get_modes(connector,
++	ret = intel_crt_ddc_get_modes(connector,
+ 				 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ 	if (ret || !IS_G4X(dev))
+ 		return ret;
+ 
+ 	/* Try to probe digital port for output in DVI-I -> VGA mode. */
+-	return intel_ddc_get_modes(connector,
++	return intel_crt_ddc_get_modes(connector,
+ 				   &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
+ }
+ 
+@@ -540,6 +564,24 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+ 	.destroy = intel_encoder_destroy,
+ };
+ 
++static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
++{
++	DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident);
++	return 1;
++}
++
++static const struct dmi_system_id intel_no_crt[] = {
++	{
++		.callback = intel_no_crt_dmi_callback,
++		.ident = "ACER ZGB",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
++		},
++	},
++	{ }
++};
++
+ void intel_crt_init(struct drm_device *dev)
+ {
+ 	struct drm_connector *connector;
+@@ -547,6 +589,10 @@ void intel_crt_init(struct drm_device *dev)
+ 	struct intel_connector *intel_connector;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
++	/* Skip machines without VGA that falsely report hotplug events */
++	if (dmi_check_system(intel_no_crt))
++		return;
++
+ 	crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
+ 	if (!crt)
+ 		return;
+@@ -571,7 +617,10 @@ void intel_crt_init(struct drm_device *dev)
+ 				1 << INTEL_ANALOG_CLONE_BIT |
+ 				1 << INTEL_SDVO_LVDS_CLONE_BIT);
+ 	crt->base.crtc_mask = (1 << 0) | (1 << 1);
+-	connector->interlace_allowed = 1;
++	if (IS_GEN2(dev))
++		connector->interlace_allowed = 0;
++	else
++		connector->interlace_allowed = 1;
+ 	connector->doublescan_allowed = 0;
+ 
+ 	drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index fa9639b..3c9b9c5 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -75,7 +75,7 @@ struct intel_limit {
+ 	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
+ 	intel_p2_t	    p2;
+ 	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
+-			int, int, intel_clock_t *);
++			int, int, intel_clock_t *, intel_clock_t *);
+ };
+ 
+ /* FDI */
+@@ -83,17 +83,21 @@ struct intel_limit {
+ 
+ static bool
+ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+-		    int target, int refclk, intel_clock_t *best_clock);
++		    int target, int refclk, intel_clock_t *match_clock,
++		    intel_clock_t *best_clock);
+ static bool
+ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+-			int target, int refclk, intel_clock_t *best_clock);
++			int target, int refclk, intel_clock_t *match_clock,
++			intel_clock_t *best_clock);
+ 
+ static bool
+ intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
+-		      int target, int refclk, intel_clock_t *best_clock);
++		      int target, int refclk, intel_clock_t *match_clock,
++		      intel_clock_t *best_clock);
+ static bool
+ intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
+-			   int target, int refclk, intel_clock_t *best_clock);
++			   int target, int refclk, intel_clock_t *match_clock,
++			   intel_clock_t *best_clock);
+ 
+ static inline u32 /* units of 100MHz */
+ intel_fdi_link_freq(struct drm_device *dev)
+@@ -534,7 +538,8 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
+ 
+ static bool
+ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+-		    int target, int refclk, intel_clock_t *best_clock)
++		    int target, int refclk, intel_clock_t *match_clock,
++		    intel_clock_t *best_clock)
+ 
+ {
+ 	struct drm_device *dev = crtc->dev;
+@@ -580,6 +585,9 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ 					if (!intel_PLL_is_valid(dev, limit,
+ 								&clock))
+ 						continue;
++					if (match_clock &&
++					    clock.p != match_clock->p)
++						continue;
+ 
+ 					this_err = abs(clock.dot - target);
+ 					if (this_err < err) {
+@@ -596,7 +604,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ 
+ static bool
+ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+-			int target, int refclk, intel_clock_t *best_clock)
++			int target, int refclk, intel_clock_t *match_clock,
++			intel_clock_t *best_clock)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -643,6 +652,9 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ 					if (!intel_PLL_is_valid(dev, limit,
+ 								&clock))
+ 						continue;
++					if (match_clock &&
++					    clock.p != match_clock->p)
++						continue;
+ 
+ 					this_err = abs(clock.dot - target);
+ 					if (this_err < err_most) {
+@@ -660,7 +672,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ 
+ static bool
+ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+-			   int target, int refclk, intel_clock_t *best_clock)
++			   int target, int refclk, intel_clock_t *match_clock,
++			   intel_clock_t *best_clock)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	intel_clock_t clock;
+@@ -686,7 +699,8 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ /* DisplayPort has only two frequencies, 162MHz and 270MHz */
+ static bool
+ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+-		      int target, int refclk, intel_clock_t *best_clock)
++		      int target, int refclk, intel_clock_t *match_clock,
++		      intel_clock_t *best_clock)
+ {
+ 	intel_clock_t clock;
+ 	if (target < 200000) {
+@@ -933,13 +947,17 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ 	     pipe_name(pipe));
+ }
+ 
+-static void assert_pipe(struct drm_i915_private *dev_priv,
+-			enum pipe pipe, bool state)
++void assert_pipe(struct drm_i915_private *dev_priv,
++		 enum pipe pipe, bool state)
+ {
+ 	int reg;
+ 	u32 val;
+ 	bool cur_state;
+ 
++	/* if we need the pipe A quirk it must be always on */
++	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
++		state = true;
++
+ 	reg = PIPECONF(pipe);
+ 	val = I915_READ(reg);
+ 	cur_state = !!(val & PIPECONF_ENABLE);
+@@ -947,22 +965,25 @@ static void assert_pipe(struct drm_i915_private *dev_priv,
+ 	     "pipe %c assertion failure (expected %s, current %s)\n",
+ 	     pipe_name(pipe), state_string(state), state_string(cur_state));
+ }
+-#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+-#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+ 
+-static void assert_plane_enabled(struct drm_i915_private *dev_priv,
+-				 enum plane plane)
++static void assert_plane(struct drm_i915_private *dev_priv,
++			 enum plane plane, bool state)
+ {
+ 	int reg;
+ 	u32 val;
++	bool cur_state;
+ 
+ 	reg = DSPCNTR(plane);
+ 	val = I915_READ(reg);
+-	WARN(!(val & DISPLAY_PLANE_ENABLE),
+-	     "plane %c assertion failure, should be active but is disabled\n",
+-	     plane_name(plane));
++	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
++	WARN(cur_state != state,
++	     "plane %c assertion failure (expected %s, current %s)\n",
++	     plane_name(plane), state_string(state), state_string(cur_state));
+ }
+ 
++#define assert_plane_enabled(d, p) assert_plane(d, p, true)
++#define assert_plane_disabled(d, p) assert_plane(d, p, false)
++
+ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
+ 				   enum pipe pipe)
+ {
+@@ -971,8 +992,14 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
+ 	int cur_pipe;
+ 
+ 	/* Planes are fixed to pipes on ILK+ */
+-	if (HAS_PCH_SPLIT(dev_priv->dev))
++	if (HAS_PCH_SPLIT(dev_priv->dev)) {
++		reg = DSPCNTR(pipe);
++		val = I915_READ(reg);
++		WARN((val & DISPLAY_PLANE_ENABLE),
++		     "plane %c assertion failure, should be disabled but not\n",
++		     plane_name(pipe));
+ 		return;
++	}
+ 
+ 	/* Need to check both planes against the pipe */
+ 	for (i = 0; i < 2; i++) {
+@@ -1224,7 +1251,8 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
+ 				  enum pipe pipe)
+ {
+ 	int reg;
+-	u32 val;
++	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
++		pll_sel = TRANSC_DPLL_ENABLE;
+ 
+ 	if (pipe > 1)
+ 		return;
+@@ -1235,6 +1263,15 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
+ 	/* Make sure transcoder isn't still depending on us */
+ 	assert_transcoder_disabled(dev_priv, pipe);
+ 
++	if (pipe == 0)
++		pll_sel |= TRANSC_DPLLA_SEL;
++	else if (pipe == 1)
++		pll_sel |= TRANSC_DPLLB_SEL;
++
++
++	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
++		return;
++
+ 	reg = PCH_DPLL(pipe);
+ 	val = I915_READ(reg);
+ 	val &= ~DPLL_VCO_ENABLE;
+@@ -1247,7 +1284,8 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
+ 				    enum pipe pipe)
+ {
+ 	int reg;
+-	u32 val;
++	u32 val, pipeconf_val;
++	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+ 
+ 	/* PCH only available on ILK+ */
+ 	BUG_ON(dev_priv->info->gen < 5);
+@@ -1261,6 +1299,7 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
+ 
+ 	reg = TRANSCONF(pipe);
+ 	val = I915_READ(reg);
++	pipeconf_val = I915_READ(PIPECONF(pipe));
+ 
+ 	if (HAS_PCH_IBX(dev_priv->dev)) {
+ 		/*
+@@ -1268,8 +1307,19 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
+ 		 * that in pipeconf reg.
+ 		 */
+ 		val &= ~PIPE_BPC_MASK;
+-		val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
++		val |= pipeconf_val & PIPE_BPC_MASK;
+ 	}
++
++	val &= ~TRANS_INTERLACE_MASK;
++	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
++		if (HAS_PCH_IBX(dev_priv->dev) &&
++		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
++			val |= TRANS_LEGACY_INTERLACED_ILK;
++		else
++			val |= TRANS_INTERLACED;
++	else
++		val |= TRANS_PROGRESSIVE;
++
+ 	I915_WRITE(reg, val | TRANS_ENABLE);
+ 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+ 		DRM_ERROR("failed to enable transcoder %d\n", pipe);
+@@ -1529,8 +1579,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ 	u32 fbc_ctl, fbc_ctl2;
+ 
+ 	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+-	if (fb->pitch < cfb_pitch)
+-		cfb_pitch = fb->pitch;
++	if (fb->pitches[0] < cfb_pitch)
++		cfb_pitch = fb->pitches[0];
+ 
+ 	/* FBC_CTL wants 64B units */
+ 	cfb_pitch = (cfb_pitch / 64) - 1;
+@@ -2022,6 +2072,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ 		ret = i915_gem_object_get_fence(obj, pipelined);
+ 		if (ret)
+ 			goto err_unpin;
++
++		i915_gem_object_pin_fence(obj);
+ 	}
+ 
+ 	dev_priv->mm.interruptible = true;
+@@ -2034,6 +2086,12 @@ err_interruptible:
+ 	return ret;
+ }
+ 
++void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
++{
++	i915_gem_object_unpin_fence(obj);
++	i915_gem_object_unpin(obj);
++}
++
+ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ 			     int x, int y)
+ {
+@@ -2091,11 +2149,11 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ 	I915_WRITE(reg, dspcntr);
+ 
+ 	Start = obj->gtt_offset;
+-	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
++	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ 
+ 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+-		      Start, Offset, x, y, fb->pitch);
+-	I915_WRITE(DSPSTRIDE(plane), fb->pitch);
++		      Start, Offset, x, y, fb->pitches[0]);
++	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+ 	if (INTEL_INFO(dev)->gen >= 4) {
+ 		I915_WRITE(DSPSURF(plane), Start);
+ 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+@@ -2172,11 +2230,11 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
+ 	I915_WRITE(reg, dspcntr);
+ 
+ 	Start = obj->gtt_offset;
+-	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
++	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ 
+ 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+-		      Start, Offset, x, y, fb->pitch);
+-	I915_WRITE(DSPSTRIDE(plane), fb->pitch);
++		      Start, Offset, x, y, fb->pitches[0]);
++	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+ 	I915_WRITE(DSPSURF(plane), Start);
+ 	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ 	I915_WRITE(DSPADDR(plane), Offset);
+@@ -2275,7 +2333,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
+ 					 LEAVE_ATOMIC_MODE_SET);
+ 	if (ret) {
+-		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
++		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+ 		mutex_unlock(&dev->struct_mutex);
+ 		DRM_ERROR("failed to update base address\n");
+ 		return ret;
+@@ -2283,7 +2341,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ 
+ 	if (old_fb) {
+ 		intel_wait_for_vblank(dev, intel_crtc->pipe);
+-		i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
++		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ 	}
+ 
+ 	mutex_unlock(&dev->struct_mutex);
+@@ -2427,9 +2485,11 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+ 	udelay(150);
+ 
+ 	/* Ironlake workaround, enable clock pointer after FDI enable*/
+-	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+-	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+-		   FDI_RX_PHASE_SYNC_POINTER_EN);
++	if (HAS_PCH_IBX(dev)) {
++		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
++		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
++			   FDI_RX_PHASE_SYNC_POINTER_EN);
++	}
+ 
+ 	reg = FDI_RX_IIR(pipe);
+ 	for (tries = 0; tries < 5; tries++) {
+@@ -2942,6 +3002,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
+ 	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
+ 	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
+ 	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
++	I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
+ 
+ 	intel_fdi_normal_train(crtc);
+ 
+@@ -3331,10 +3392,12 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
+ 	struct drm_device *dev = crtc->dev;
+ 
+ 	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
++	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
+ 
+ 	if (crtc->fb) {
+ 		mutex_lock(&dev->struct_mutex);
+-		i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
++		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
+ 		mutex_unlock(&dev->struct_mutex);
+ 	}
+ }
+@@ -3408,10 +3471,10 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
+ 			return false;
+ 	}
+ 
+-	/* XXX some encoders set the crtcinfo, others don't.
+-	 * Obviously we need some form of conflict resolution here...
+-	 */
+-	if (adjusted_mode->crtc_htotal == 0)
++	/* All interlaced capable intel hw wants timings in frames. Note though
++	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
++	 * timings, so we need to be careful not to clobber these.*/
++	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
+ 		drm_mode_set_crtcinfo(adjusted_mode, 0);
+ 
+ 	return true;
+@@ -4527,10 +4590,11 @@ static void ironlake_update_wm(struct drm_device *dev)
+ 	 */
+ }
+ 
+-static void sandybridge_update_wm(struct drm_device *dev)
++void sandybridge_update_wm(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
++	u32 val;
+ 	int fbc_wm, plane_wm, cursor_wm;
+ 	unsigned int enabled;
+ 
+@@ -4539,8 +4603,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
+ 			    &sandybridge_display_wm_info, latency,
+ 			    &sandybridge_cursor_wm_info, latency,
+ 			    &plane_wm, &cursor_wm)) {
+-		I915_WRITE(WM0_PIPEA_ILK,
+-			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
++		val = I915_READ(WM0_PIPEA_ILK);
++		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
++		I915_WRITE(WM0_PIPEA_ILK, val |
++			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ 			      " plane %d, " "cursor: %d\n",
+ 			      plane_wm, cursor_wm);
+@@ -4551,8 +4617,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
+ 			    &sandybridge_display_wm_info, latency,
+ 			    &sandybridge_cursor_wm_info, latency,
+ 			    &plane_wm, &cursor_wm)) {
+-		I915_WRITE(WM0_PIPEB_ILK,
+-			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
++		val = I915_READ(WM0_PIPEB_ILK);
++		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
++		I915_WRITE(WM0_PIPEB_ILK, val |
++			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ 			      " plane %d, cursor: %d\n",
+ 			      plane_wm, cursor_wm);
+@@ -4565,8 +4633,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
+ 			    &sandybridge_display_wm_info, latency,
+ 			    &sandybridge_cursor_wm_info, latency,
+ 			    &plane_wm, &cursor_wm)) {
+-		I915_WRITE(WM0_PIPEC_IVB,
+-			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
++		val = I915_READ(WM0_PIPEC_IVB);
++		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
++		I915_WRITE(WM0_PIPEC_IVB, val |
++			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
+ 		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
+ 			      " plane %d, cursor: %d\n",
+ 			      plane_wm, cursor_wm);
+@@ -4587,7 +4657,8 @@ static void sandybridge_update_wm(struct drm_device *dev)
+ 	I915_WRITE(WM2_LP_ILK, 0);
+ 	I915_WRITE(WM1_LP_ILK, 0);
+ 
+-	if (!single_plane_enabled(enabled))
++	if (!single_plane_enabled(enabled) ||
++	    dev_priv->sprite_scaling_enabled)
+ 		return;
+ 	enabled = ffs(enabled) - 1;
+ 
+@@ -4637,6 +4708,161 @@ static void sandybridge_update_wm(struct drm_device *dev)
+ 		   cursor_wm);
+ }
+ 
++static bool
++sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
++			      uint32_t sprite_width, int pixel_size,
++			      const struct intel_watermark_params *display,
++			      int display_latency_ns, int *sprite_wm)
++{
++	struct drm_crtc *crtc;
++	int clock;
++	int entries, tlb_miss;
++
++	crtc = intel_get_crtc_for_plane(dev, plane);
++	if (crtc->fb == NULL || !crtc->enabled) {
++		*sprite_wm = display->guard_size;
++		return false;
++	}
++
++	clock = crtc->mode.clock;
++
++	/* Use the small buffer method to calculate the sprite watermark */
++	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
++	tlb_miss = display->fifo_size*display->cacheline_size -
++		sprite_width * 8;
++	if (tlb_miss > 0)
++		entries += tlb_miss;
++	entries = DIV_ROUND_UP(entries, display->cacheline_size);
++	*sprite_wm = entries + display->guard_size;
++	if (*sprite_wm > (int)display->max_wm)
++		*sprite_wm = display->max_wm;
++
++	return true;
++}
++
++static bool
++sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
++				uint32_t sprite_width, int pixel_size,
++				const struct intel_watermark_params *display,
++				int latency_ns, int *sprite_wm)
++{
++	struct drm_crtc *crtc;
++	unsigned long line_time_us;
++	int clock;
++	int line_count, line_size;
++	int small, large;
++	int entries;
++
++	if (!latency_ns) {
++		*sprite_wm = 0;
++		return false;
++	}
++
++	crtc = intel_get_crtc_for_plane(dev, plane);
++	clock = crtc->mode.clock;
++	if (!clock) {
++		*sprite_wm = 0;
++		return false;
++	}
++
++	line_time_us = (sprite_width * 1000) / clock;
++	if (!line_time_us) {
++		*sprite_wm = 0;
++		return false;
++	}
++
++	line_count = (latency_ns / line_time_us + 1000) / 1000;
++	line_size = sprite_width * pixel_size;
++
++	/* Use the minimum of the small and large buffer method for primary */
++	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
++	large = line_count * line_size;
++
++	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
++	*sprite_wm = entries + display->guard_size;
++
++	return *sprite_wm > 0x3ff ? false : true;
++}
++
++static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
++					 uint32_t sprite_width, int pixel_size)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
++	u32 val;
++	int sprite_wm, reg;
++	int ret;
++
++	switch (pipe) {
++	case 0:
++		reg = WM0_PIPEA_ILK;
++		break;
++	case 1:
++		reg = WM0_PIPEB_ILK;
++		break;
++	case 2:
++		reg = WM0_PIPEC_IVB;
++		break;
++	default:
++		return; /* bad pipe */
++	}
++
++	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
++					    &sandybridge_display_wm_info,
++					    latency, &sprite_wm);
++	if (!ret) {
++		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
++			      pipe);
++		return;
++	}
++
++	val = I915_READ(reg);
++	val &= ~WM0_PIPE_SPRITE_MASK;
++	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
++	DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
++
++
++	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
++					      pixel_size,
++					      &sandybridge_display_srwm_info,
++					      SNB_READ_WM1_LATENCY() * 500,
++					      &sprite_wm);
++	if (!ret) {
++		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
++			      pipe);
++		return;
++	}
++	I915_WRITE(WM1S_LP_ILK, sprite_wm);
++
++	/* Only IVB has two more LP watermarks for sprite */
++	if (!IS_IVYBRIDGE(dev))
++		return;
++
++	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
++					      pixel_size,
++					      &sandybridge_display_srwm_info,
++					      SNB_READ_WM2_LATENCY() * 500,
++					      &sprite_wm);
++	if (!ret) {
++		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
++			      pipe);
++		return;
++	}
++	I915_WRITE(WM2S_LP_IVB, sprite_wm);
++
++	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
++					      pixel_size,
++					      &sandybridge_display_srwm_info,
++					      SNB_READ_WM3_LATENCY() * 500,
++					      &sprite_wm);
++	if (!ret) {
++		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
++			      pipe);
++		return;
++	}
++	I915_WRITE(WM3S_LP_IVB, sprite_wm);
++}
++
+ /**
+  * intel_update_watermarks - update FIFO watermark values based on current modes
+  *
+@@ -4677,6 +4903,16 @@ static void intel_update_watermarks(struct drm_device *dev)
+ 		dev_priv->display.update_wm(dev);
+ }
+ 
++void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
++				    uint32_t sprite_width, int pixel_size)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++
++	if (dev_priv->display.update_sprite_wm)
++		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
++						   pixel_size);
++}
++
+ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
+ {
+ 	if (i915_panel_use_ssc >= 0)
+@@ -4824,6 +5060,82 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ 	return display_bpc != bpc;
+ }
+ 
++static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	int refclk;
++
++	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
++	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
++		refclk = dev_priv->lvds_ssc_freq * 1000;
++		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
++			      refclk / 1000);
++	} else if (!IS_GEN2(dev)) {
++		refclk = 96000;
++	} else {
++		refclk = 48000;
++	}
++
++	return refclk;
++}
++
++static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
++				      intel_clock_t *clock)
++{
++	/* SDVO TV has fixed PLL values depend on its clock range,
++	   this mirrors vbios setting. */
++	if (adjusted_mode->clock >= 100000
++	    && adjusted_mode->clock < 140500) {
++		clock->p1 = 2;
++		clock->p2 = 10;
++		clock->n = 3;
++		clock->m1 = 16;
++		clock->m2 = 8;
++	} else if (adjusted_mode->clock >= 140500
++		   && adjusted_mode->clock <= 200000) {
++		clock->p1 = 1;
++		clock->p2 = 10;
++		clock->n = 6;
++		clock->m1 = 12;
++		clock->m2 = 8;
++	}
++}
++
++static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
++				     intel_clock_t *clock,
++				     intel_clock_t *reduced_clock)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++	int pipe = intel_crtc->pipe;
++	u32 fp, fp2 = 0;
++
++	if (IS_PINEVIEW(dev)) {
++		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
++		if (reduced_clock)
++			fp2 = (1 << reduced_clock->n) << 16 |
++				reduced_clock->m1 << 8 | reduced_clock->m2;
++	} else {
++		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
++		if (reduced_clock)
++			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
++				reduced_clock->m2;
++	}
++
++	I915_WRITE(FP0(pipe), fp);
++
++	intel_crtc->lowfreq_avail = false;
++	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
++	    reduced_clock && i915_powersave) {
++		I915_WRITE(FP1(pipe), fp2);
++		intel_crtc->lowfreq_avail = true;
++	} else {
++		I915_WRITE(FP1(pipe), fp);
++	}
++}
++
+ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ 			      struct drm_display_mode *mode,
+ 			      struct drm_display_mode *adjusted_mode,
+@@ -4837,7 +5149,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ 	int plane = intel_crtc->plane;
+ 	int refclk, num_connectors = 0;
+ 	intel_clock_t clock, reduced_clock;
+-	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
++	u32 dpll, dspcntr, pipeconf, vsyncshift;
+ 	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
+ 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
+ 	struct drm_mode_config *mode_config = &dev->mode_config;
+@@ -4878,15 +5190,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ 		num_connectors++;
+ 	}
+ 
+-	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
+-		refclk = dev_priv->lvds_ssc_freq * 1000;
+-		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
+-			      refclk / 1000);
+-	} else if (!IS_GEN2(dev)) {
+-		refclk = 96000;
+-	} else {
+-		refclk = 48000;
+-	}
++	refclk = i9xx_get_refclk(crtc, num_connectors);
+ 
+ 	/*
+ 	 * Returns a set of divisors for the desired target clock with the given
+@@ -4894,7 +5198,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ 	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ 	 */
+ 	limit = intel_limit(crtc, refclk);
+-	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
++	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
++			     &clock);
+ 	if (!ok) {
+ 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ 		return -EINVAL;
+@@ -4904,53 +5209,24 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ 	intel_crtc_update_cursor(crtc, true);
+ 
+ 	if (is_lvds && dev_priv->lvds_downclock_avail) {
++		/*
++		 * Ensure we match the reduced clock's P to the target clock.
++		 * If the clocks don't match, we can't switch the display clock
++		 * by using the FP0/FP1. In such case we will disable the LVDS
++		 * downclock feature.
++		*/
+ 		has_reduced_clock = limit->find_pll(limit, crtc,
+ 						    dev_priv->lvds_downclock,
+ 						    refclk,
++						    &clock,
+ 						    &reduced_clock);
+-		if (has_reduced_clock && (clock.p != reduced_clock.p)) {
+-			/*
+-			 * If the different P is found, it means that we can't
+-			 * switch the display clock by using the FP0/FP1.
+-			 * In such case we will disable the LVDS downclock
+-			 * feature.
+-			 */
+-			DRM_DEBUG_KMS("Different P is found for "
+-				      "LVDS clock/downclock\n");
+-			has_reduced_clock = 0;
+-		}
+-	}
+-	/* SDVO TV has fixed PLL values depend on its clock range,
+-	   this mirrors vbios setting. */
+-	if (is_sdvo && is_tv) {
+-		if (adjusted_mode->clock >= 100000
+-		    && adjusted_mode->clock < 140500) {
+-			clock.p1 = 2;
+-			clock.p2 = 10;
+-			clock.n = 3;
+-			clock.m1 = 16;
+-			clock.m2 = 8;
+-		} else if (adjusted_mode->clock >= 140500
+-			   && adjusted_mode->clock <= 200000) {
+-			clock.p1 = 1;
+-			clock.p2 = 10;
+-			clock.n = 6;
+-			clock.m1 = 12;
+-			clock.m2 = 8;
+-		}
+ 	}
+ 
+-	if (IS_PINEVIEW(dev)) {
+-		fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
+-		if (has_reduced_clock)
+-			fp2 = (1 << reduced_clock.n) << 16 |
+-				reduced_clock.m1 << 8 | reduced_clock.m2;
+-	} else {
+-		fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+-		if (has_reduced_clock)
+-			fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+-				reduced_clock.m2;
+-	}
++	if (is_sdvo && is_tv)
++		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
++
++	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
++				 &reduced_clock : NULL);
+ 
+ 	dpll = DPLL_VGA_MODE_DIS;
+ 
+@@ -5024,8 +5300,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ 	/* Set up the display plane register */
+ 	dspcntr = DISPPLANE_GAMMA_ENABLE;
+ 
+-	/* Ironlake's plane is forced to pipe, bit 24 is to
+-	   enable color space conversion */
+ 	if (pipe == 0)
+ 		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
+ 	else
+@@ -5060,7 +5334,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ 	drm_mode_debug_printmodeline(mode);
+ 
+-	I915_WRITE(FP0(pipe), fp);
+ 	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
+ 
+ 	POSTING_READ(DPLL(pipe));
+@@ -5147,33 +5420,32 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ 		I915_WRITE(DPLL(pipe), dpll);
+ 	}
+ 
+-	intel_crtc->lowfreq_avail = false;
+-	if (is_lvds && has_reduced_clock && i915_powersave) {
+-		I915_WRITE(FP1(pipe), fp2);
+-		intel_crtc->lowfreq_avail = true;
+-		if (HAS_PIPE_CXSR(dev)) {
++	if (HAS_PIPE_CXSR(dev)) {
++		if (intel_crtc->lowfreq_avail) {
+ 			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
+ 			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+-		}
+-	} else {
+-		I915_WRITE(FP1(pipe), fp);
+-		if (HAS_PIPE_CXSR(dev)) {
++		} else {
+ 			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+ 			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+ 		}
+ 	}
+ 
+-	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
++	pipeconf &= ~PIPECONF_INTERLACE_MASK;
++	if (!IS_GEN2(dev) &&
++	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ 		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
+ 		/* the chip adds 2 halflines automatically */
+-		adjusted_mode->crtc_vdisplay -= 1;
+ 		adjusted_mode->crtc_vtotal -= 1;
+-		adjusted_mode->crtc_vblank_start -= 1;
+ 		adjusted_mode->crtc_vblank_end -= 1;
+-		adjusted_mode->crtc_vsync_end -= 1;
+-		adjusted_mode->crtc_vsync_start -= 1;
+-	} else
+-		pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
++		vsyncshift = adjusted_mode->crtc_hsync_start
++			     - adjusted_mode->crtc_htotal/2;
++	} else {
++		pipeconf |= PIPECONF_PROGRESSIVE;
++		vsyncshift = 0;
++	}
++
++	if (!IS_GEN3(dev))
++		I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
+ 
+ 	I915_WRITE(HTOTAL(pipe),
+ 		   (adjusted_mode->crtc_hdisplay - 1) |
+@@ -5290,7 +5562,8 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
+ 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
+ 			DRM_DEBUG_KMS("Using SSC on panel\n");
+ 			temp |= DREF_SSC1_ENABLE;
+-		}
++		} else
++			temp &= ~DREF_SSC1_ENABLE;
+ 
+ 		/* Get SSC going before enabling the outputs */
+ 		I915_WRITE(PCH_DREF_CONTROL, temp);
+@@ -5439,7 +5712,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ 	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ 	 */
+ 	limit = intel_limit(crtc, refclk);
+-	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
++	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
++			     &clock);
+ 	if (!ok) {
+ 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ 		return -EINVAL;
+@@ -5449,21 +5723,17 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ 	intel_crtc_update_cursor(crtc, true);
+ 
+ 	if (is_lvds && dev_priv->lvds_downclock_avail) {
++		/*
++		 * Ensure we match the reduced clock's P to the target clock.
++		 * If the clocks don't match, we can't switch the display clock
++		 * by using the FP0/FP1. In such case we will disable the LVDS
++		 * downclock feature.
++		*/
+ 		has_reduced_clock = limit->find_pll(limit, crtc,
+ 						    dev_priv->lvds_downclock,
+ 						    refclk,
++						    &clock,
+ 						    &reduced_clock);
+-		if (has_reduced_clock && (clock.p != reduced_clock.p)) {
+-			/*
+-			 * If the different P is found, it means that we can't
+-			 * switch the display clock by using the FP0/FP1.
+-			 * In such case we will disable the LVDS downclock
+-			 * feature.
+-			 */
+-			DRM_DEBUG_KMS("Different P is found for "
+-				      "LVDS clock/downclock\n");
+-			has_reduced_clock = 0;
+-		}
+ 	}
+ 	/* SDVO TV has fixed PLL values depend on its clock range,
+ 	   this mirrors vbios setting. */
+@@ -5758,17 +6028,19 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ 		}
+ 	}
+ 
++	pipeconf &= ~PIPECONF_INTERLACE_MASK;
+ 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+-		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
++		pipeconf |= PIPECONF_INTERLACED_ILK;
+ 		/* the chip adds 2 halflines automatically */
+-		adjusted_mode->crtc_vdisplay -= 1;
+ 		adjusted_mode->crtc_vtotal -= 1;
+-		adjusted_mode->crtc_vblank_start -= 1;
+ 		adjusted_mode->crtc_vblank_end -= 1;
+-		adjusted_mode->crtc_vsync_end -= 1;
+-		adjusted_mode->crtc_vsync_start -= 1;
+-	} else
+-		pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
++		I915_WRITE(VSYNCSHIFT(pipe),
++			   adjusted_mode->crtc_hsync_start
++			   - adjusted_mode->crtc_htotal/2);
++	} else {
++		pipeconf |= PIPECONF_PROGRESSIVE;
++		I915_WRITE(VSYNCSHIFT(pipe), 0);
++	}
+ 
+ 	I915_WRITE(HTOTAL(pipe),
+ 		   (adjusted_mode->crtc_hdisplay - 1) |
+@@ -5811,12 +6083,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ 
+ 	intel_wait_for_vblank(dev, pipe);
+ 
+-	if (IS_GEN5(dev)) {
+-		/* enable address swizzle for tiling buffer */
+-		temp = I915_READ(DISP_ARB_CTL);
+-		I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
+-	}
+-
+ 	I915_WRITE(DSPCNTR(plane), dspcntr);
+ 	POSTING_READ(DSPCNTR(plane));
+ 
+@@ -5843,14 +6109,45 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 
+ 	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
+ 					      x, y, old_fb);
+-
+ 	drm_vblank_post_modeset(dev, pipe);
+ 
+-	intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
++	if (ret)
++		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
++	else
++		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
+ 
+ 	return ret;
+ }
+ 
++static bool intel_eld_uptodate(struct drm_connector *connector,
++			       int reg_eldv, uint32_t bits_eldv,
++			       int reg_elda, uint32_t bits_elda,
++			       int reg_edid)
++{
++	struct drm_i915_private *dev_priv = connector->dev->dev_private;
++	uint8_t *eld = connector->eld;
++	uint32_t i;
++
++	i = I915_READ(reg_eldv);
++	i &= bits_eldv;
++
++	if (!eld[0])
++		return !i;
++
++	if (!i)
++		return false;
++
++	i = I915_READ(reg_elda);
++	i &= ~bits_elda;
++	I915_WRITE(reg_elda, i);
++
++	for (i = 0; i < eld[2]; i++)
++		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
++			return false;
++
++	return true;
++}
++
+ static void g4x_write_eld(struct drm_connector *connector,
+ 			  struct drm_crtc *crtc)
+ {
+@@ -5867,6 +6164,12 @@ static void g4x_write_eld(struct drm_connector *connector,
+ 	else
+ 		eldv = G4X_ELDV_DEVCTG;
+ 
++	if (intel_eld_uptodate(connector,
++			       G4X_AUD_CNTL_ST, eldv,
++			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
++			       G4X_HDMIW_HDMIEDID))
++		return;
++
+ 	i = I915_READ(G4X_AUD_CNTL_ST);
+ 	i &= ~(eldv | G4X_ELD_ADDR);
+ 	len = (i >> 9) & 0x1f;		/* ELD buffer size */
+@@ -5894,22 +6197,26 @@ static void ironlake_write_eld(struct drm_connector *connector,
+ 	uint32_t i;
+ 	int len;
+ 	int hdmiw_hdmiedid;
++	int aud_config;
+ 	int aud_cntl_st;
+ 	int aud_cntrl_st2;
+ 
+ 	if (HAS_PCH_IBX(connector->dev)) {
+-		hdmiw_hdmiedid = GEN5_HDMIW_HDMIEDID_A;
+-		aud_cntl_st = GEN5_AUD_CNTL_ST_A;
+-		aud_cntrl_st2 = GEN5_AUD_CNTL_ST2;
++		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
++		aud_config = IBX_AUD_CONFIG_A;
++		aud_cntl_st = IBX_AUD_CNTL_ST_A;
++		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+ 	} else {
+-		hdmiw_hdmiedid = GEN7_HDMIW_HDMIEDID_A;
+-		aud_cntl_st = GEN7_AUD_CNTRL_ST_A;
+-		aud_cntrl_st2 = GEN7_AUD_CNTRL_ST2;
++		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
++		aud_config = CPT_AUD_CONFIG_A;
++		aud_cntl_st = CPT_AUD_CNTL_ST_A;
++		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ 	}
+ 
+ 	i = to_intel_crtc(crtc)->pipe;
+ 	hdmiw_hdmiedid += i * 0x100;
+ 	aud_cntl_st += i * 0x100;
++	aud_config += i * 0x100;
+ 
+ 	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
+ 
+@@ -5918,14 +6225,27 @@ static void ironlake_write_eld(struct drm_connector *connector,
+ 	if (!i) {
+ 		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
+ 		/* operate blindly on all ports */
+-		eldv = GEN5_ELD_VALIDB;
+-		eldv |= GEN5_ELD_VALIDB << 4;
+-		eldv |= GEN5_ELD_VALIDB << 8;
++		eldv = IBX_ELD_VALIDB;
++		eldv |= IBX_ELD_VALIDB << 4;
++		eldv |= IBX_ELD_VALIDB << 8;
+ 	} else {
+ 		DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
+-		eldv = GEN5_ELD_VALIDB << ((i - 1) * 4);
++		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
+ 	}
+ 
++	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
++		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
++		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
++		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
++	} else
++		I915_WRITE(aud_config, 0);
++
++	if (intel_eld_uptodate(connector,
++			       aud_cntrl_st2, eldv,
++			       aud_cntl_st, IBX_ELD_ADDRESS,
++			       hdmiw_hdmiedid))
++		return;
++
+ 	i = I915_READ(aud_cntrl_st2);
+ 	i &= ~eldv;
+ 	I915_WRITE(aud_cntrl_st2, i);
+@@ -5933,13 +6253,8 @@ static void ironlake_write_eld(struct drm_connector *connector,
+ 	if (!eld[0])
+ 		return;
+ 
+-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+-		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+-		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
+-	}
+-
+ 	i = I915_READ(aud_cntl_st);
+-	i &= ~GEN5_ELD_ADDRESS;
++	i &= ~IBX_ELD_ADDRESS;
+ 	I915_WRITE(aud_cntl_st, i);
+ 
+ 	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
+@@ -6319,7 +6634,7 @@ static struct drm_display_mode load_detect_mode = {
+ 
+ static struct drm_framebuffer *
+ intel_framebuffer_create(struct drm_device *dev,
+-			 struct drm_mode_fb_cmd *mode_cmd,
++			 struct drm_mode_fb_cmd2 *mode_cmd,
+ 			 struct drm_i915_gem_object *obj)
+ {
+ 	struct intel_framebuffer *intel_fb;
+@@ -6361,7 +6676,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
+ 				  int depth, int bpp)
+ {
+ 	struct drm_i915_gem_object *obj;
+-	struct drm_mode_fb_cmd mode_cmd;
++	struct drm_mode_fb_cmd2 mode_cmd;
+ 
+ 	obj = i915_gem_alloc_object(dev,
+ 				    intel_framebuffer_size_for_mode(mode, bpp));
+@@ -6370,9 +6685,9 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
+ 
+ 	mode_cmd.width = mode->hdisplay;
+ 	mode_cmd.height = mode->vdisplay;
+-	mode_cmd.depth = depth;
+-	mode_cmd.bpp = bpp;
+-	mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
++	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
++								bpp);
++	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+ 
+ 	return intel_framebuffer_create(dev, &mode_cmd, obj);
+ }
+@@ -6393,11 +6708,11 @@ mode_fits_in_fbdev(struct drm_device *dev,
+ 		return NULL;
+ 
+ 	fb = &dev_priv->fbdev->ifb.base;
+-	if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
+-							  fb->bits_per_pixel))
++	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
++							       fb->bits_per_pixel))
+ 		return NULL;
+ 
+-	if (obj->base.size < mode->vdisplay * fb->pitch)
++	if (obj->base.size < mode->vdisplay * fb->pitches[0])
+ 		return NULL;
+ 
+ 	return fb;
+@@ -6729,9 +7044,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
+ 	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
+ 		DRM_DEBUG_DRIVER("upclocking LVDS\n");
+ 
+-		/* Unlock panel regs */
+-		I915_WRITE(PP_CONTROL,
+-			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
++		assert_panel_unlocked(dev_priv, pipe);
+ 
+ 		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
+ 		I915_WRITE(dpll_reg, dpll);
+@@ -6740,9 +7053,6 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
+ 		dpll = I915_READ(dpll_reg);
+ 		if (dpll & DISPLAY_RATE_SELECT_FPA1)
+ 			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
+-
+-		/* ...and lock them again */
+-		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
+ 	}
+ 
+ 	/* Schedule downclock */
+@@ -6755,9 +7065,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
+ 	struct drm_device *dev = crtc->dev;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+-	int pipe = intel_crtc->pipe;
+-	int dpll_reg = DPLL(pipe);
+-	int dpll = I915_READ(dpll_reg);
+ 
+ 	if (HAS_PCH_SPLIT(dev))
+ 		return;
+@@ -6770,23 +7077,22 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
+ 	 * the manual case.
+ 	 */
+ 	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
++		int pipe = intel_crtc->pipe;
++		int dpll_reg = DPLL(pipe);
++		u32 dpll;
++
+ 		DRM_DEBUG_DRIVER("downclocking LVDS\n");
+ 
+-		/* Unlock panel regs */
+-		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
+-			   PANEL_UNLOCK_REGS);
++		assert_panel_unlocked(dev_priv, pipe);
+ 
++		dpll = I915_READ(dpll_reg);
+ 		dpll |= DISPLAY_RATE_SELECT_FPA1;
+ 		I915_WRITE(dpll_reg, dpll);
+ 		intel_wait_for_vblank(dev, pipe);
+ 		dpll = I915_READ(dpll_reg);
+ 		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
+ 			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
+-
+-		/* ...and lock them again */
+-		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
+ 	}
+-
+ }
+ 
+ /**
+@@ -6899,7 +7205,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
+ 		container_of(__work, struct intel_unpin_work, work);
+ 
+ 	mutex_lock(&work->dev->struct_mutex);
+-	i915_gem_object_unpin(work->old_fb_obj);
++	intel_unpin_fb_obj(work->old_fb_obj);
+ 	drm_gem_object_unreference(&work->pending_flip_obj->base);
+ 	drm_gem_object_unreference(&work->old_fb_obj->base);
+ 
+@@ -6927,18 +7233,11 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
+ 
+ 	spin_lock_irqsave(&dev->event_lock, flags);
+ 	work = intel_crtc->unpin_work;
+-
+-	/* Ensure we don't miss a work->pending update ... */
+-	smp_rmb();
+-
+-	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
++	if (work == NULL || !work->pending) {
+ 		spin_unlock_irqrestore(&dev->event_lock, flags);
+ 		return;
+ 	}
+ 
+-	/* and that the unpin work is consistent wrt ->pending. */
+-	smp_rmb();
+-
+ 	intel_crtc->unpin_work = NULL;
+ 
+ 	if (work->event) {
+@@ -7010,25 +7309,16 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
+ 		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
+ 	unsigned long flags;
+ 
+-	/* NB: An MMIO update of the plane base pointer will also
+-	 * generate a page-flip completion irq, i.e. every modeset
+-	 * is also accompanied by a spurious intel_prepare_page_flip().
+-	 */
+ 	spin_lock_irqsave(&dev->event_lock, flags);
+-	if (intel_crtc->unpin_work)
+-		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
++	if (intel_crtc->unpin_work) {
++		if ((++intel_crtc->unpin_work->pending) > 1)
++			DRM_ERROR("Prepared flip multiple times\n");
++	} else {
++		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
++	}
+ 	spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+ 
+-inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+-{
+-	/* Ensure that the work item is consistent when activating it ... */
+-	smp_wmb();
+-	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+-	/* and that it is marked active as soon as the irq could fire. */
+-	smp_wmb();
+-}
+-
+ static int intel_gen2_queue_flip(struct drm_device *dev,
+ 				 struct drm_crtc *crtc,
+ 				 struct drm_framebuffer *fb,
+@@ -7045,7 +7335,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
+ 		goto err;
+ 
+ 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
+-	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
++	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
+ 
+ 	ret = BEGIN_LP_RING(6);
+ 	if (ret)
+@@ -7062,16 +7352,14 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
+ 	OUT_RING(MI_NOOP);
+ 	OUT_RING(MI_DISPLAY_FLIP |
+ 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+-	OUT_RING(fb->pitch);
++	OUT_RING(fb->pitches[0]);
+ 	OUT_RING(obj->gtt_offset + offset);
+-	OUT_RING(MI_NOOP);
+-
+-	intel_mark_page_flip_active(intel_crtc);
++	OUT_RING(0); /* aux display base address, unused */
+ 	ADVANCE_LP_RING();
+ 	return 0;
+ 
+ err_unpin:
+-	i915_gem_object_unpin(obj);
++	intel_unpin_fb_obj(obj);
+ err:
+ 	return ret;
+ }
+@@ -7092,7 +7380,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
+ 		goto err;
+ 
+ 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
+-	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
++	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
+ 
+ 	ret = BEGIN_LP_RING(6);
+ 	if (ret)
+@@ -7106,16 +7394,15 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
+ 	OUT_RING(MI_NOOP);
+ 	OUT_RING(MI_DISPLAY_FLIP_I915 |
+ 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+-	OUT_RING(fb->pitch);
++	OUT_RING(fb->pitches[0]);
+ 	OUT_RING(obj->gtt_offset + offset);
+ 	OUT_RING(MI_NOOP);
+ 
+-	intel_mark_page_flip_active(intel_crtc);
+ 	ADVANCE_LP_RING();
+ 	return 0;
+ 
+ err_unpin:
+-	i915_gem_object_unpin(obj);
++	intel_unpin_fb_obj(obj);
+ err:
+ 	return ret;
+ }
+@@ -7144,7 +7431,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
+ 	 */
+ 	OUT_RING(MI_DISPLAY_FLIP |
+ 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+-	OUT_RING(fb->pitch);
++	OUT_RING(fb->pitches[0]);
+ 	OUT_RING(obj->gtt_offset | obj->tiling_mode);
+ 
+ 	/* XXX Enabling the panel-fitter across page-flip is so far
+@@ -7154,15 +7441,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
+ 	pf = 0;
+ 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ 	OUT_RING(pf | pipesrc);
+-
+-	intel_mark_page_flip_active(intel_crtc);
+-
+-	intel_mark_page_flip_active(intel_crtc);
+ 	ADVANCE_LP_RING();
+ 	return 0;
+ 
+ err_unpin:
+-	i915_gem_object_unpin(obj);
++	intel_unpin_fb_obj(obj);
+ err:
+ 	return ret;
+ }
+@@ -7187,17 +7470,23 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
+ 
+ 	OUT_RING(MI_DISPLAY_FLIP |
+ 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+-	OUT_RING(fb->pitch | obj->tiling_mode);
++	OUT_RING(fb->pitches[0] | obj->tiling_mode);
+ 	OUT_RING(obj->gtt_offset);
+ 
+-	pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
++	/* Contrary to the suggestions in the documentation,
++	 * "Enable Panel Fitter" does not seem to be required when page
++	 * flipping with a non-native mode, and worse causes a normal
++	 * modeset to fail.
++	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
++	 */
++	pf = 0;
+ 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ 	OUT_RING(pf | pipesrc);
+ 	ADVANCE_LP_RING();
+ 	return 0;
+ 
+ err_unpin:
+-	i915_gem_object_unpin(obj);
++	intel_unpin_fb_obj(obj);
+ err:
+ 	return ret;
+ }
+@@ -7244,16 +7533,14 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
+ 		goto err_unpin;
+ 
+ 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
+-	intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
++	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
+ 	intel_ring_emit(ring, (obj->gtt_offset));
+ 	intel_ring_emit(ring, (MI_NOOP));
+-
+-	intel_mark_page_flip_active(intel_crtc);
+ 	intel_ring_advance(ring);
+ 	return 0;
+ 
+ err_unpin:
+-	i915_gem_object_unpin(obj);
++	intel_unpin_fb_obj(obj);
+ err:
+ 	return ret;
+ }
+@@ -7551,10 +7838,9 @@ static void intel_setup_outputs(struct drm_device *dev)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_encoder *encoder;
+ 	bool dpd_is_edp = false;
+-	bool has_lvds = false;
++	bool has_lvds;
+ 
+-	if (IS_MOBILE(dev) && !IS_I830(dev))
+-		has_lvds = intel_lvds_init(dev);
++	has_lvds = intel_lvds_init(dev);
+ 	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
+ 		/* disable the panel fitter on everything but LVDS */
+ 		I915_WRITE(PFIT_CONTROL, 0);
+@@ -7683,7 +7969,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
+ 
+ int intel_framebuffer_init(struct drm_device *dev,
+ 			   struct intel_framebuffer *intel_fb,
+-			   struct drm_mode_fb_cmd *mode_cmd,
++			   struct drm_mode_fb_cmd2 *mode_cmd,
+ 			   struct drm_i915_gem_object *obj)
+ {
+ 	int ret;
+@@ -7691,21 +7977,27 @@ int intel_framebuffer_init(struct drm_device *dev,
+ 	if (obj->tiling_mode == I915_TILING_Y)
+ 		return -EINVAL;
+ 
+-	if (mode_cmd->pitch & 63)
++	if (mode_cmd->pitches[0] & 63)
+ 		return -EINVAL;
+ 
+-	switch (mode_cmd->bpp) {
+-	case 8:
+-	case 16:
+-		/* Only pre-ILK can handle 5:5:5 */
+-		if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
+-			return -EINVAL;
++	switch (mode_cmd->pixel_format) {
++	case DRM_FORMAT_RGB332:
++	case DRM_FORMAT_RGB565:
++	case DRM_FORMAT_XRGB8888:
++	case DRM_FORMAT_XBGR8888:
++	case DRM_FORMAT_ARGB8888:
++	case DRM_FORMAT_XRGB2101010:
++	case DRM_FORMAT_ARGB2101010:
++		/* RGB formats are common across chipsets */
+ 		break;
+-
+-	case 24:
+-	case 32:
++	case DRM_FORMAT_YUYV:
++	case DRM_FORMAT_UYVY:
++	case DRM_FORMAT_YVYU:
++	case DRM_FORMAT_VYUY:
+ 		break;
+ 	default:
++		DRM_DEBUG_KMS("unsupported pixel format %u\n",
++				mode_cmd->pixel_format);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -7723,11 +8015,12 @@ int intel_framebuffer_init(struct drm_device *dev,
+ static struct drm_framebuffer *
+ intel_user_framebuffer_create(struct drm_device *dev,
+ 			      struct drm_file *filp,
+-			      struct drm_mode_fb_cmd *mode_cmd)
++			      struct drm_mode_fb_cmd2 *mode_cmd)
+ {
+ 	struct drm_i915_gem_object *obj;
+ 
+-	obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
++	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
++						mode_cmd->handles[0]));
+ 	if (&obj->base == NULL)
+ 		return ERR_PTR(-ENOENT);
+ 
+@@ -7996,7 +8289,7 @@ void intel_init_emon(struct drm_device *dev)
+ 	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
+ }
+ 
+-static bool intel_enable_rc6(struct drm_device *dev)
++static int intel_enable_rc6(struct drm_device *dev)
+ {
+ 	/*
+ 	 * Respect the kernel parameter if it is set
+@@ -8014,11 +8307,11 @@ static bool intel_enable_rc6(struct drm_device *dev)
+ 	 * Disable rc6 on Sandybridge
+ 	 */
+ 	if (INTEL_INFO(dev)->gen == 6) {
+-		DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
+-		return 0;
++		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
++		return INTEL_RC6_ENABLE;
+ 	}
+-	DRM_DEBUG_DRIVER("RC6 enabled\n");
+-	return 1;
++	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
++	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
+ }
+ 
+ void gen6_enable_rps(struct drm_i915_private *dev_priv)
+@@ -8026,7 +8319,9 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
+ 	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ 	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ 	u32 pcu_mbox, rc6_mask = 0;
++	u32 gtfifodbg;
+ 	int cur_freq, min_freq, max_freq;
++	int rc6_mode;
+ 	int i;
+ 
+ 	/* Here begins a magic sequence of register writes to enable
+@@ -8037,6 +8332,13 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
+ 	 */
+ 	I915_WRITE(GEN6_RC_STATE, 0);
+ 	mutex_lock(&dev_priv->dev->struct_mutex);
++
++	/* Clear the DBG now so we don't confuse earlier errors */
++	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
++		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
++		I915_WRITE(GTFIFODBG, gtfifodbg);
++	}
++
+ 	gen6_gt_force_wake_get(dev_priv);
+ 
+ 	/* disable the counters and set deterministic thresholds */
+@@ -8057,9 +8359,20 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
+ 	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+ 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+ 
+-	if (intel_enable_rc6(dev_priv->dev))
+-		rc6_mask = GEN6_RC_CTL_RC6_ENABLE |
+-			((IS_GEN7(dev_priv->dev)) ? GEN6_RC_CTL_RC6p_ENABLE : 0);
++	rc6_mode = intel_enable_rc6(dev_priv->dev);
++	if (rc6_mode & INTEL_RC6_ENABLE)
++		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
++
++	if (rc6_mode & INTEL_RC6p_ENABLE)
++		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
++
++	if (rc6_mode & INTEL_RC6pp_ENABLE)
++		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
++
++	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
++			(rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
++			(rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
++			(rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
+ 
+ 	I915_WRITE(GEN6_RC_CONTROL,
+ 		   rc6_mask |
+@@ -8279,10 +8592,18 @@ static void gen6_init_clock_gating(struct drm_device *dev)
+ 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
+ 		   ILK_ELPIN_409_SELECT);
+ 
++	/* WaDisableHiZPlanesWhenMSAAEnabled */
++	I915_WRITE(_3D_CHICKEN,
++		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
++
+ 	I915_WRITE(WM3_LP_ILK, 0);
+ 	I915_WRITE(WM2_LP_ILK, 0);
+ 	I915_WRITE(WM1_LP_ILK, 0);
+ 
++	I915_WRITE(GEN6_UCGCTL1,
++		   I915_READ(GEN6_UCGCTL1) |
++		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
++
+ 	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ 	 * gating disable must be set.  Failure to set it results in
+ 	 * flickering pixels due to Z write ordering failures after
+@@ -8361,6 +8682,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
+ 
+ 	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+ 
++	I915_WRITE(IVB_CHICKEN3,
++		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
++		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
++
+ 	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+@@ -8671,9 +8996,15 @@ static void intel_init_display(struct drm_device *dev)
+ 		if (IS_IVYBRIDGE(dev)) {
+ 			u32	ecobus;
+ 
++			/* A small trick here - if the bios hasn't configured MT forcewake,
++			 * and if the device is in RC6, then force_wake_mt_get will not wake
++			 * the device and the ECOBUS read will return zero. Which will be
++			 * (correctly) interpreted by the test below as MT forcewake being
++			 * disabled.
++			 */
+ 			mutex_lock(&dev->struct_mutex);
+ 			__gen6_gt_force_wake_mt_get(dev_priv);
+-			ecobus = I915_READ(ECOBUS);
++			ecobus = I915_READ_NOTRACE(ECOBUS);
+ 			__gen6_gt_force_wake_mt_put(dev_priv);
+ 			mutex_unlock(&dev->struct_mutex);
+ 
+@@ -8705,6 +9036,7 @@ static void intel_init_display(struct drm_device *dev)
+ 		} else if (IS_GEN6(dev)) {
+ 			if (SNB_READ_WM0_LATENCY()) {
+ 				dev_priv->display.update_wm = sandybridge_update_wm;
++				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ 			} else {
+ 				DRM_DEBUG_KMS("Failed to read display plane latency. "
+ 					      "Disable CxSR\n");
+@@ -8718,6 +9050,7 @@ static void intel_init_display(struct drm_device *dev)
+ 			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+ 			if (SNB_READ_WM0_LATENCY()) {
+ 				dev_priv->display.update_wm = sandybridge_update_wm;
++				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
+ 			} else {
+ 				DRM_DEBUG_KMS("Failed to read display plane latency. "
+ 					      "Disable CxSR\n");
+@@ -8830,8 +9163,6 @@ struct intel_quirk {
+ };
+ 
+ struct intel_quirk intel_quirks[] = {
+-	/* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
+-	{ 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
+ 	/* HP Mini needs pipe A force quirk (LP: #322104) */
+ 	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
+ 
+@@ -8898,33 +9229,19 @@ static void i915_disable_vga(struct drm_device *dev)
+ 	POSTING_READ(vga_reg);
+ }
+ 
+-void i915_redisable_vga(struct drm_device *dev)
+-{
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+-	u32 vga_reg;
+-
+-	if (HAS_PCH_SPLIT(dev))
+-		vga_reg = CPU_VGACNTRL;
+-	else
+-		vga_reg = VGACNTRL;
+-
+-	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+-		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+-		I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+-		POSTING_READ(vga_reg);
+-	}
+-}
+-
+ void intel_modeset_init(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	int i;
++	int i, ret;
+ 
+ 	drm_mode_config_init(dev);
+ 
+ 	dev->mode_config.min_width = 0;
+ 	dev->mode_config.min_height = 0;
+ 
++	dev->mode_config.preferred_depth = 24;
++	dev->mode_config.prefer_shadow = 1;
++
+ 	dev->mode_config.funcs = (void *)&intel_mode_funcs;
+ 
+ 	intel_init_quirks(dev);
+@@ -8948,6 +9265,9 @@ void intel_modeset_init(struct drm_device *dev)
+ 
+ 	for (i = 0; i < dev_priv->num_pipe; i++) {
+ 		intel_crtc_init(dev, i);
++		ret = intel_plane_init(dev, i);
++		if (ret)
++			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
+ 	}
+ 
+ 	/* Just disable it once at startup */
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index c8ecaab..069725c 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -49,7 +49,7 @@ struct intel_dp {
+ 	uint32_t DP;
+ 	uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
+ 	bool has_audio;
+-	int force_audio;
++	enum hdmi_force_audio force_audio;
+ 	uint32_t color_range;
+ 	int dpms_mode;
+ 	uint8_t link_bw;
+@@ -384,7 +384,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
+ 		else
+ 			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
+ 	} else if (HAS_PCH_SPLIT(dev))
+-		aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
++		aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
+ 	else
+ 		aux_clock_divider = intel_hrawclk(dev) / 2;
+ 
+@@ -437,6 +437,10 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
+ 			   DP_AUX_CH_CTL_DONE |
+ 			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ 			   DP_AUX_CH_CTL_RECEIVE_ERROR);
++
++		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
++			      DP_AUX_CH_CTL_RECEIVE_ERROR))
++			continue;
+ 		if (status & DP_AUX_CH_CTL_DONE)
+ 			break;
+ 	}
+@@ -2137,8 +2141,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
+ 	if (status != connector_status_connected)
+ 		return status;
+ 
+-	if (intel_dp->force_audio) {
+-		intel_dp->has_audio = intel_dp->force_audio > 0;
++	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
++		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
+ 	} else {
+ 		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
+ 		if (edid) {
+@@ -2238,10 +2242,10 @@ intel_dp_set_property(struct drm_connector *connector,
+ 
+ 		intel_dp->force_audio = i;
+ 
+-		if (i == 0)
++		if (i == HDMI_AUDIO_AUTO)
+ 			has_audio = intel_dp_detect_audio(connector);
+ 		else
+-			has_audio = i > 0;
++			has_audio = (i == HDMI_AUDIO_ON);
+ 
+ 		if (has_audio == intel_dp->has_audio)
+ 			return 0;
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 5212284..cd623e8 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -26,6 +26,7 @@
+ #define __INTEL_DRV_H__
+ 
+ #include <linux/i2c.h>
++#include "i915_drm.h"
+ #include "i915_drv.h"
+ #include "drm_crtc.h"
+ #include "drm_crtc_helper.h"
+@@ -39,7 +40,7 @@
+ 			ret__ = -ETIMEDOUT;				\
+ 			break;						\
+ 		}							\
+-		if (W && !(in_atomic() || in_dbg_master())) msleep(W);	\
++		if (W && drm_can_sleep()) msleep(W);	\
+ 	}								\
+ 	ret__;								\
+ })
+@@ -47,13 +48,6 @@
+ #define wait_for(COND, MS) _wait_for(COND, MS, 1)
+ #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+ 
+-#define MSLEEP(x) do { \
+-	if (in_dbg_master()) \
+-		mdelay(x); \
+-	else \
+-		msleep(x); \
+-} while (0)
+-
+ #define KHz(x) (1000*x)
+ #define MHz(x) KHz(1000*x)
+ 
+@@ -111,6 +105,10 @@
+ #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+ #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+ #define INTEL_MODE_DP_FORCE_6BPC (0x10)
++/* This flag must be set by the encoder's mode_fixup if it changes the crtc
++ * timings in the mode to prevent the crtc fixup from overwriting them.
++ * Currently only lvds needs that. */
++#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
+ 
+ static inline void
+ intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+@@ -177,10 +175,32 @@ struct intel_crtc {
+ 	bool use_pll_a;
+ };
+ 
++struct intel_plane {
++	struct drm_plane base;
++	enum pipe pipe;
++	struct drm_i915_gem_object *obj;
++	bool primary_disabled;
++	int max_downscale;
++	u32 lut_r[1024], lut_g[1024], lut_b[1024];
++	void (*update_plane)(struct drm_plane *plane,
++			     struct drm_framebuffer *fb,
++			     struct drm_i915_gem_object *obj,
++			     int crtc_x, int crtc_y,
++			     unsigned int crtc_w, unsigned int crtc_h,
++			     uint32_t x, uint32_t y,
++			     uint32_t src_w, uint32_t src_h);
++	void (*disable_plane)(struct drm_plane *plane);
++	int (*update_colorkey)(struct drm_plane *plane,
++			       struct drm_intel_sprite_colorkey *key);
++	void (*get_colorkey)(struct drm_plane *plane,
++			     struct drm_intel_sprite_colorkey *key);
++};
++
+ #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+ #define to_intel_connector(x) container_of(x, struct intel_connector, base)
+ #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
+ #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
++#define to_intel_plane(x) container_of(x, struct intel_plane, base)
+ 
+ #define DIP_HEADER_SIZE	5
+ 
+@@ -257,10 +277,7 @@ struct intel_unpin_work {
+ 	struct drm_i915_gem_object *old_fb_obj;
+ 	struct drm_i915_gem_object *pending_flip_obj;
+ 	struct drm_pending_vblank_event *event;
+-	atomic_t pending;
+-#define INTEL_FLIP_INACTIVE	0
+-#define INTEL_FLIP_PENDING	1
+-#define INTEL_FLIP_COMPLETE	2
++	int pending;
+ 	bool enable_stall_check;
+ };
+ 
+@@ -271,6 +288,8 @@ struct intel_fbc_work {
+ 	int interval;
+ };
+ 
++int intel_connector_update_modes(struct drm_connector *connector,
++				struct edid *edid);
+ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+ extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
+ 
+@@ -293,6 +312,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ extern bool intel_dpd_is_edp(struct drm_device *dev);
+ extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
+ extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
++extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
+ 
+ /* intel_panel.c */
+ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+@@ -360,10 +380,11 @@ extern void intel_init_emon(struct drm_device *dev);
+ extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+ 				      struct drm_i915_gem_object *obj,
+ 				      struct intel_ring_buffer *pipelined);
++extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
+ 
+ extern int intel_framebuffer_init(struct drm_device *dev,
+ 				  struct intel_framebuffer *ifb,
+-				  struct drm_mode_fb_cmd *mode_cmd,
++				  struct drm_mode_fb_cmd2 *mode_cmd,
+ 				  struct drm_i915_gem_object *obj);
+ extern int intel_fbdev_init(struct drm_device *dev);
+ extern void intel_fbdev_fini(struct drm_device *dev);
+@@ -383,9 +404,25 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
+ extern void intel_fb_output_poll_changed(struct drm_device *dev);
+ extern void intel_fb_restore_mode(struct drm_device *dev);
+ 
++extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
++			bool state);
++#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
++#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
++
+ extern void intel_init_clock_gating(struct drm_device *dev);
+ extern void intel_write_eld(struct drm_encoder *encoder,
+ 			    struct drm_display_mode *mode);
+ extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+ 
++/* For use by IVB LP watermark workaround in intel_sprite.c */
++extern void sandybridge_update_wm(struct drm_device *dev);
++extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
++					   uint32_t sprite_width,
++					   int pixel_size);
++
++extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
++				     struct drm_file *file_priv);
++extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
++				     struct drm_file *file_priv);
++
+ #endif /* __INTEL_DRV_H__ */
+diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
+index 6eda1b5..020a7d7 100644
+--- a/drivers/gpu/drm/i915/intel_dvo.c
++++ b/drivers/gpu/drm/i915/intel_dvo.c
+@@ -157,7 +157,6 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
+ 		C(vsync_end);
+ 		C(vtotal);
+ 		C(clock);
+-		drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ #undef C
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
+index d0ce34b..6e9ee33 100644
+--- a/drivers/gpu/drm/i915/intel_fb.c
++++ b/drivers/gpu/drm/i915/intel_fb.c
+@@ -65,7 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct fb_info *info;
+ 	struct drm_framebuffer *fb;
+-	struct drm_mode_fb_cmd mode_cmd;
++	struct drm_mode_fb_cmd2 mode_cmd;
+ 	struct drm_i915_gem_object *obj;
+ 	struct device *device = &dev->pdev->dev;
+ 	int size, ret;
+@@ -77,11 +77,12 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
+ 	mode_cmd.width = sizes->surface_width;
+ 	mode_cmd.height = sizes->surface_height;
+ 
+-	mode_cmd.bpp = sizes->surface_bpp;
+-	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
+-	mode_cmd.depth = sizes->surface_depth;
++	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
++						      8), 64);
++	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
++							  sizes->surface_depth);
+ 
+-	size = mode_cmd.pitch * mode_cmd.height;
++	size = mode_cmd.pitches[0] * mode_cmd.height;
+ 	size = ALIGN(size, PAGE_SIZE);
+ 	obj = i915_gem_alloc_object(dev, size);
+ 	if (!obj) {
+@@ -148,14 +149,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
+ 
+ //	memset(info->screen_base, 0, size);
+ 
+-	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
++	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ 	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
+ 
+-	info->pixmap.size = 64*1024;
+-	info->pixmap.buf_align = 8;
+-	info->pixmap.access_align = 32;
+-	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+-	info->pixmap.scan_align = 1;
++	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+ 
+ 	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+ 		      fb->width, fb->height,
+@@ -279,8 +276,18 @@ void intel_fb_restore_mode(struct drm_device *dev)
+ {
+ 	int ret;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_mode_config *config = &dev->mode_config;
++	struct drm_plane *plane;
++
++	mutex_lock(&dev->mode_config.mutex);
+ 
+ 	ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
+ 	if (ret)
+ 		DRM_DEBUG("failed to restore crtc mode\n");
++
++	/* Be sure to shut off any planes that may be active */
++	list_for_each_entry(plane, &config->plane_list, head)
++		plane->funcs->disable_plane(plane);
++
++	mutex_unlock(&dev->mode_config.mutex);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 497da2a..c60100d 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -44,7 +44,7 @@ struct intel_hdmi {
+ 	uint32_t color_range;
+ 	bool has_hdmi_sink;
+ 	bool has_audio;
+-	int force_audio;
++	enum hdmi_force_audio force_audio;
+ 	void (*write_infoframe)(struct drm_encoder *encoder,
+ 				struct dip_infoframe *frame);
+ };
+@@ -143,9 +143,6 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder,
+ 		I915_WRITE(VIDEO_DIP_DATA, *data);
+ 		data++;
+ 	}
+-	/* Write every possible data byte to force correct ECC calculation. */
+-	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+-		I915_WRITE(VIDEO_DIP_DATA, 0);
+ 	mmiowb();
+ 
+ 	flags |= intel_infoframe_flags(frame);
+@@ -179,9 +176,6 @@ static void ironlake_write_infoframe(struct drm_encoder *encoder,
+ 		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ 		data++;
+ 	}
+-	/* Write every possible data byte to force correct ECC calculation. */
+-	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
+-		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ 	mmiowb();
+ 
+ 	flags |= intel_infoframe_flags(frame);
+@@ -351,7 +345,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
+ 	if (edid) {
+ 		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ 			status = connector_status_connected;
+-			intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
++			if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
++				intel_hdmi->has_hdmi_sink =
++						drm_detect_hdmi_monitor(edid);
+ 			intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
+ 		}
+ 		connector->display_info.raw_edid = NULL;
+@@ -359,8 +355,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
+ 	}
+ 
+ 	if (status == connector_status_connected) {
+-		if (intel_hdmi->force_audio)
+-			intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
++		if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
++			intel_hdmi->has_audio =
++				(intel_hdmi->force_audio == HDMI_AUDIO_ON);
+ 	}
+ 
+ 	return status;
+@@ -414,7 +411,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
+ 		return ret;
+ 
+ 	if (property == dev_priv->force_audio_property) {
+-		int i = val;
++		enum hdmi_force_audio i = val;
+ 		bool has_audio;
+ 
+ 		if (i == intel_hdmi->force_audio)
+@@ -422,13 +419,13 @@ intel_hdmi_set_property(struct drm_connector *connector,
+ 
+ 		intel_hdmi->force_audio = i;
+ 
+-		if (i == 0)
++		if (i == HDMI_AUDIO_AUTO)
+ 			has_audio = intel_hdmi_detect_audio(connector);
+ 		else
+-			has_audio = i > 0;
++			has_audio = (i == HDMI_AUDIO_ON);
+ 
+-		if (has_audio == intel_hdmi->has_audio)
+-			return 0;
++		if (i == HDMI_AUDIO_OFF_DVI)
++			intel_hdmi->has_hdmi_sink = 0;
+ 
+ 		intel_hdmi->has_audio = has_audio;
+ 		goto done;
+@@ -526,7 +523,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+ 	intel_encoder->type = INTEL_OUTPUT_HDMI;
+ 
+ 	connector->polled = DRM_CONNECTOR_POLL_HPD;
+-	connector->interlace_allowed = 0;
++	connector->interlace_allowed = 1;
+ 	connector->doublescan_allowed = 0;
+ 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index d30cccc..8fdc957 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -37,7 +37,7 @@
+ 
+ /* Intel GPIO access functions */
+ 
+-#define I2C_RISEFALL_TIME 20
++#define I2C_RISEFALL_TIME 10
+ 
+ static inline struct intel_gmbus *
+ to_intel_gmbus(struct i2c_adapter *i2c)
+@@ -45,13 +45,6 @@ to_intel_gmbus(struct i2c_adapter *i2c)
+ 	return container_of(i2c, struct intel_gmbus, adapter);
+ }
+ 
+-struct intel_gpio {
+-	struct i2c_adapter adapter;
+-	struct i2c_algo_bit_data algo;
+-	struct drm_i915_private *dev_priv;
+-	u32 reg;
+-};
+-
+ void
+ intel_i2c_reset(struct drm_device *dev)
+ {
+@@ -78,15 +71,15 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+ 	I915_WRITE(DSPCLK_GATE_D, val);
+ }
+ 
+-static u32 get_reserved(struct intel_gpio *gpio)
++static u32 get_reserved(struct intel_gmbus *bus)
+ {
+-	struct drm_i915_private *dev_priv = gpio->dev_priv;
++	struct drm_i915_private *dev_priv = bus->dev_priv;
+ 	struct drm_device *dev = dev_priv->dev;
+ 	u32 reserved = 0;
+ 
+ 	/* On most chips, these bits must be preserved in software. */
+ 	if (!IS_I830(dev) && !IS_845G(dev))
+-		reserved = I915_READ_NOTRACE(gpio->reg) &
++		reserved = I915_READ_NOTRACE(bus->gpio_reg) &
+ 					     (GPIO_DATA_PULLUP_DISABLE |
+ 					      GPIO_CLOCK_PULLUP_DISABLE);
+ 
+@@ -95,29 +88,29 @@ static u32 get_reserved(struct intel_gpio *gpio)
+ 
+ static int get_clock(void *data)
+ {
+-	struct intel_gpio *gpio = data;
+-	struct drm_i915_private *dev_priv = gpio->dev_priv;
+-	u32 reserved = get_reserved(gpio);
+-	I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+-	I915_WRITE_NOTRACE(gpio->reg, reserved);
+-	return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
++	struct intel_gmbus *bus = data;
++	struct drm_i915_private *dev_priv = bus->dev_priv;
++	u32 reserved = get_reserved(bus);
++	I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
++	I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
++	return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
+ }
+ 
+ static int get_data(void *data)
+ {
+-	struct intel_gpio *gpio = data;
+-	struct drm_i915_private *dev_priv = gpio->dev_priv;
+-	u32 reserved = get_reserved(gpio);
+-	I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+-	I915_WRITE_NOTRACE(gpio->reg, reserved);
+-	return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
++	struct intel_gmbus *bus = data;
++	struct drm_i915_private *dev_priv = bus->dev_priv;
++	u32 reserved = get_reserved(bus);
++	I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
++	I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
++	return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
+ }
+ 
+ static void set_clock(void *data, int state_high)
+ {
+-	struct intel_gpio *gpio = data;
+-	struct drm_i915_private *dev_priv = gpio->dev_priv;
+-	u32 reserved = get_reserved(gpio);
++	struct intel_gmbus *bus = data;
++	struct drm_i915_private *dev_priv = bus->dev_priv;
++	u32 reserved = get_reserved(bus);
+ 	u32 clock_bits;
+ 
+ 	if (state_high)
+@@ -126,15 +119,15 @@ static void set_clock(void *data, int state_high)
+ 		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ 			GPIO_CLOCK_VAL_MASK;
+ 
+-	I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
+-	POSTING_READ(gpio->reg);
++	I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits);
++	POSTING_READ(bus->gpio_reg);
+ }
+ 
+ static void set_data(void *data, int state_high)
+ {
+-	struct intel_gpio *gpio = data;
+-	struct drm_i915_private *dev_priv = gpio->dev_priv;
+-	u32 reserved = get_reserved(gpio);
++	struct intel_gmbus *bus = data;
++	struct drm_i915_private *dev_priv = bus->dev_priv;
++	u32 reserved = get_reserved(bus);
+ 	u32 data_bits;
+ 
+ 	if (state_high)
+@@ -143,13 +136,14 @@ static void set_data(void *data, int state_high)
+ 		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+ 			GPIO_DATA_VAL_MASK;
+ 
+-	I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
+-	POSTING_READ(gpio->reg);
++	I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits);
++	POSTING_READ(bus->gpio_reg);
+ }
+ 
+-static struct i2c_adapter *
+-intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
++static bool
++intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
+ {
++	struct drm_i915_private *dev_priv = bus->dev_priv;
+ 	static const int map_pin_to_reg[] = {
+ 		0,
+ 		GPIOB,
+@@ -160,65 +154,48 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
+ 		0,
+ 		GPIOF,
+ 	};
+-	struct intel_gpio *gpio;
++	struct i2c_algo_bit_data *algo;
+ 
+ 	if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
+-		return NULL;
++		return false;
+ 
+-	gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
+-	if (gpio == NULL)
+-		return NULL;
++	algo = &bus->bit_algo;
+ 
+-	gpio->reg = map_pin_to_reg[pin];
++	bus->gpio_reg = map_pin_to_reg[pin];
+ 	if (HAS_PCH_SPLIT(dev_priv->dev))
+-		gpio->reg += PCH_GPIOA - GPIOA;
+-	gpio->dev_priv = dev_priv;
+-
+-	snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
+-		 "i915 GPIO%c", "?BACDE?F"[pin]);
+-	gpio->adapter.owner = THIS_MODULE;
+-	gpio->adapter.algo_data	= &gpio->algo;
+-	gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+-	gpio->algo.setsda = set_data;
+-	gpio->algo.setscl = set_clock;
+-	gpio->algo.getsda = get_data;
+-	gpio->algo.getscl = get_clock;
+-	gpio->algo.udelay = I2C_RISEFALL_TIME;
+-	gpio->algo.timeout = usecs_to_jiffies(2200);
+-	gpio->algo.data = gpio;
+-
+-	if (i2c_bit_add_bus(&gpio->adapter))
+-		goto out_free;
+-
+-	return &gpio->adapter;
+-
+-out_free:
+-	kfree(gpio);
+-	return NULL;
++		bus->gpio_reg += PCH_GPIOA - GPIOA;
++
++	bus->adapter.algo_data = algo;
++	algo->setsda = set_data;
++	algo->setscl = set_clock;
++	algo->getsda = get_data;
++	algo->getscl = get_clock;
++	algo->udelay = I2C_RISEFALL_TIME;
++	algo->timeout = usecs_to_jiffies(2200);
++	algo->data = bus;
++
++	return true;
+ }
+ 
+ static int
+-intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
+-		     struct i2c_adapter *adapter,
++intel_i2c_quirk_xfer(struct intel_gmbus *bus,
+ 		     struct i2c_msg *msgs,
+ 		     int num)
+ {
+-	struct intel_gpio *gpio = container_of(adapter,
+-					       struct intel_gpio,
+-					       adapter);
++	struct drm_i915_private *dev_priv = bus->dev_priv;
+ 	int ret;
+ 
+ 	intel_i2c_reset(dev_priv->dev);
+ 
+ 	intel_i2c_quirk_set(dev_priv, true);
+-	set_data(gpio, 1);
+-	set_clock(gpio, 1);
++	set_data(bus, 1);
++	set_clock(bus, 1);
+ 	udelay(I2C_RISEFALL_TIME);
+ 
+-	ret = adapter->algo->master_xfer(adapter, msgs, num);
++	ret = i2c_bit_algo.master_xfer(&bus->adapter, msgs, num);
+ 
+-	set_data(gpio, 1);
+-	set_clock(gpio, 1);
++	set_data(bus, 1);
++	set_clock(bus, 1);
+ 	intel_i2c_quirk_set(dev_priv, false);
+ 
+ 	return ret;
+@@ -232,12 +209,15 @@ gmbus_xfer(struct i2c_adapter *adapter,
+ 	struct intel_gmbus *bus = container_of(adapter,
+ 					       struct intel_gmbus,
+ 					       adapter);
+-	struct drm_i915_private *dev_priv = adapter->algo_data;
+-	int i, reg_offset;
++	struct drm_i915_private *dev_priv = bus->dev_priv;
++	int i, reg_offset, ret;
+ 
+-	if (bus->force_bit)
+-		return intel_i2c_quirk_xfer(dev_priv,
+-					    bus->force_bit, msgs, num);
++	mutex_lock(&dev_priv->gmbus_mutex);
++
++	if (bus->force_bit) {
++		ret = intel_i2c_quirk_xfer(bus, msgs, num);
++		goto out;
++	}
+ 
+ 	reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
+ 
+@@ -249,7 +229,8 @@ gmbus_xfer(struct i2c_adapter *adapter,
+ 
+ 		if (msgs[i].flags & I2C_M_RD) {
+ 			I915_WRITE(GMBUS1 + reg_offset,
+-				   GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
++				   GMBUS_CYCLE_WAIT |
++				   (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+ 				   (len << GMBUS_BYTE_COUNT_SHIFT) |
+ 				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ 				   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+@@ -278,7 +259,8 @@ gmbus_xfer(struct i2c_adapter *adapter,
+ 
+ 			I915_WRITE(GMBUS3 + reg_offset, val);
+ 			I915_WRITE(GMBUS1 + reg_offset,
+-				   (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
++				   GMBUS_CYCLE_WAIT |
++				   (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+ 				   (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
+ 				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ 				   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+@@ -317,11 +299,15 @@ clear_err:
+ 	I915_WRITE(GMBUS1 + reg_offset, 0);
+ 
+ done:
+-	/* Mark the GMBUS interface as disabled. We will re-enable it at the
+-	 * start of the next xfer, till then let it sleep.
++	/* Mark the GMBUS interface as disabled after waiting for idle.
++	 * We will re-enable it at the start of the next xfer,
++	 * till then let it sleep.
+ 	 */
++	if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10))
++		DRM_INFO("GMBUS timed out waiting for idle\n");
+ 	I915_WRITE(GMBUS0 + reg_offset, 0);
+-	return i;
++	ret = i;
++	goto out;
+ 
+ timeout:
+ 	DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
+@@ -329,23 +315,21 @@ timeout:
+ 	I915_WRITE(GMBUS0 + reg_offset, 0);
+ 
+ 	/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+-	bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
+-	if (!bus->force_bit)
+-		return -ENOMEM;
+-
+-	return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
++	if (!bus->has_gpio) {
++		ret = -EIO;
++	} else {
++		bus->force_bit = true;
++		ret = intel_i2c_quirk_xfer(bus, msgs, num);
++	}
++out:
++	mutex_unlock(&dev_priv->gmbus_mutex);
++	return ret;
+ }
+ 
+ static u32 gmbus_func(struct i2c_adapter *adapter)
+ {
+-	struct intel_gmbus *bus = container_of(adapter,
+-					       struct intel_gmbus,
+-					       adapter);
+-
+-	if (bus->force_bit)
+-		bus->force_bit->algo->functionality(bus->force_bit);
+-
+-	return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
++	return i2c_bit_algo.functionality(adapter) &
++		(I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ 		/* I2C_FUNC_10BIT_ADDR | */
+ 		I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ 		I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+@@ -375,11 +359,13 @@ int intel_setup_gmbus(struct drm_device *dev)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	int ret, i;
+ 
+-	dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
++	dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
+ 				  GFP_KERNEL);
+ 	if (dev_priv->gmbus == NULL)
+ 		return -ENOMEM;
+ 
++	mutex_init(&dev_priv->gmbus_mutex);
++
+ 	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ 		struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ 
+@@ -391,7 +377,7 @@ int intel_setup_gmbus(struct drm_device *dev)
+ 			 names[i]);
+ 
+ 		bus->adapter.dev.parent = &dev->pdev->dev;
+-		bus->adapter.algo_data	= dev_priv;
++		bus->dev_priv = dev_priv;
+ 
+ 		bus->adapter.algo = &gmbus_algorithm;
+ 		ret = i2c_add_adapter(&bus->adapter);
+@@ -401,8 +387,11 @@ int intel_setup_gmbus(struct drm_device *dev)
+ 		/* By default use a conservative clock rate */
+ 		bus->reg0 = i | GMBUS_RATE_100KHZ;
+ 
++		bus->has_gpio = intel_gpio_setup(bus, i);
++
+ 		/* XXX force bit banging until GMBUS is fully debugged */
+-		bus->force_bit = intel_gpio_create(dev_priv, i);
++		if (bus->has_gpio)
++			bus->force_bit = true;
+ 	}
+ 
+ 	intel_i2c_reset(dev_priv->dev);
+@@ -430,19 +419,8 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+ {
+ 	struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ 
+-	if (force_bit) {
+-		if (bus->force_bit == NULL) {
+-			struct drm_i915_private *dev_priv = adapter->algo_data;
+-			bus->force_bit = intel_gpio_create(dev_priv,
+-							   bus->reg0 & 0xff);
+-		}
+-	} else {
+-		if (bus->force_bit) {
+-			i2c_del_adapter(bus->force_bit);
+-			kfree(bus->force_bit);
+-			bus->force_bit = NULL;
+-		}
+-	}
++	if (bus->has_gpio)
++		bus->force_bit = force_bit;
+ }
+ 
+ void intel_teardown_gmbus(struct drm_device *dev)
+@@ -455,10 +433,6 @@ void intel_teardown_gmbus(struct drm_device *dev)
+ 
+ 	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ 		struct intel_gmbus *bus = &dev_priv->gmbus[i];
+-		if (bus->force_bit) {
+-			i2c_del_adapter(bus->force_bit);
+-			kfree(bus->force_bit);
+-		}
+ 		i2c_del_adapter(&bus->adapter);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index c6d0966..a8b28c4 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -187,6 +187,8 @@ centre_horizontally(struct drm_display_mode *mode,
+ 
+ 	mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
+ 	mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
++
++	mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
+ }
+ 
+ static void
+@@ -208,6 +210,8 @@ centre_vertically(struct drm_display_mode *mode,
+ 
+ 	mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
+ 	mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
++
++	mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
+ }
+ 
+ static inline u32 panel_fitter_scaling(u32 source, u32 target)
+@@ -283,6 +287,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ 	for_each_pipe(pipe)
+ 		I915_WRITE(BCLRPAT(pipe), 0);
+ 
++	drm_mode_set_crtcinfo(adjusted_mode, 0);
++
+ 	switch (intel_lvds->fitting_mode) {
+ 	case DRM_MODE_SCALE_CENTER:
+ 		/*
+@@ -535,7 +541,6 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
+ 
+ 	mutex_lock(&dev->mode_config.mutex);
+ 	drm_helper_resume_force_mode(dev);
+-	i915_redisable_vga(dev);
+ 	mutex_unlock(&dev->mode_config.mutex);
+ 
+ 	return NOTIFY_OK;
+@@ -774,14 +779,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ 	},
+ 	{
+ 		.callback = intel_no_lvds_dmi_callback,
+-		.ident = "ZOTAC ZBOXSD-ID12/ID13",
+-		.matches = {
+-			DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
+-			DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
+-		},
+-	},
+-	{
+-		.callback = intel_no_lvds_dmi_callback,
+ 		.ident = "Gigabyte GA-D525TUD",
+ 		.matches = {
+ 			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+@@ -901,6 +898,18 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
+ 	return false;
+ }
+ 
++static bool intel_lvds_supported(struct drm_device *dev)
++{
++	/* With the introduction of the PCH we gained a dedicated
++	 * LVDS presence pin, use it. */
++	if (HAS_PCH_SPLIT(dev))
++		return true;
++
++	/* Otherwise LVDS was only attached to mobile products,
++	 * except for the inglorious 830gm */
++	return IS_MOBILE(dev) && !IS_I830(dev);
++}
++
+ /**
+  * intel_lvds_init - setup LVDS connectors on this device
+  * @dev: drm device
+@@ -922,6 +931,9 @@ bool intel_lvds_init(struct drm_device *dev)
+ 	int pipe;
+ 	u8 pin;
+ 
++	if (!intel_lvds_supported(dev))
++		return false;
++
+ 	/* Skip init on machines we know falsely report LVDS */
+ 	if (dmi_check_system(intel_no_lvds))
+ 		return false;
+diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
+index be2c6fe..9a2b270 100644
+--- a/drivers/gpu/drm/i915/intel_modes.c
++++ b/drivers/gpu/drm/i915/intel_modes.c
+@@ -28,6 +28,7 @@
+ #include <linux/fb.h>
+ #include <drm/drm_edid.h>
+ #include "drmP.h"
++#include "drm_edid.h"
+ #include "intel_drv.h"
+ #include "i915_drv.h"
+ 
+@@ -42,13 +43,13 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
+ 	u8 buf[2];
+ 	struct i2c_msg msgs[] = {
+ 		{
+-			.addr = 0x50,
++			.addr = DDC_ADDR,
+ 			.flags = 0,
+ 			.len = 1,
+ 			.buf = out_buf,
+ 		},
+ 		{
+-			.addr = 0x50,
++			.addr = DDC_ADDR,
+ 			.flags = I2C_M_RD,
+ 			.len = 1,
+ 			.buf = buf,
+@@ -59,6 +60,25 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
+ }
+ 
+ /**
++ * intel_connector_update_modes - update connector from edid
++ * @connector: DRM connector device to use
++ * @edid: previously read EDID information
++ */
++int intel_connector_update_modes(struct drm_connector *connector,
++				struct edid *edid)
++{
++	int ret;
++
++	drm_mode_connector_update_edid_property(connector, edid);
++	ret = drm_add_edid_modes(connector, edid);
++	drm_edid_to_eld(connector, edid);
++	connector->display_info.raw_edid = NULL;
++	kfree(edid);
++
++	return ret;
++}
++
++/**
+  * intel_ddc_get_modes - get modelist from monitor
+  * @connector: DRM connector device to use
+  * @adapter: i2c adapter
+@@ -69,24 +89,19 @@ int intel_ddc_get_modes(struct drm_connector *connector,
+ 			struct i2c_adapter *adapter)
+ {
+ 	struct edid *edid;
+-	int ret = 0;
+ 
+ 	edid = drm_get_edid(connector, adapter);
+-	if (edid) {
+-		drm_mode_connector_update_edid_property(connector, edid);
+-		ret = drm_add_edid_modes(connector, edid);
+-		drm_edid_to_eld(connector, edid);
+-		connector->display_info.raw_edid = NULL;
+-		kfree(edid);
+-	}
++	if (!edid)
++		return 0;
+ 
+-	return ret;
++	return intel_connector_update_modes(connector, edid);
+ }
+ 
+-static const char *force_audio_names[] = {
+-	"off",
+-	"auto",
+-	"on",
++static const struct drm_prop_enum_list force_audio_names[] = {
++	{ HDMI_AUDIO_OFF_DVI, "force-dvi" },
++	{ HDMI_AUDIO_OFF, "off" },
++	{ HDMI_AUDIO_AUTO, "auto" },
++	{ HDMI_AUDIO_ON, "on" },
+ };
+ 
+ void
+@@ -95,27 +110,24 @@ intel_attach_force_audio_property(struct drm_connector *connector)
+ 	struct drm_device *dev = connector->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_property *prop;
+-	int i;
+ 
+ 	prop = dev_priv->force_audio_property;
+ 	if (prop == NULL) {
+-		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
++		prop = drm_property_create_enum(dev, 0,
+ 					   "audio",
++					   force_audio_names,
+ 					   ARRAY_SIZE(force_audio_names));
+ 		if (prop == NULL)
+ 			return;
+ 
+-		for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
+-			drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
+-
+ 		dev_priv->force_audio_property = prop;
+ 	}
+ 	drm_connector_attach_property(connector, prop, 0);
+ }
+ 
+-static const char *broadcast_rgb_names[] = {
+-	"Full",
+-	"Limited 16:235",
++static const struct drm_prop_enum_list broadcast_rgb_names[] = {
++	{ 0, "Full" },
++	{ 1, "Limited 16:235" },
+ };
+ 
+ void
+@@ -124,19 +136,16 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+ 	struct drm_device *dev = connector->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_property *prop;
+-	int i;
+ 
+ 	prop = dev_priv->broadcast_rgb_property;
+ 	if (prop == NULL) {
+-		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
++		prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+ 					   "Broadcast RGB",
++					   broadcast_rgb_names,
+ 					   ARRAY_SIZE(broadcast_rgb_names));
+ 		if (prop == NULL)
+ 			return;
+ 
+-		for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
+-			drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+-
+ 		dev_priv->broadcast_rgb_property = prop;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
+index 478b51f..5ba5e66 100644
+--- a/drivers/gpu/drm/i915/intel_overlay.c
++++ b/drivers/gpu/drm/i915/intel_overlay.c
+@@ -25,8 +25,6 @@
+  *
+  * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
+  */
+-
+-#include <linux/seq_file.h>
+ #include "drmP.h"
+ #include "drm.h"
+ #include "i915_drm.h"
+@@ -227,7 +225,8 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+ 	}
+ 	overlay->last_flip_req = request->seqno;
+ 	overlay->flip_tail = tail;
+-	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
++	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
++				true);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -263,7 +262,7 @@ i830_activate_pipe_a(struct drm_device *dev)
+ 	DRM_DEBUG_DRIVER("Enabling pipe A in order to enable overlay\n");
+ 
+ 	mode = drm_mode_duplicate(dev, &vesa_640x480);
+-	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
++	drm_mode_set_crtcinfo(mode, 0);
+ 	if (!drm_crtc_helper_set_mode(&crtc->base, mode,
+ 				       crtc->base.x, crtc->base.y,
+ 				       crtc->base.fb))
+@@ -456,7 +455,8 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
+ 	if (overlay->last_flip_req == 0)
+ 		return 0;
+ 
+-	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
++	ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
++				true);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -943,10 +943,10 @@ static int check_overlay_dst(struct intel_overlay *overlay,
+ {
+ 	struct drm_display_mode *mode = &overlay->crtc->base.mode;
+ 
+-	if (rec->dst_x < mode->crtc_hdisplay &&
+-	    rec->dst_x + rec->dst_width <= mode->crtc_hdisplay &&
+-	    rec->dst_y < mode->crtc_vdisplay &&
+-	    rec->dst_y + rec->dst_height <= mode->crtc_vdisplay)
++	if (rec->dst_x < mode->hdisplay &&
++	    rec->dst_x + rec->dst_width <= mode->hdisplay &&
++	    rec->dst_y < mode->vdisplay &&
++	    rec->dst_y + rec->dst_height <= mode->vdisplay)
+ 		return 0;
+ 	else
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
+index 04d79fd..48177ec 100644
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -47,8 +47,6 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
+ 	adjusted_mode->vtotal = fixed_mode->vtotal;
+ 
+ 	adjusted_mode->clock = fixed_mode->clock;
+-
+-	drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ }
+ 
+ /* adjusted_mode has been preset to be the panel's fixed mode */
+@@ -141,8 +139,8 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+ 			dev_priv->saveBLC_PWM_CTL2 = val;
+ 		} else if (val == 0) {
+ 			I915_WRITE(BLC_PWM_PCH_CTL2,
+-				   dev_priv->saveBLC_PWM_CTL);
+-			val = dev_priv->saveBLC_PWM_CTL;
++				   dev_priv->saveBLC_PWM_CTL2);
++			val = dev_priv->saveBLC_PWM_CTL2;
+ 		}
+ 	} else {
+ 		val = I915_READ(BLC_PWM_CTL);
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 19085c0..c17325c 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -263,6 +263,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
+ 	I915_WRITE_HEAD(ring, 0);
+ 	ring->write_tail(ring, 0);
+ 
++	/* Initialize the ring. */
++	I915_WRITE_START(ring, obj->gtt_offset);
+ 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ 
+ 	/* G45 ring initialization fails to reset head to zero */
+@@ -288,11 +290,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
+ 		}
+ 	}
+ 
+-	/* Initialize the ring. This must happen _after_ we've cleared the ring
+-	 * registers with the above sequence (the readback of the HEAD registers
+-	 * also enforces ordering), otherwise the hw might lose the new ring
+-	 * register values. */
+-	I915_WRITE_START(ring, obj->gtt_offset);
+ 	I915_WRITE_CTL(ring,
+ 			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+ 			| RING_VALID);
+@@ -318,6 +315,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
+ 		ring->head = I915_READ_HEAD(ring);
+ 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ 		ring->space = ring_space(ring);
++		ring->last_retired_head = -1;
+ 	}
+ 
+ out:
+@@ -398,8 +396,6 @@ static int init_render_ring(struct intel_ring_buffer *ring)
+ 
+ 	if (INTEL_INFO(dev)->gen > 3) {
+ 		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+-		if (IS_GEN6(dev) || IS_GEN7(dev))
+-			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
+ 		I915_WRITE(MI_MODE, mode);
+ 		if (IS_GEN7(dev))
+ 			I915_WRITE(GFX_MODE_GEN7,
+@@ -609,6 +605,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
+ 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ 	scratch_addr += 128;
+ 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
++
+ 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ 			PIPE_CONTROL_WRITE_FLUSH |
+ 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+@@ -651,7 +648,7 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+ 	/* Workaround to force correct ordering between irq and seqno writes on
+ 	 * ivb (and maybe also on snb) by reading from a CS register (like
+ 	 * ACTHD) before reading the status page. */
+-	if (IS_GEN7(dev))
++	if (IS_GEN6(dev) || IS_GEN7(dev))
+ 		intel_ring_get_active_head(ring);
+ 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ }
+@@ -752,13 +749,13 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+ 	 */
+ 	if (IS_GEN7(dev)) {
+ 		switch (ring->id) {
+-		case RING_RENDER:
++		case RCS:
+ 			mmio = RENDER_HWS_PGA_GEN7;
+ 			break;
+-		case RING_BLT:
++		case BCS:
+ 			mmio = BLT_HWS_PGA_GEN7;
+ 			break;
+-		case RING_BSD:
++		case VCS:
+ 			mmio = BSD_HWS_PGA_GEN7;
+ 			break;
+ 		}
+@@ -824,8 +821,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+ 	/* It looks like we need to prevent the gt from suspending while waiting
+ 	 * for an notifiy irq, otherwise irqs seem to get lost on at least the
+ 	 * blt/bsd rings on ivb. */
+-	if (IS_GEN7(dev))
+-		gen6_gt_force_wake_get(dev_priv);
++	gen6_gt_force_wake_get(dev_priv);
+ 
+ 	spin_lock(&ring->irq_lock);
+ 	if (ring->irq_refcount++ == 0) {
+@@ -852,8 +848,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+ 	}
+ 	spin_unlock(&ring->irq_lock);
+ 
+-	if (IS_GEN7(dev))
+-		gen6_gt_force_wake_put(dev_priv);
++	gen6_gt_force_wake_put(dev_priv);
+ }
+ 
+ static bool
+@@ -1139,14 +1134,105 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+ 	return 0;
+ }
+ 
++static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
++{
++	struct drm_i915_private *dev_priv = ring->dev->dev_private;
++	bool was_interruptible;
++	int ret;
++
++	/* XXX As we have not yet audited all the paths to check that
++	 * they are ready for ERESTARTSYS from intel_ring_begin, do not
++	 * allow us to be interruptible by a signal.
++	 */
++	was_interruptible = dev_priv->mm.interruptible;
++	dev_priv->mm.interruptible = false;
++
++	ret = i915_wait_request(ring, seqno, true);
++
++	dev_priv->mm.interruptible = was_interruptible;
++
++	return ret;
++}
++
++static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
++{
++	struct drm_i915_gem_request *request;
++	u32 seqno = 0;
++	int ret;
++
++	i915_gem_retire_requests_ring(ring);
++
++	if (ring->last_retired_head != -1) {
++		ring->head = ring->last_retired_head;
++		ring->last_retired_head = -1;
++		ring->space = ring_space(ring);
++		if (ring->space >= n)
++			return 0;
++	}
++
++	list_for_each_entry(request, &ring->request_list, list) {
++		int space;
++
++		if (request->tail == -1)
++			continue;
++
++		space = request->tail - (ring->tail + 8);
++		if (space < 0)
++			space += ring->size;
++		if (space >= n) {
++			seqno = request->seqno;
++			break;
++		}
++
++		/* Consume this request in case we need more space than
++		 * is available and so need to prevent a race between
++		 * updating last_retired_head and direct reads of
++		 * I915_RING_HEAD. It also provides a nice sanity check.
++		 */
++		request->tail = -1;
++	}
++
++	if (seqno == 0)
++		return -ENOSPC;
++
++	ret = intel_ring_wait_seqno(ring, seqno);
++	if (ret)
++		return ret;
++
++	if (WARN_ON(ring->last_retired_head == -1))
++		return -ENOSPC;
++
++	ring->head = ring->last_retired_head;
++	ring->last_retired_head = -1;
++	ring->space = ring_space(ring);
++	if (WARN_ON(ring->space < n))
++		return -ENOSPC;
++
++	return 0;
++}
++
+ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
+ {
+ 	struct drm_device *dev = ring->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	unsigned long end;
++	int ret;
++
++	ret = intel_ring_wait_request(ring, n);
++	if (ret != -ENOSPC)
++		return ret;
+ 
+ 	trace_i915_ring_wait_begin(ring);
+-	end = jiffies + 3 * HZ;
++	if (drm_core_check_feature(dev, DRIVER_GEM))
++		/* With GEM the hangcheck timer should kick us out of the loop,
++		 * leaving it early runs the risk of corrupting GEM state (due
++		 * to running on almost untested codepaths). But on resume
++		 * timers don't work yet, so prevent a complete hang in that
++		 * case by choosing an insanely large timeout. */
++		end = jiffies + 60 * HZ;
++	else
++		end = jiffies + 3 * HZ;
++
+ 	do {
+ 		ring->head = I915_READ_HEAD(ring);
+ 		ring->space = ring_space(ring);
+@@ -1203,7 +1289,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
+ 
+ static const struct intel_ring_buffer render_ring = {
+ 	.name			= "render ring",
+-	.id			= RING_RENDER,
++	.id			= RCS,
+ 	.mmio_base		= RENDER_RING_BASE,
+ 	.size			= 32 * PAGE_SIZE,
+ 	.init			= init_render_ring,
+@@ -1226,7 +1312,7 @@ static const struct intel_ring_buffer render_ring = {
+ 
+ static const struct intel_ring_buffer bsd_ring = {
+ 	.name                   = "bsd ring",
+-	.id			= RING_BSD,
++	.id			= VCS,
+ 	.mmio_base		= BSD_RING_BASE,
+ 	.size			= 32 * PAGE_SIZE,
+ 	.init			= init_ring_common,
+@@ -1336,7 +1422,7 @@ gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
+ /* ring buffer for Video Codec for Gen6+ */
+ static const struct intel_ring_buffer gen6_bsd_ring = {
+ 	.name			= "gen6 bsd ring",
+-	.id			= RING_BSD,
++	.id			= VCS,
+ 	.mmio_base		= GEN6_BSD_RING_BASE,
+ 	.size			= 32 * PAGE_SIZE,
+ 	.init			= init_ring_common,
+@@ -1372,79 +1458,13 @@ blt_ring_put_irq(struct intel_ring_buffer *ring)
+ 			  GEN6_BLITTER_USER_INTERRUPT);
+ }
+ 
+-
+-/* Workaround for some stepping of SNB,
+- * each time when BLT engine ring tail moved,
+- * the first command in the ring to be parsed
+- * should be MI_BATCH_BUFFER_START
+- */
+-#define NEED_BLT_WORKAROUND(dev) \
+-	(IS_GEN6(dev) && (dev->pdev->revision < 8))
+-
+-static inline struct drm_i915_gem_object *
+-to_blt_workaround(struct intel_ring_buffer *ring)
+-{
+-	return ring->private;
+-}
+-
+-static int blt_ring_init(struct intel_ring_buffer *ring)
+-{
+-	if (NEED_BLT_WORKAROUND(ring->dev)) {
+-		struct drm_i915_gem_object *obj;
+-		u32 *ptr;
+-		int ret;
+-
+-		obj = i915_gem_alloc_object(ring->dev, 4096);
+-		if (obj == NULL)
+-			return -ENOMEM;
+-
+-		ret = i915_gem_object_pin(obj, 4096, true);
+-		if (ret) {
+-			drm_gem_object_unreference(&obj->base);
+-			return ret;
+-		}
+-
+-		ptr = kmap(obj->pages[0]);
+-		*ptr++ = MI_BATCH_BUFFER_END;
+-		*ptr++ = MI_NOOP;
+-		kunmap(obj->pages[0]);
+-
+-		ret = i915_gem_object_set_to_gtt_domain(obj, false);
+-		if (ret) {
+-			i915_gem_object_unpin(obj);
+-			drm_gem_object_unreference(&obj->base);
+-			return ret;
+-		}
+-
+-		ring->private = obj;
+-	}
+-
+-	return init_ring_common(ring);
+-}
+-
+-static int blt_ring_begin(struct intel_ring_buffer *ring,
+-			  int num_dwords)
+-{
+-	if (ring->private) {
+-		int ret = intel_ring_begin(ring, num_dwords+2);
+-		if (ret)
+-			return ret;
+-
+-		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
+-		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
+-
+-		return 0;
+-	} else
+-		return intel_ring_begin(ring, 4);
+-}
+-
+ static int blt_ring_flush(struct intel_ring_buffer *ring,
+ 			  u32 invalidate, u32 flush)
+ {
+ 	uint32_t cmd;
+ 	int ret;
+ 
+-	ret = blt_ring_begin(ring, 4);
++	ret = intel_ring_begin(ring, 4);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1459,22 +1479,12 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
+ 	return 0;
+ }
+ 
+-static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+-{
+-	if (!ring->private)
+-		return;
+-
+-	i915_gem_object_unpin(ring->private);
+-	drm_gem_object_unreference(ring->private);
+-	ring->private = NULL;
+-}
+-
+ static const struct intel_ring_buffer gen6_blt_ring = {
+ 	.name			= "blt ring",
+-	.id			= RING_BLT,
++	.id			= BCS,
+ 	.mmio_base		= BLT_RING_BASE,
+ 	.size			= 32 * PAGE_SIZE,
+-	.init			= blt_ring_init,
++	.init			= init_ring_common,
+ 	.write_tail		= ring_write_tail,
+ 	.flush			= blt_ring_flush,
+ 	.add_request		= gen6_add_request,
+@@ -1482,7 +1492,6 @@ static const struct intel_ring_buffer gen6_blt_ring = {
+ 	.irq_get		= blt_ring_get_irq,
+ 	.irq_put		= blt_ring_put_irq,
+ 	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
+-	.cleanup		= blt_ring_cleanup,
+ 	.sync_to		= gen6_blt_ring_sync_to,
+ 	.semaphore_register	= {MI_SEMAPHORE_SYNC_BR,
+ 				   MI_SEMAPHORE_SYNC_BV,
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
+index 68281c9..bc0365b 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
+@@ -1,13 +1,6 @@
+ #ifndef _INTEL_RINGBUFFER_H_
+ #define _INTEL_RINGBUFFER_H_
+ 
+-enum {
+-	RCS = 0x0,
+-	VCS,
+-	BCS,
+-	I915_NUM_RINGS,
+-};
+-
+ struct  intel_hw_status_page {
+ 	u32	__iomem	*page_addr;
+ 	unsigned int	gfx_addr;
+@@ -36,10 +29,11 @@ struct  intel_hw_status_page {
+ struct  intel_ring_buffer {
+ 	const char	*name;
+ 	enum intel_ring_id {
+-		RING_RENDER = 0x1,
+-		RING_BSD = 0x2,
+-		RING_BLT = 0x4,
++		RCS = 0x0,
++		VCS,
++		BCS,
+ 	} id;
++#define I915_NUM_RINGS 3
+ 	u32		mmio_base;
+ 	void		__iomem *virtual_start;
+ 	struct		drm_device *dev;
+@@ -52,6 +46,16 @@ struct  intel_ring_buffer {
+ 	int		effective_size;
+ 	struct intel_hw_status_page status_page;
+ 
++	/** We track the position of the requests in the ring buffer, and
++	 * when each is retired we increment last_retired_head as the GPU
++	 * must have finished processing the request and so we know we
++	 * can advance the ringbuffer up to that position.
++	 *
++	 * last_retired_head is set to -1 after the value is consumed so
++	 * we can detect new retirements.
++	 */
++	u32		last_retired_head;
++
+ 	spinlock_t	irq_lock;
+ 	u32		irq_refcount;
+ 	u32		irq_mask;
+@@ -119,6 +123,12 @@ struct  intel_ring_buffer {
+ 	void *private;
+ };
+ 
++static inline unsigned
++intel_ring_flag(struct intel_ring_buffer *ring)
++{
++	return 1 << ring->id;
++}
++
+ static inline u32
+ intel_ring_sync_index(struct intel_ring_buffer *ring,
+ 		      struct intel_ring_buffer *other)
+@@ -193,6 +203,11 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
+ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+ void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+ 
++static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
++{
++	return ring->tail;
++}
++
+ static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+ {
+ 	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index 9e24670..1b6b157 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -148,7 +148,7 @@ struct intel_sdvo_connector {
+ 	/* Mark the type of connector */
+ 	uint16_t output_flag;
+ 
+-	int force_audio;
++	enum hdmi_force_audio force_audio;
+ 
+ 	/* This contains all current supported TV format */
+ 	u8 tv_format_supported[TV_FORMAT_NUM];
+@@ -982,7 +982,6 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
+ 
+ 	intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
+ 
+-	drm_mode_set_crtcinfo(adjusted_mode, 0);
+ 	return true;
+ }
+ 
+@@ -1351,8 +1350,8 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
+ 
+ 	if (status == connector_status_connected) {
+ 		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+-		if (intel_sdvo_connector->force_audio)
+-			intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
++		if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO)
++			intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON);
+ 	}
+ 
+ 	return status;
+@@ -1725,10 +1724,10 @@ intel_sdvo_set_property(struct drm_connector *connector,
+ 
+ 		intel_sdvo_connector->force_audio = i;
+ 
+-		if (i == 0)
++		if (i == HDMI_AUDIO_AUTO)
+ 			has_audio = intel_sdvo_detect_hdmi_audio(connector);
+ 		else
+-			has_audio = i > 0;
++			has_audio = (i == HDMI_AUDIO_ON);
+ 
+ 		if (has_audio == intel_sdvo->has_hdmi_audio)
+ 			return 0;
+@@ -2026,7 +2025,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
+ 	drm_connector_helper_add(&connector->base.base,
+ 				 &intel_sdvo_connector_helper_funcs);
+ 
+-	connector->base.base.interlace_allowed = 0;
++	connector->base.base.interlace_allowed = 1;
+ 	connector->base.base.doublescan_allowed = 0;
+ 	connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+ 
+@@ -2264,18 +2263,6 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
+ 	return true;
+ }
+ 
+-static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
+-{
+-	struct drm_device *dev = intel_sdvo->base.base.dev;
+-	struct drm_connector *connector, *tmp;
+-
+-	list_for_each_entry_safe(connector, tmp,
+-				 &dev->mode_config.connector_list, head) {
+-		if (intel_attached_encoder(connector) == &intel_sdvo->base)
+-			intel_sdvo_destroy(connector);
+-	}
+-}
+-
+ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ 					  struct intel_sdvo_connector *intel_sdvo_connector,
+ 					  int type)
+@@ -2330,10 +2317,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
+ 		intel_sdvo_connector->max_##name = data_value[0]; \
+ 		intel_sdvo_connector->cur_##name = response; \
+ 		intel_sdvo_connector->name = \
+-			drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
++			drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
+ 		if (!intel_sdvo_connector->name) return false; \
+-		intel_sdvo_connector->name->values[0] = 0; \
+-		intel_sdvo_connector->name->values[1] = data_value[0]; \
+ 		drm_connector_attach_property(connector, \
+ 					      intel_sdvo_connector->name, \
+ 					      intel_sdvo_connector->cur_##name); \
+@@ -2367,25 +2352,19 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+ 		intel_sdvo_connector->left_margin = data_value[0] - response;
+ 		intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
+ 		intel_sdvo_connector->left =
+-			drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-					    "left_margin", 2);
++			drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
+ 		if (!intel_sdvo_connector->left)
+ 			return false;
+ 
+-		intel_sdvo_connector->left->values[0] = 0;
+-		intel_sdvo_connector->left->values[1] = data_value[0];
+ 		drm_connector_attach_property(connector,
+ 					      intel_sdvo_connector->left,
+ 					      intel_sdvo_connector->left_margin);
+ 
+ 		intel_sdvo_connector->right =
+-			drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-					    "right_margin", 2);
++			drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
+ 		if (!intel_sdvo_connector->right)
+ 			return false;
+ 
+-		intel_sdvo_connector->right->values[0] = 0;
+-		intel_sdvo_connector->right->values[1] = data_value[0];
+ 		drm_connector_attach_property(connector,
+ 					      intel_sdvo_connector->right,
+ 					      intel_sdvo_connector->right_margin);
+@@ -2409,25 +2388,21 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+ 		intel_sdvo_connector->top_margin = data_value[0] - response;
+ 		intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
+ 		intel_sdvo_connector->top =
+-			drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-					    "top_margin", 2);
++			drm_property_create_range(dev, 0,
++					    "top_margin", 0, data_value[0]);
+ 		if (!intel_sdvo_connector->top)
+ 			return false;
+ 
+-		intel_sdvo_connector->top->values[0] = 0;
+-		intel_sdvo_connector->top->values[1] = data_value[0];
+ 		drm_connector_attach_property(connector,
+ 					      intel_sdvo_connector->top,
+ 					      intel_sdvo_connector->top_margin);
+ 
+ 		intel_sdvo_connector->bottom =
+-			drm_property_create(dev, DRM_MODE_PROP_RANGE,
+-					    "bottom_margin", 2);
++			drm_property_create_range(dev, 0,
++					    "bottom_margin", 0, data_value[0]);
+ 		if (!intel_sdvo_connector->bottom)
+ 			return false;
+ 
+-		intel_sdvo_connector->bottom->values[0] = 0;
+-		intel_sdvo_connector->bottom->values[1] = data_value[0];
+ 		drm_connector_attach_property(connector,
+ 					      intel_sdvo_connector->bottom,
+ 					      intel_sdvo_connector->bottom_margin);
+@@ -2456,12 +2431,10 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
+ 		intel_sdvo_connector->max_dot_crawl = 1;
+ 		intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+ 		intel_sdvo_connector->dot_crawl =
+-			drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
++			drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
+ 		if (!intel_sdvo_connector->dot_crawl)
+ 			return false;
+ 
+-		intel_sdvo_connector->dot_crawl->values[0] = 0;
+-		intel_sdvo_connector->dot_crawl->values[1] = 1;
+ 		drm_connector_attach_property(connector,
+ 					      intel_sdvo_connector->dot_crawl,
+ 					      intel_sdvo_connector->cur_dot_crawl);
+@@ -2608,8 +2581,7 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ 				    intel_sdvo->caps.output_flags) != true) {
+ 		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+ 			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+-		/* Output_setup can leave behind connectors! */
+-		goto err_output;
++		goto err;
+ 	}
+ 
+ 	/* Only enable the hotplug irq if we need it, to work around noisy
+@@ -2622,12 +2594,12 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ 
+ 	/* Set the input timing to the screen. Assume always input 0. */
+ 	if (!intel_sdvo_set_target_input(intel_sdvo))
+-		goto err_output;
++		goto err;
+ 
+ 	if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
+ 						    &intel_sdvo->pixel_clock_min,
+ 						    &intel_sdvo->pixel_clock_max))
+-		goto err_output;
++		goto err;
+ 
+ 	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ 			"clock range %dMHz - %dMHz, "
+@@ -2647,9 +2619,6 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+ 			(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+ 	return true;
+ 
+-err_output:
+-	intel_sdvo_output_cleanup(intel_sdvo);
+-
+ err:
+ 	drm_encoder_cleanup(&intel_encoder->base);
+ 	i2c_del_adapter(&intel_sdvo->ddc);
+diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
+index 4193c54..770bdd6 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
++++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright © 2006-2007 Intel Corporation
++ * Copyright © 2006-2007 Intel Corporation
+  *
+  * Permission is hereby granted, free of charge, to any person obtaining a
+  * copy of this software and associated documentation files (the "Software"),
+diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
+new file mode 100644
+index 0000000..e90dfb6
+--- /dev/null
++++ b/drivers/gpu/drm/i915/intel_sprite.c
+@@ -0,0 +1,666 @@
++/*
++ * Copyright © 2011 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ * Authors:
++ *   Jesse Barnes <jbarnes at virtuousgeek.org>
++ *
++ * New plane/sprite handling.
++ *
++ * The older chips had a separate interface for programming plane related
++ * registers; newer ones are much simpler and we can use the new DRM plane
++ * support.
++ */
++#include "drmP.h"
++#include "drm_crtc.h"
++#include "drm_fourcc.h"
++#include "intel_drv.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++
++static void
++ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
++		 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
++		 unsigned int crtc_w, unsigned int crtc_h,
++		 uint32_t x, uint32_t y,
++		 uint32_t src_w, uint32_t src_h)
++{
++	struct drm_device *dev = plane->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_plane *intel_plane = to_intel_plane(plane);
++	int pipe = intel_plane->pipe;
++	u32 sprctl, sprscale = 0;
++	int pixel_size;
++
++	sprctl = I915_READ(SPRCTL(pipe));
++
++	/* Mask out pixel format bits in case we change it */
++	sprctl &= ~SPRITE_PIXFORMAT_MASK;
++	sprctl &= ~SPRITE_RGB_ORDER_RGBX;
++	sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
++
++	switch (fb->pixel_format) {
++	case DRM_FORMAT_XBGR8888:
++		sprctl |= SPRITE_FORMAT_RGBX888;
++		pixel_size = 4;
++		break;
++	case DRM_FORMAT_XRGB8888:
++		sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
++		pixel_size = 4;
++		break;
++	case DRM_FORMAT_YUYV:
++		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
++		pixel_size = 2;
++		break;
++	case DRM_FORMAT_YVYU:
++		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
++		pixel_size = 2;
++		break;
++	case DRM_FORMAT_UYVY:
++		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
++		pixel_size = 2;
++		break;
++	case DRM_FORMAT_VYUY:
++		sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
++		pixel_size = 2;
++		break;
++	default:
++		DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
++		sprctl |= DVS_FORMAT_RGBX888;
++		pixel_size = 4;
++		break;
++	}
++
++	if (obj->tiling_mode != I915_TILING_NONE)
++		sprctl |= SPRITE_TILED;
++
++	/* must disable */
++	sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
++	sprctl |= SPRITE_ENABLE;
++
++	/* Sizes are 0 based */
++	src_w--;
++	src_h--;
++	crtc_w--;
++	crtc_h--;
++
++	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
++
++	/*
++	 * IVB workaround: must disable low power watermarks for at least
++	 * one frame before enabling scaling.  LP watermarks can be re-enabled
++	 * when scaling is disabled.
++	 */
++	if (crtc_w != src_w || crtc_h != src_h) {
++		dev_priv->sprite_scaling_enabled = true;
++		sandybridge_update_wm(dev);
++		intel_wait_for_vblank(dev, pipe);
++		sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
++	} else {
++		dev_priv->sprite_scaling_enabled = false;
++		/* potentially re-enable LP watermarks */
++		sandybridge_update_wm(dev);
++	}
++
++	I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
++	I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
++	if (obj->tiling_mode != I915_TILING_NONE) {
++		I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
++	} else {
++		unsigned long offset;
++
++		offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
++		I915_WRITE(SPRLINOFF(pipe), offset);
++	}
++	I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
++	I915_WRITE(SPRSCALE(pipe), sprscale);
++	I915_WRITE(SPRCTL(pipe), sprctl);
++	I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
++	POSTING_READ(SPRSURF(pipe));
++}
++
++static void
++ivb_disable_plane(struct drm_plane *plane)
++{
++	struct drm_device *dev = plane->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_plane *intel_plane = to_intel_plane(plane);
++	int pipe = intel_plane->pipe;
++
++	I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
++	/* Can't leave the scaler enabled... */
++	I915_WRITE(SPRSCALE(pipe), 0);
++	/* Activate double buffered register update */
++	I915_WRITE(SPRSURF(pipe), 0);
++	POSTING_READ(SPRSURF(pipe));
++}
++
++static int
++ivb_update_colorkey(struct drm_plane *plane,
++		    struct drm_intel_sprite_colorkey *key)
++{
++	struct drm_device *dev = plane->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_plane *intel_plane;
++	u32 sprctl;
++	int ret = 0;
++
++	intel_plane = to_intel_plane(plane);
++
++	I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
++	I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
++	I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
++
++	sprctl = I915_READ(SPRCTL(intel_plane->pipe));
++	sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
++	if (key->flags & I915_SET_COLORKEY_DESTINATION)
++		sprctl |= SPRITE_DEST_KEY;
++	else if (key->flags & I915_SET_COLORKEY_SOURCE)
++		sprctl |= SPRITE_SOURCE_KEY;
++	I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
++
++	POSTING_READ(SPRKEYMSK(intel_plane->pipe));
++
++	return ret;
++}
++
++static void
++ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
++{
++	struct drm_device *dev = plane->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_plane *intel_plane;
++	u32 sprctl;
++
++	intel_plane = to_intel_plane(plane);
++
++	key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
++	key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
++	key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
++	key->flags = 0;
++
++	sprctl = I915_READ(SPRCTL(intel_plane->pipe));
++
++	if (sprctl & SPRITE_DEST_KEY)
++		key->flags = I915_SET_COLORKEY_DESTINATION;
++	else if (sprctl & SPRITE_SOURCE_KEY)
++		key->flags = I915_SET_COLORKEY_SOURCE;
++	else
++		key->flags = I915_SET_COLORKEY_NONE;
++}
++
++static void
++snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
++		 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
++		 unsigned int crtc_w, unsigned int crtc_h,
++		 uint32_t x, uint32_t y,
++		 uint32_t src_w, uint32_t src_h)
++{
++	struct drm_device *dev = plane->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_plane *intel_plane = to_intel_plane(plane);
++	int pipe = intel_plane->pipe, pixel_size;
++	u32 dvscntr, dvsscale = 0;
++
++	dvscntr = I915_READ(DVSCNTR(pipe));
++
++	/* Mask out pixel format bits in case we change it */
++	dvscntr &= ~DVS_PIXFORMAT_MASK;
++	dvscntr &= ~DVS_RGB_ORDER_XBGR;
++	dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
++
++	switch (fb->pixel_format) {
++	case DRM_FORMAT_XBGR8888:
++		dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
++		pixel_size = 4;
++		break;
++	case DRM_FORMAT_XRGB8888:
++		dvscntr |= DVS_FORMAT_RGBX888;
++		pixel_size = 4;
++		break;
++	case DRM_FORMAT_YUYV:
++		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
++		pixel_size = 2;
++		break;
++	case DRM_FORMAT_YVYU:
++		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
++		pixel_size = 2;
++		break;
++	case DRM_FORMAT_UYVY:
++		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
++		pixel_size = 2;
++		break;
++	case DRM_FORMAT_VYUY:
++		dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
++		pixel_size = 2;
++		break;
++	default:
++		DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
++		dvscntr |= DVS_FORMAT_RGBX888;
++		pixel_size = 4;
++		break;
++	}
++
++	if (obj->tiling_mode != I915_TILING_NONE)
++		dvscntr |= DVS_TILED;
++
++	/* must disable */
++	dvscntr |= DVS_TRICKLE_FEED_DISABLE;
++	dvscntr |= DVS_ENABLE;
++
++	/* Sizes are 0 based */
++	src_w--;
++	src_h--;
++	crtc_w--;
++	crtc_h--;
++
++	intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
++
++	if (crtc_w != src_w || crtc_h != src_h)
++		dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
++
++	I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
++	I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
++	if (obj->tiling_mode != I915_TILING_NONE) {
++		I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
++	} else {
++		unsigned long offset;
++
++		offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
++		I915_WRITE(DVSLINOFF(pipe), offset);
++	}
++	I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
++	I915_WRITE(DVSSCALE(pipe), dvsscale);
++	I915_WRITE(DVSCNTR(pipe), dvscntr);
++	I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
++	POSTING_READ(DVSSURF(pipe));
++}
++
++static void
++snb_disable_plane(struct drm_plane *plane)
++{
++	struct drm_device *dev = plane->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_plane *intel_plane = to_intel_plane(plane);
++	int pipe = intel_plane->pipe;
++
++	I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
++	/* Disable the scaler */
++	I915_WRITE(DVSSCALE(pipe), 0);
++	/* Flush double buffered register updates */
++	I915_WRITE(DVSSURF(pipe), 0);
++	POSTING_READ(DVSSURF(pipe));
++}
++
++static void
++intel_enable_primary(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++	int reg = DSPCNTR(intel_crtc->plane);
++
++	I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
++}
++
++static void
++intel_disable_primary(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++	int reg = DSPCNTR(intel_crtc->plane);
++
++	I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
++}
++
++static int
++snb_update_colorkey(struct drm_plane *plane,
++		    struct drm_intel_sprite_colorkey *key)
++{
++	struct drm_device *dev = plane->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_plane *intel_plane;
++	u32 dvscntr;
++	int ret = 0;
++
++	intel_plane = to_intel_plane(plane);
++
++	I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
++	I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
++	I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
++
++	dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
++	dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
++	if (key->flags & I915_SET_COLORKEY_DESTINATION)
++		dvscntr |= DVS_DEST_KEY;
++	else if (key->flags & I915_SET_COLORKEY_SOURCE)
++		dvscntr |= DVS_SOURCE_KEY;
++	I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
++
++	POSTING_READ(DVSKEYMSK(intel_plane->pipe));
++
++	return ret;
++}
++
++static void
++snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
++{
++	struct drm_device *dev = plane->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_plane *intel_plane;
++	u32 dvscntr;
++
++	intel_plane = to_intel_plane(plane);
++
++	key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
++	key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
++	key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
++	key->flags = 0;
++
++	dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
++
++	if (dvscntr & DVS_DEST_KEY)
++		key->flags = I915_SET_COLORKEY_DESTINATION;
++	else if (dvscntr & DVS_SOURCE_KEY)
++		key->flags = I915_SET_COLORKEY_SOURCE;
++	else
++		key->flags = I915_SET_COLORKEY_NONE;
++}
++
++static int
++intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
++		   struct drm_framebuffer *fb, int crtc_x, int crtc_y,
++		   unsigned int crtc_w, unsigned int crtc_h,
++		   uint32_t src_x, uint32_t src_y,
++		   uint32_t src_w, uint32_t src_h)
++{
++	struct drm_device *dev = plane->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++	struct intel_plane *intel_plane = to_intel_plane(plane);
++	struct intel_framebuffer *intel_fb;
++	struct drm_i915_gem_object *obj, *old_obj;
++	int pipe = intel_plane->pipe;
++	int ret = 0;
++	int x = src_x >> 16, y = src_y >> 16;
++	int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
++	bool disable_primary = false;
++
++	intel_fb = to_intel_framebuffer(fb);
++	obj = intel_fb->obj;
++
++	old_obj = intel_plane->obj;
++
++	src_w = src_w >> 16;
++	src_h = src_h >> 16;
++
++	/* Pipe must be running... */
++	if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
++		return -EINVAL;
++
++	if (crtc_x >= primary_w || crtc_y >= primary_h)
++		return -EINVAL;
++
++	/* Don't modify another pipe's plane */
++	if (intel_plane->pipe != intel_crtc->pipe)
++		return -EINVAL;
++
++	/*
++	 * Clamp the width & height into the visible area.  Note we don't
++	 * try to scale the source if part of the visible region is offscreen.
++	 * The caller must handle that by adjusting source offset and size.
++	 */
++	if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
++		crtc_w += crtc_x;
++		crtc_x = 0;
++	}
++	if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
++		goto out;
++	if ((crtc_x + crtc_w) > primary_w)
++		crtc_w = primary_w - crtc_x;
++
++	if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
++		crtc_h += crtc_y;
++		crtc_y = 0;
++	}
++	if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
++		goto out;
++	if (crtc_y + crtc_h > primary_h)
++		crtc_h = primary_h - crtc_y;
++
++	if (!crtc_w || !crtc_h) /* Again, nothing to display */
++		goto out;
++
++	/*
++	 * We can take a larger source and scale it down, but
++	 * only so much...  16x is the max on SNB.
++	 */
++	if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
++		return -EINVAL;
++
++	/*
++	 * If the sprite is completely covering the primary plane,
++	 * we can disable the primary and save power.
++	 */
++	if ((crtc_x == 0) && (crtc_y == 0) &&
++	    (crtc_w == primary_w) && (crtc_h == primary_h))
++		disable_primary = true;
++
++	mutex_lock(&dev->struct_mutex);
++
++	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
++	if (ret)
++		goto out_unlock;
++
++	intel_plane->obj = obj;
++
++	/*
++	 * Be sure to re-enable the primary before the sprite is no longer
++	 * covering it fully.
++	 */
++	if (!disable_primary && intel_plane->primary_disabled) {
++		intel_enable_primary(crtc);
++		intel_plane->primary_disabled = false;
++	}
++
++	intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
++				  crtc_w, crtc_h, x, y, src_w, src_h);
++
++	if (disable_primary) {
++		intel_disable_primary(crtc);
++		intel_plane->primary_disabled = true;
++	}
++
++	/* Unpin old obj after new one is active to avoid ugliness */
++	if (old_obj) {
++		/*
++		 * It's fairly common to simply update the position of
++		 * an existing object.  In that case, we don't need to
++		 * wait for vblank to avoid ugliness, we only need to
++		 * do the pin & ref bookkeeping.
++		 */
++		if (old_obj != obj) {
++			mutex_unlock(&dev->struct_mutex);
++			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
++			mutex_lock(&dev->struct_mutex);
++		}
++		intel_unpin_fb_obj(old_obj);
++	}
++
++out_unlock:
++	mutex_unlock(&dev->struct_mutex);
++out:
++	return ret;
++}
++
++static int
++intel_disable_plane(struct drm_plane *plane)
++{
++	struct drm_device *dev = plane->dev;
++	struct intel_plane *intel_plane = to_intel_plane(plane);
++	int ret = 0;
++
++	if (intel_plane->primary_disabled) {
++		intel_enable_primary(plane->crtc);
++		intel_plane->primary_disabled = false;
++	}
++
++	intel_plane->disable_plane(plane);
++
++	if (!intel_plane->obj)
++		goto out;
++
++	mutex_lock(&dev->struct_mutex);
++	intel_unpin_fb_obj(intel_plane->obj);
++	intel_plane->obj = NULL;
++	mutex_unlock(&dev->struct_mutex);
++out:
++
++	return ret;
++}
++
++static void intel_destroy_plane(struct drm_plane *plane)
++{
++	struct intel_plane *intel_plane = to_intel_plane(plane);
++	intel_disable_plane(plane);
++	drm_plane_cleanup(plane);
++	kfree(intel_plane);
++}
++
++int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
++			      struct drm_file *file_priv)
++{
++	struct drm_intel_sprite_colorkey *set = data;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct drm_mode_object *obj;
++	struct drm_plane *plane;
++	struct intel_plane *intel_plane;
++	int ret = 0;
++
++	if (!dev_priv)
++		return -EINVAL;
++
++	/* Make sure we don't try to enable both src & dest simultaneously */
++	if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
++		return -EINVAL;
++
++	mutex_lock(&dev->mode_config.mutex);
++
++	obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
++	if (!obj) {
++		ret = -EINVAL;
++		goto out_unlock;
++	}
++
++	plane = obj_to_plane(obj);
++	intel_plane = to_intel_plane(plane);
++	ret = intel_plane->update_colorkey(plane, set);
++
++out_unlock:
++	mutex_unlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
++			      struct drm_file *file_priv)
++{
++	struct drm_intel_sprite_colorkey *get = data;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct drm_mode_object *obj;
++	struct drm_plane *plane;
++	struct intel_plane *intel_plane;
++	int ret = 0;
++
++	if (!dev_priv)
++		return -EINVAL;
++
++	mutex_lock(&dev->mode_config.mutex);
++
++	obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
++	if (!obj) {
++		ret = -EINVAL;
++		goto out_unlock;
++	}
++
++	plane = obj_to_plane(obj);
++	intel_plane = to_intel_plane(plane);
++	intel_plane->get_colorkey(plane, get);
++
++out_unlock:
++	mutex_unlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++static const struct drm_plane_funcs intel_plane_funcs = {
++	.update_plane = intel_update_plane,
++	.disable_plane = intel_disable_plane,
++	.destroy = intel_destroy_plane,
++};
++
++static uint32_t snb_plane_formats[] = {
++	DRM_FORMAT_XBGR8888,
++	DRM_FORMAT_XRGB8888,
++	DRM_FORMAT_YUYV,
++	DRM_FORMAT_YVYU,
++	DRM_FORMAT_UYVY,
++	DRM_FORMAT_VYUY,
++};
++
++int
++intel_plane_init(struct drm_device *dev, enum pipe pipe)
++{
++	struct intel_plane *intel_plane;
++	unsigned long possible_crtcs;
++	int ret;
++
++	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
++		return -ENODEV;
++
++	intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
++	if (!intel_plane)
++		return -ENOMEM;
++
++	if (IS_GEN6(dev)) {
++		intel_plane->max_downscale = 16;
++		intel_plane->update_plane = snb_update_plane;
++		intel_plane->disable_plane = snb_disable_plane;
++		intel_plane->update_colorkey = snb_update_colorkey;
++		intel_plane->get_colorkey = snb_get_colorkey;
++	} else if (IS_GEN7(dev)) {
++		intel_plane->max_downscale = 2;
++		intel_plane->update_plane = ivb_update_plane;
++		intel_plane->disable_plane = ivb_disable_plane;
++		intel_plane->update_colorkey = ivb_update_colorkey;
++		intel_plane->get_colorkey = ivb_get_colorkey;
++	}
++
++	intel_plane->pipe = pipe;
++	possible_crtcs = (1 << pipe);
++	ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
++			     &intel_plane_funcs, snb_plane_formats,
++			     ARRAY_SIZE(snb_plane_formats), false);
++	if (ret)
++		kfree(intel_plane);
++
++	return ret;
++}
++
+diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
+index 12041fa..c82b1d4 100644
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -674,7 +674,7 @@ static const struct tv_mode tv_modes[] = {
+ 		.filter_table = filter_table,
+ 	},
+ 	{
+-		.name       = "480p at 59.94Hz",
++		.name       = "480p",
+ 		.clock		= 107520,
+ 		.refresh	= 59940,
+ 		.oversample     = TV_OVERSAMPLE_4X,
+@@ -698,30 +698,6 @@ static const struct tv_mode tv_modes[] = {
+ 		.filter_table = filter_table,
+ 	},
+ 	{
+-		.name       = "480p at 60Hz",
+-		.clock		= 107520,
+-		.refresh	= 60000,
+-		.oversample     = TV_OVERSAMPLE_4X,
+-		.component_only = 1,
+-
+-		.hsync_end      = 64,               .hblank_end         = 122,
+-		.hblank_start   = 842,              .htotal             = 856,
+-
+-		.progressive    = true,		    .trilevel_sync = false,
+-
+-		.vsync_start_f1 = 12,               .vsync_start_f2     = 12,
+-		.vsync_len      = 12,
+-
+-		.veq_ena        = false,
+-
+-		.vi_end_f1      = 44,               .vi_end_f2          = 44,
+-		.nbr_end        = 479,
+-
+-		.burst_ena      = false,
+-
+-		.filter_table = filter_table,
+-	},
+-	{
+ 		.name       = "576p",
+ 		.clock		= 107520,
+ 		.refresh	= 50000,
+@@ -731,7 +707,7 @@ static const struct tv_mode tv_modes[] = {
+ 		.hsync_end      = 64,               .hblank_end         = 139,
+ 		.hblank_start   = 859,              .htotal             = 863,
+ 
+-		.progressive    = true,		.trilevel_sync = false,
++		.progressive    = true,		    .trilevel_sync = false,
+ 
+ 		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+ 		.vsync_len      = 10,
+@@ -770,30 +746,6 @@ static const struct tv_mode tv_modes[] = {
+ 		.filter_table = filter_table,
+ 	},
+ 	{
+-		.name       = "720p at 59.94Hz",
+-		.clock		= 148800,
+-		.refresh	= 59940,
+-		.oversample     = TV_OVERSAMPLE_2X,
+-		.component_only = 1,
+-
+-		.hsync_end      = 80,               .hblank_end         = 300,
+-		.hblank_start   = 1580,             .htotal             = 1651,
+-
+-		.progressive	= true,		    .trilevel_sync = true,
+-
+-		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
+-		.vsync_len      = 10,
+-
+-		.veq_ena        = false,
+-
+-		.vi_end_f1      = 29,               .vi_end_f2          = 29,
+-		.nbr_end        = 719,
+-
+-		.burst_ena      = false,
+-
+-		.filter_table = filter_table,
+-	},
+-	{
+ 		.name       = "720p at 50Hz",
+ 		.clock		= 148800,
+ 		.refresh	= 50000,
+@@ -870,32 +822,6 @@ static const struct tv_mode tv_modes[] = {
+ 
+ 		.filter_table = filter_table,
+ 	},
+-	{
+-		.name       = "1080i at 59.94Hz",
+-		.clock		= 148800,
+-		.refresh	= 29970,
+-		.oversample     = TV_OVERSAMPLE_2X,
+-		.component_only = 1,
+-
+-		.hsync_end      = 88,               .hblank_end         = 235,
+-		.hblank_start   = 2155,             .htotal             = 2201,
+-
+-		.progressive	= false,	    .trilevel_sync = true,
+-
+-		.vsync_start_f1 = 4,            .vsync_start_f2    = 5,
+-		.vsync_len      = 10,
+-
+-		.veq_ena	= true,		    .veq_start_f1	= 4,
+-		.veq_start_f2	= 4,		.veq_len	  = 10,
+-
+-
+-		.vi_end_f1	= 21,		.vi_end_f2	  = 22,
+-		.nbr_end        = 539,
+-
+-		.burst_ena      = false,
+-
+-		.filter_table = filter_table,
+-	},
+ };
+ 
+ static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
+@@ -1367,7 +1293,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
+ 	int type;
+ 
+ 	mode = reported_modes[0];
+-	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
++	drm_mode_set_crtcinfo(&mode, 0);
+ 
+ 	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
+ 		type = intel_tv_detect_type(intel_tv, connector);
+diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
+index 5ccb65de..507aa3d 100644
+--- a/drivers/gpu/drm/mga/mga_dma.c
++++ b/drivers/gpu/drm/mga/mga_dma.c
+@@ -403,6 +403,8 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
+ 	dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
+ 	dev_priv->chipset = flags;
+ 
++	pci_set_master(dev->pdev);
++
+ 	dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
+ 	dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
+ 
+diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
+index 33daa29..f9a925d 100644
+--- a/drivers/gpu/drm/mga/mga_drv.c
++++ b/drivers/gpu/drm/mga/mga_drv.c
+@@ -44,6 +44,20 @@ static struct pci_device_id pciidlist[] = {
+ 	mga_PCI_IDS
+ };
+ 
++static const struct file_operations mga_driver_fops = {
++	.owner = THIS_MODULE,
++	.open = drm_open,
++	.release = drm_release,
++	.unlocked_ioctl = drm_ioctl,
++	.mmap = drm_mmap,
++	.poll = drm_poll,
++	.fasync = drm_fasync,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl = mga_compat_ioctl,
++#endif
++	.llseek = noop_llseek,
++};
++
+ static struct drm_driver driver = {
+ 	.driver_features =
+ 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
+@@ -64,20 +78,7 @@ static struct drm_driver driver = {
+ 	.reclaim_buffers = drm_core_reclaim_buffers,
+ 	.ioctls = mga_ioctls,
+ 	.dma_ioctl = mga_dma_buffers,
+-	.fops = {
+-		.owner = THIS_MODULE,
+-		.open = drm_open,
+-		.release = drm_release,
+-		.unlocked_ioctl = drm_ioctl,
+-		.mmap = drm_mmap,
+-		.poll = drm_poll,
+-		.fasync = drm_fasync,
+-#ifdef CONFIG_COMPAT
+-		.compat_ioctl = mga_compat_ioctl,
+-#endif
+-		.llseek = noop_llseek,
+-	},
+-
++	.fops = &mga_driver_fops,
+ 	.name = DRIVER_NAME,
+ 	.desc = DRIVER_DESC,
+ 	.date = DRIVER_DATE,
+diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
+index ca16399..97a8126 100644
+--- a/drivers/gpu/drm/nouveau/Kconfig
++++ b/drivers/gpu/drm/nouveau/Kconfig
+@@ -13,6 +13,7 @@ config DRM_NOUVEAU
+ 	select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
+ 	select ACPI_WMI if ACPI
+ 	select MXM_WMI if ACPI
++	select POWER_SUPPLY
+ 	help
+ 	  Choose this option for open-source nVidia support.
+ 
+diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
+index 35ef5b1..1a2ad7e 100644
+--- a/drivers/gpu/drm/nouveau/Makefile
++++ b/drivers/gpu/drm/nouveau/Makefile
+@@ -9,19 +9,23 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
+              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
+              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
+              nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
+-             nouveau_dp.o nouveau_ramht.o \
++             nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \
+ 	     nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
+-	     nouveau_mm.o nouveau_vm.o \
++	     nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
+              nv04_timer.o \
+              nv04_mc.o nv40_mc.o nv50_mc.o \
+-             nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
++             nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \
++             nv50_fb.o nvc0_fb.o \
+              nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
+              nv04_graph.o nv10_graph.o nv20_graph.o \
+              nv40_graph.o nv50_graph.o nvc0_graph.o \
+              nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
+-             nv84_crypt.o \
++             nv84_crypt.o nv98_crypt.o \
+              nva3_copy.o nvc0_copy.o \
+              nv31_mpeg.o nv50_mpeg.o \
++             nv84_bsp.o \
++             nv84_vp.o \
++             nv98_ppp.o \
+              nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
+              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
+              nv04_crtc.o nv04_display.o nv04_cursor.o \
+diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+index 3df56c7..284bd25 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+@@ -18,12 +18,6 @@
+ 
+ #include <linux/vga_switcheroo.h>
+ 
+-#define NOUVEAU_DSM_SUPPORTED 0x00
+-#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
+-
+-#define NOUVEAU_DSM_ACTIVE 0x01
+-#define NOUVEAU_DSM_ACTIVE_QUERY 0x00
+-
+ #define NOUVEAU_DSM_LED 0x02
+ #define NOUVEAU_DSM_LED_STATE 0x00
+ #define NOUVEAU_DSM_LED_OFF 0x10
+@@ -35,6 +29,9 @@
+ #define NOUVEAU_DSM_POWER_SPEED 0x01
+ #define NOUVEAU_DSM_POWER_STAMINA 0x02
+ 
++#define NOUVEAU_DSM_OPTIMUS_FN 0x1A
++#define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001
++
+ static struct nouveau_dsm_priv {
+ 	bool dsm_detected;
+ 	bool optimus_detected;
+@@ -61,7 +58,8 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
+ 	struct acpi_object_list input;
+ 	union acpi_object params[4];
+ 	union acpi_object *obj;
+-	int err;
++	int i, err;
++	char args_buff[4];
+ 
+ 	input.count = 4;
+ 	input.pointer = params;
+@@ -73,7 +71,11 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
+ 	params[2].type = ACPI_TYPE_INTEGER;
+ 	params[2].integer.value = func;
+ 	params[3].type = ACPI_TYPE_BUFFER;
+-	params[3].buffer.length = 0;
++	params[3].buffer.length = 4;
++	/* ACPI is little endian, AABBCCDD becomes {DD,CC,BB,AA} */
++	for (i = 0; i < 4; i++)
++		args_buff[i] = (arg >> i * 8) & 0xFF;
++	params[3].buffer.pointer = args_buff;
+ 
+ 	err = acpi_evaluate_object(handle, "_DSM", &input, &output);
+ 	if (err) {
+@@ -148,6 +150,23 @@ static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
+ 	return 0;
+ }
+ 
++/* Returns 1 if a DSM function is usable and 0 otherwise */
++static int nouveau_test_dsm(acpi_handle test_handle,
++	int (*dsm_func)(acpi_handle, int, int, uint32_t *),
++	int sfnc)
++{
++	u32 result = 0;
++
++	/* Function 0 returns a Buffer containing available functions. The args
++	 * parameter is ignored for function 0, so just put 0 in it */
++	if (dsm_func(test_handle, 0, 0, &result))
++		return 0;
++
++	/* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If
++	 * the n-th bit is enabled, function n is supported */
++	return result & 1 && result & (1 << sfnc);
++}
++
+ static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
+ {
+ 	mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
+@@ -168,6 +187,10 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
+ 
+ static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
+ {
++	/* perhaps the _DSM functions are mutually exclusive, but prepare for
++	 * the future */
++	if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
++		return 0;
+ 	if (id == VGA_SWITCHEROO_IGD)
+ 		return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
+ 	else
+@@ -180,6 +203,11 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
+ 	if (id == VGA_SWITCHEROO_IGD)
+ 		return 0;
+ 
++	/* Optimus laptops have the card already disabled in
++	 * nouveau_switcheroo_set_state */
++	if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
++		return 0;
++
+ 	return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
+ }
+ 
+@@ -212,8 +240,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
+ {
+ 	acpi_handle dhandle, nvidia_handle;
+ 	acpi_status status;
+-	int ret, retval = 0;
+-	uint32_t result;
++	int retval = 0;
+ 
+ 	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ 	if (!dhandle)
+@@ -224,13 +251,11 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
+ 		return false;
+ 	}
+ 
+-	ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED,
+-			  NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
+-	if (ret == 0)
++	if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
+ 		retval |= NOUVEAU_DSM_HAS_MUX;
+ 
+-	ret = nouveau_optimus_dsm(dhandle, 0, 0, &result);
+-	if (ret == 0)
++	if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm,
++		NOUVEAU_DSM_OPTIMUS_FN))
+ 		retval |= NOUVEAU_DSM_HAS_OPT;
+ 
+ 	if (retval)
+@@ -269,15 +294,22 @@ static bool nouveau_dsm_detect(void)
+ 	}
+ 
+ 	if (vga_count == 2 && has_dsm && guid_valid) {
+-		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
++		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
++			&buffer);
+ 		printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
+-		       acpi_method_name);
++			acpi_method_name);
+ 		nouveau_dsm_priv.dsm_detected = true;
+ 		ret = true;
+ 	}
+ 
+-	if (has_optimus == 1)
++	if (has_optimus == 1) {
++		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
++			&buffer);
++		printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
++			acpi_method_name);
+ 		nouveau_dsm_priv.optimus_detected = true;
++		ret = true;
++	}
+ 
+ 	return ret;
+ }
+@@ -293,6 +325,17 @@ void nouveau_register_dsm_handler(void)
+ 	vga_switcheroo_register_handler(&nouveau_dsm_handler);
+ }
+ 
++/* Must be called for Optimus models before the card can be turned off */
++void nouveau_switcheroo_optimus_dsm(void)
++{
++	u32 result = 0;
++	if (!nouveau_dsm_priv.optimus_detected)
++		return;
++
++	nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN,
++		NOUVEAU_DSM_OPTIMUS_ARGS, &result);
++}
++
+ void nouveau_unregister_dsm_handler(void)
+ {
+ 	vga_switcheroo_unregister_handler();
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
+index 5fc201b..0be4a81 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
+@@ -27,6 +27,7 @@
+ #include "nouveau_drv.h"
+ #include "nouveau_hw.h"
+ #include "nouveau_encoder.h"
++#include "nouveau_gpio.h"
+ 
+ #include <linux/io-mapping.h>
+ 
+@@ -34,9 +35,6 @@
+ #define NV_CIO_CRE_44_HEADA 0x0
+ #define NV_CIO_CRE_44_HEADB 0x3
+ #define FEATURE_MOBILE 0x10	/* also FEATURE_QUADRO for BMP */
+-#define LEGACY_I2C_CRT 0x80
+-#define LEGACY_I2C_PANEL 0x81
+-#define LEGACY_I2C_TV 0x82
+ 
+ #define EDID1_LEN 128
+ 
+@@ -67,195 +65,233 @@ static bool nv_cksum(const uint8_t *data, unsigned int length)
+ }
+ 
+ static int
+-score_vbios(struct drm_device *dev, const uint8_t *data, const bool writeable)
++score_vbios(struct nvbios *bios, const bool writeable)
+ {
+-	if (!(data[0] == 0x55 && data[1] == 0xAA)) {
+-		NV_TRACEWARN(dev, "... BIOS signature not found\n");
++	if (!bios->data || bios->data[0] != 0x55 || bios->data[1] != 0xAA) {
++		NV_TRACEWARN(bios->dev, "... BIOS signature not found\n");
+ 		return 0;
+ 	}
+ 
+-	if (nv_cksum(data, data[2] * 512)) {
+-		NV_TRACEWARN(dev, "... BIOS checksum invalid\n");
++	if (nv_cksum(bios->data, bios->data[2] * 512)) {
++		NV_TRACEWARN(bios->dev, "... BIOS checksum invalid\n");
+ 		/* if a ro image is somewhat bad, it's probably all rubbish */
+ 		return writeable ? 2 : 1;
+-	} else
+-		NV_TRACE(dev, "... appears to be valid\n");
++	}
+ 
++	NV_TRACE(bios->dev, "... appears to be valid\n");
+ 	return 3;
+ }
+ 
+-static void load_vbios_prom(struct drm_device *dev, uint8_t *data)
++static void
++bios_shadow_prom(struct nvbios *bios)
+ {
++	struct drm_device *dev = bios->dev;
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	uint32_t pci_nv_20, save_pci_nv_20;
+-	int pcir_ptr;
++	u32 pcireg, access;
++	u16 pcir;
+ 	int i;
+ 
++	/* enable access to rom */
+ 	if (dev_priv->card_type >= NV_50)
+-		pci_nv_20 = 0x88050;
++		pcireg = 0x088050;
+ 	else
+-		pci_nv_20 = NV_PBUS_PCI_NV_20;
++		pcireg = NV_PBUS_PCI_NV_20;
++	access = nv_mask(dev, pcireg, 0x00000001, 0x00000000);
+ 
+-	/* enable ROM access */
+-	save_pci_nv_20 = nvReadMC(dev, pci_nv_20);
+-	nvWriteMC(dev, pci_nv_20,
+-		  save_pci_nv_20 & ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
++	/* bail if no rom signature, with a workaround for a PROM reading
++	 * issue on some chipsets.  the first read after a period of
++	 * inactivity returns the wrong result, so retry the first header
++	 * byte a few times before giving up as a workaround
++	 */
++	i = 16;
++	do {
++		if (nv_rd08(dev, NV_PROM_OFFSET + 0) == 0x55)
++			break;
++	} while (i--);
+ 
+-	/* bail if no rom signature */
+-	if (nv_rd08(dev, NV_PROM_OFFSET) != 0x55 ||
+-	    nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
++	if (!i || nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
+ 		goto out;
+ 
+ 	/* additional check (see note below) - read PCI record header */
+-	pcir_ptr = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
+-		   nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
+-	if (nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr) != 'P' ||
+-	    nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 1) != 'C' ||
+-	    nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 2) != 'I' ||
+-	    nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 3) != 'R')
++	pcir = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
++	       nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
++	if (nv_rd08(dev, NV_PROM_OFFSET + pcir + 0) != 'P' ||
++	    nv_rd08(dev, NV_PROM_OFFSET + pcir + 1) != 'C' ||
++	    nv_rd08(dev, NV_PROM_OFFSET + pcir + 2) != 'I' ||
++	    nv_rd08(dev, NV_PROM_OFFSET + pcir + 3) != 'R')
+ 		goto out;
+ 
+-	/* on some 6600GT/6800LE prom reads are messed up.  nvclock alleges a
+-	 * a good read may be obtained by waiting or re-reading (cargocult: 5x)
+-	 * each byte.  we'll hope pramin has something usable instead
+-	 */
+-	for (i = 0; i < NV_PROM_SIZE; i++)
+-		data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
++	/* read entire bios image to system memory */
++	bios->length = nv_rd08(dev, NV_PROM_OFFSET + 2) * 512;
++	bios->data = kmalloc(bios->length, GFP_KERNEL);
++	if (bios->data) {
++		for (i = 0; i < bios->length; i++)
++			bios->data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
++	}
+ 
+ out:
+-	/* disable ROM access */
+-	nvWriteMC(dev, pci_nv_20,
+-		  save_pci_nv_20 | NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
++	/* disable access to rom */
++	nv_wr32(dev, pcireg, access);
+ }
+ 
+-static void load_vbios_pramin(struct drm_device *dev, uint8_t *data)
++static void
++bios_shadow_pramin(struct nvbios *bios)
+ {
++	struct drm_device *dev = bios->dev;
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	uint32_t old_bar0_pramin = 0;
++	u32 bar0 = 0;
+ 	int i;
+ 
+ 	if (dev_priv->card_type >= NV_50) {
+ 		u64 addr = (u64)(nv_rd32(dev, 0x619f04) & 0xffffff00) << 8;
+ 		if (!addr) {
+-			addr  = (u64)nv_rd32(dev, 0x1700) << 16;
++			addr  = (u64)nv_rd32(dev, 0x001700) << 16;
+ 			addr += 0xf0000;
+ 		}
+ 
+-		old_bar0_pramin = nv_rd32(dev, 0x1700);
+-		nv_wr32(dev, 0x1700, addr >> 16);
++		bar0 = nv_mask(dev, 0x001700, 0xffffffff, addr >> 16);
+ 	}
+ 
+ 	/* bail if no rom signature */
+-	if (nv_rd08(dev, NV_PRAMIN_OFFSET) != 0x55 ||
++	if (nv_rd08(dev, NV_PRAMIN_OFFSET + 0) != 0x55 ||
+ 	    nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa)
+ 		goto out;
+ 
+-	for (i = 0; i < NV_PROM_SIZE; i++)
+-		data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
++	bios->length = nv_rd08(dev, NV_PRAMIN_OFFSET + 2) * 512;
++	bios->data = kmalloc(bios->length, GFP_KERNEL);
++	if (bios->data) {
++		for (i = 0; i < bios->length; i++)
++			bios->data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
++	}
+ 
+ out:
+ 	if (dev_priv->card_type >= NV_50)
+-		nv_wr32(dev, 0x1700, old_bar0_pramin);
++		nv_wr32(dev, 0x001700, bar0);
++}
++
++static void
++bios_shadow_pci(struct nvbios *bios)
++{
++	struct pci_dev *pdev = bios->dev->pdev;
++	size_t length;
++
++	if (!pci_enable_rom(pdev)) {
++		void __iomem *rom = pci_map_rom(pdev, &length);
++		if (rom && length) {
++			bios->data = kmalloc(length, GFP_KERNEL);
++			if (bios->data) {
++				memcpy_fromio(bios->data, rom, length);
++				bios->length = length;
++			}
++		}
++		if (rom)
++			pci_unmap_rom(pdev, rom);
++
++		pci_disable_rom(pdev);
++	}
+ }
+ 
+-static void load_vbios_pci(struct drm_device *dev, uint8_t *data)
++static void
++bios_shadow_acpi(struct nvbios *bios)
+ {
+-	void __iomem *rom = NULL;
+-	size_t rom_len;
+-	int ret;
++	struct pci_dev *pdev = bios->dev->pdev;
++	int ptr, len, ret;
++	u8 data[3];
+ 
+-	ret = pci_enable_rom(dev->pdev);
+-	if (ret)
++	if (!nouveau_acpi_rom_supported(pdev))
+ 		return;
+ 
+-	rom = pci_map_rom(dev->pdev, &rom_len);
+-	if (!rom)
+-		goto out;
+-	memcpy_fromio(data, rom, rom_len);
+-	pci_unmap_rom(dev->pdev, rom);
++	ret = nouveau_acpi_get_bios_chunk(data, 0, sizeof(data));
++	if (ret != sizeof(data))
++		return;
+ 
+-out:
+-	pci_disable_rom(dev->pdev);
+-}
++	bios->length = min(data[2] * 512, 65536);
++	bios->data = kmalloc(bios->length, GFP_KERNEL);
++	if (!bios->data)
++		return;
+ 
+-static void load_vbios_acpi(struct drm_device *dev, uint8_t *data)
+-{
+-	int i;
+-	int ret;
+-	int size = 64 * 1024;
++	len = bios->length;
++	ptr = 0;
++	while (len) {
++		int size = (len > ROM_BIOS_PAGE) ? ROM_BIOS_PAGE : len;
+ 
+-	if (!nouveau_acpi_rom_supported(dev->pdev))
+-		return;
++		ret = nouveau_acpi_get_bios_chunk(bios->data, ptr, size);
++		if (ret != size) {
++			kfree(bios->data);
++			bios->data = NULL;
++			return;
++		}
+ 
+-	for (i = 0; i < (size / ROM_BIOS_PAGE); i++) {
+-		ret = nouveau_acpi_get_bios_chunk(data,
+-						  (i * ROM_BIOS_PAGE),
+-						  ROM_BIOS_PAGE);
+-		if (ret <= 0)
+-			break;
++		len -= size;
++		ptr += size;
+ 	}
+-	return;
+ }
+ 
+ struct methods {
+ 	const char desc[8];
+-	void (*loadbios)(struct drm_device *, uint8_t *);
++	void (*shadow)(struct nvbios *);
+ 	const bool rw;
++	int score;
++	u32 size;
++	u8 *data;
+ };
+ 
+-static struct methods shadow_methods[] = {
+-	{ "PRAMIN", load_vbios_pramin, true },
+-	{ "PROM", load_vbios_prom, false },
+-	{ "PCIROM", load_vbios_pci, true },
+-	{ "ACPI", load_vbios_acpi, true },
+-};
+-#define NUM_SHADOW_METHODS ARRAY_SIZE(shadow_methods)
+-
+-static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
+-{
+-	struct methods *methods = shadow_methods;
+-	int testscore = 3;
+-	int scores[NUM_SHADOW_METHODS], i;
++static bool
++bios_shadow(struct drm_device *dev)
++{
++	struct methods shadow_methods[] = {
++		{ "PRAMIN", bios_shadow_pramin, true, 0, 0, NULL },
++		{ "PROM", bios_shadow_prom, false, 0, 0, NULL },
++		{ "ACPI", bios_shadow_acpi, true, 0, 0, NULL },
++		{ "PCIROM", bios_shadow_pci, true, 0, 0, NULL },
++		{}
++	};
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->vbios;
++	struct methods *mthd, *best;
+ 
+ 	if (nouveau_vbios) {
+-		for (i = 0; i < NUM_SHADOW_METHODS; i++)
+-			if (!strcasecmp(nouveau_vbios, methods[i].desc))
+-				break;
+-
+-		if (i < NUM_SHADOW_METHODS) {
+-			NV_INFO(dev, "Attempting to use BIOS image from %s\n",
+-				methods[i].desc);
++		mthd = shadow_methods;
++		do {
++			if (strcasecmp(nouveau_vbios, mthd->desc))
++				continue;
++			NV_INFO(dev, "VBIOS source: %s\n", mthd->desc);
+ 
+-			methods[i].loadbios(dev, data);
+-			if (score_vbios(dev, data, methods[i].rw))
++			mthd->shadow(bios);
++			mthd->score = score_vbios(bios, mthd->rw);
++			if (mthd->score)
+ 				return true;
+-		}
++		} while ((++mthd)->shadow);
+ 
+ 		NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
+ 	}
+ 
+-	for (i = 0; i < NUM_SHADOW_METHODS; i++) {
+-		NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
+-			 methods[i].desc);
+-		data[0] = data[1] = 0;	/* avoid reuse of previous image */
+-		methods[i].loadbios(dev, data);
+-		scores[i] = score_vbios(dev, data, methods[i].rw);
+-		if (scores[i] == testscore)
+-			return true;
+-	}
+-
+-	while (--testscore > 0) {
+-		for (i = 0; i < NUM_SHADOW_METHODS; i++) {
+-			if (scores[i] == testscore) {
+-				NV_TRACE(dev, "Using BIOS image from %s\n",
+-					 methods[i].desc);
+-				methods[i].loadbios(dev, data);
+-				return true;
+-			}
++	mthd = shadow_methods;
++	do {
++		NV_TRACE(dev, "Checking %s for VBIOS\n", mthd->desc);
++		mthd->shadow(bios);
++		mthd->score = score_vbios(bios, mthd->rw);
++		mthd->size = bios->length;
++		mthd->data = bios->data;
++	} while (mthd->score != 3 && (++mthd)->shadow);
++
++	mthd = shadow_methods;
++	best = mthd;
++	do {
++		if (mthd->score > best->score) {
++			kfree(best->data);
++			best = mthd;
+ 		}
++	} while ((++mthd)->shadow);
++
++	if (best->score) {
++		NV_TRACE(dev, "Using VBIOS from %s\n", best->desc);
++		bios->length = best->size;
++		bios->data = best->data;
++		return true;
+ 	}
+ 
+-	NV_ERROR(dev, "No valid BIOS image found\n");
++	NV_ERROR(dev, "No valid VBIOS image found\n");
+ 	return false;
+ }
+ 
+@@ -723,115 +759,19 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
+ 	return dcb_entry;
+ }
+ 
+-static int
+-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
+-{
+-	uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
+-	int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
+-	int recordoffset = 0, rdofs = 1, wrofs = 0;
+-	uint8_t port_type = 0;
+-
+-	if (!i2ctable)
+-		return -EINVAL;
+-
+-	if (dcb_version >= 0x30) {
+-		if (i2ctable[0] != dcb_version) /* necessary? */
+-			NV_WARN(dev,
+-				"DCB I2C table version mismatch (%02X vs %02X)\n",
+-				i2ctable[0], dcb_version);
+-		dcb_i2c_ver = i2ctable[0];
+-		headerlen = i2ctable[1];
+-		if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
+-			i2c_entries = i2ctable[2];
+-		else
+-			NV_WARN(dev,
+-				"DCB I2C table has more entries than indexable "
+-				"(%d entries, max %d)\n", i2ctable[2],
+-				DCB_MAX_NUM_I2C_ENTRIES);
+-		entry_len = i2ctable[3];
+-		/* [4] is i2c_default_indices, read in parse_dcb_table() */
+-	}
+-	/*
+-	 * It's your own fault if you call this function on a DCB 1.1 BIOS --
+-	 * the test below is for DCB 1.2
+-	 */
+-	if (dcb_version < 0x14) {
+-		recordoffset = 2;
+-		rdofs = 0;
+-		wrofs = 1;
+-	}
+-
+-	if (index == 0xf)
+-		return 0;
+-	if (index >= i2c_entries) {
+-		NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
+-			 index, i2ctable[2]);
+-		return -ENOENT;
+-	}
+-	if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
+-		NV_ERROR(dev, "DCB I2C entry invalid\n");
+-		return -EINVAL;
+-	}
+-
+-	if (dcb_i2c_ver >= 0x30) {
+-		port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
+-
+-		/*
+-		 * Fixup for chips using same address offset for read and
+-		 * write.
+-		 */
+-		if (port_type == 4)	/* seen on C51 */
+-			rdofs = wrofs = 1;
+-		if (port_type >= 5)	/* G80+ */
+-			rdofs = wrofs = 0;
+-	}
+-
+-	if (dcb_i2c_ver >= 0x40) {
+-		if (port_type != 5 && port_type != 6)
+-			NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
+-
+-		i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
+-	}
+-
+-	i2c->port_type = port_type;
+-	i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
+-	i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
+-
+-	return 0;
+-}
+-
+ static struct nouveau_i2c_chan *
+ init_i2c_device_find(struct drm_device *dev, int i2c_index)
+ {
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct dcb_table *dcb = &dev_priv->vbios.dcb;
+-
+ 	if (i2c_index == 0xff) {
++		struct drm_nouveau_private *dev_priv = dev->dev_private;
++		struct dcb_table *dcb = &dev_priv->vbios.dcb;
+ 		/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
+-		int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
+-		int default_indices = dcb->i2c_default_indices;
++		int idx = dcb_entry_idx_from_crtchead(dev);
+ 
++		i2c_index = NV_I2C_DEFAULT(0);
+ 		if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
+-			shift = 4;
+-
+-		i2c_index = (default_indices >> shift) & 0xf;
++			i2c_index = NV_I2C_DEFAULT(1);
+ 	}
+-	if (i2c_index == 0x80)	/* g80+ */
+-		i2c_index = dcb->i2c_default_indices & 0xf;
+-	else
+-	if (i2c_index == 0x81)
+-		i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
+-
+-	if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) {
+-		NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
+-		return NULL;
+-	}
+-
+-	/* Make sure i2c table entry has been parsed, it may not
+-	 * have been if this is a bus not referenced by a DCB encoder
+-	 */
+-	read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+-			   i2c_index, &dcb->i2c[i2c_index]);
+ 
+ 	return nouveau_i2c_find(dev, i2c_index);
+ }
+@@ -1199,17 +1139,14 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+ 
+ 	switch (cond) {
+ 	case 0:
+-	{
+-		struct dcb_connector_table_entry *ent =
+-			&bios->dcb.connector.entry[dcb->connector];
+-
+-		if (ent->type != DCB_CONNECTOR_eDP)
++		entry = dcb_conn(dev, dcb->connector);
++		if (!entry || entry[0] != DCB_CONNECTOR_eDP)
+ 			iexec->execute = false;
+-	}
+ 		break;
+ 	case 1:
+ 	case 2:
+-		if (!(entry[5] & cond))
++		if ((table[0]  < 0x40 && !(entry[5] & cond)) ||
++		    (table[0] == 0x40 && !(entry[4] & cond)))
+ 			iexec->execute = false;
+ 		break;
+ 	case 5:
+@@ -3227,49 +3164,6 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+ 	return 1;
+ }
+ 
+-static void
+-init_gpio_unknv50(struct nvbios *bios, struct dcb_gpio_entry *gpio)
+-{
+-	const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
+-	u32 r, s, v;
+-
+-	/* Not a clue, needs de-magicing */
+-	r = nv50_gpio_ctl[gpio->line >> 4];
+-	s = (gpio->line & 0x0f);
+-	v = bios_rd32(bios, r) & ~(0x00010001 << s);
+-	switch ((gpio->entry & 0x06000000) >> 25) {
+-	case 1:
+-		v |= (0x00000001 << s);
+-		break;
+-	case 2:
+-		v |= (0x00010000 << s);
+-		break;
+-	default:
+-		break;
+-	}
+-
+-	bios_wr32(bios, r, v);
+-}
+-
+-static void
+-init_gpio_unknvd0(struct nvbios *bios, struct dcb_gpio_entry *gpio)
+-{
+-	u32 v, i;
+-
+-	v  = bios_rd32(bios, 0x00d610 + (gpio->line * 4));
+-	v &= 0xffffff00;
+-	v |= (gpio->entry & 0x00ff0000) >> 16;
+-	bios_wr32(bios, 0x00d610 + (gpio->line * 4), v);
+-
+-	i = (gpio->entry & 0x1f000000) >> 24;
+-	if (i) {
+-		v  = bios_rd32(bios, 0x00d640 + ((i - 1) * 4));
+-		v &= 0xffffff00;
+-		v |= gpio->line;
+-		bios_wr32(bios, 0x00d640 + ((i - 1) * 4), v);
+-	}
+-}
+-
+ static int
+ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+ {
+@@ -3282,35 +3176,8 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+ 	 * each GPIO according to various values listed in each entry
+ 	 */
+ 
+-	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+-	int i;
+-
+-	if (dev_priv->card_type < NV_50) {
+-		NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n");
+-		return 1;
+-	}
+-
+-	if (!iexec->execute)
+-		return 1;
+-
+-	for (i = 0; i < bios->dcb.gpio.entries; i++) {
+-		struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
+-
+-		BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
+-
+-		BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
+-			offset, gpio->tag, gpio->state_default);
+-
+-		if (!bios->execute)
+-			continue;
+-
+-		pgpio->set(bios->dev, gpio->tag, gpio->state_default);
+-		if (dev_priv->card_type < NV_D0)
+-			init_gpio_unknv50(bios, gpio);
+-		else
+-			init_gpio_unknvd0(bios, gpio);
+-	}
++	if (iexec->execute && bios->execute)
++		nouveau_gpio_reset(bios->dev);
+ 
+ 	return 1;
+ }
+@@ -4407,18 +4274,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
+ 		break;
+ 	}
+ 
+-	/* Dell Latitude D620 reports a too-high value for the dual-link
+-	 * transition freq, causing us to program the panel incorrectly.
+-	 *
+-	 * It doesn't appear the VBIOS actually uses its transition freq
+-	 * (90000kHz), instead it uses the "Number of LVDS channels" field
+-	 * out of the panel ID structure (http://www.spwg.org/).
+-	 *
+-	 * For the moment, a quirk will do :)
+-	 */
+-	if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
+-		bios->fp.duallink_transition_clk = 80000;
+-
+ 	/* set dual_link flag for EDID case */
+ 	if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
+ 		bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
+@@ -4541,7 +4396,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
+ 	NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
+ 			dcbent->type, dcbent->location, dcbent->or);
+ 	for (i = 0; i < table[3]; i++) {
+-		otable = ROMPTR(bios, table[table[1] + (i * table[2])]);
++		otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
+ 		if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
+ 			break;
+ 	}
+@@ -4719,7 +4574,7 @@ static struct pll_mapping nv84_pll_mapping[] = {
+ 	{ PLL_CORE  , 0x004028 },
+ 	{ PLL_SHADER, 0x004020 },
+ 	{ PLL_MEMORY, 0x004008 },
+-	{ PLL_UNK05 , 0x004030 },
++	{ PLL_VDEC  , 0x004030 },
+ 	{ PLL_UNK41 , 0x00e818 },
+ 	{ PLL_VPLL0 , 0x614100 },
+ 	{ PLL_VPLL1 , 0x614900 },
+@@ -5485,6 +5340,9 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
+ 	struct nvbios *bios = &dev_priv->vbios;
+ 	u8 entries, *entry;
+ 
++	if (bios->type != NVBIOS_BIT)
++		return -ENODEV;
++
+ 	entries = bios->data[bios->offset + 10];
+ 	entry   = &bios->data[bios->offset + 12];
+ 	while (entries--) {
+@@ -5493,7 +5351,7 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
+ 			bit->version = entry[1];
+ 			bit->length = ROM16(entry[2]);
+ 			bit->offset = ROM16(entry[4]);
+-			bit->data = ROMPTR(bios, entry[4]);
++			bit->data = ROMPTR(dev, entry[4]);
+ 			return 0;
+ 		}
+ 
+@@ -5598,10 +5456,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
+ 	uint16_t legacy_scripts_offset, legacy_i2c_offset;
+ 
+ 	/* load needed defaults in case we can't parse this info */
+-	bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
+-	bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
+-	bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
+-	bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
+ 	bios->digital_min_front_porch = 0x4b;
+ 	bios->fmaxvco = 256000;
+ 	bios->fminvco = 128000;
+@@ -5709,14 +5563,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
+ 	bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
+ 	bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
+ 	bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
+-	if (bios->data[legacy_i2c_offset + 4])
+-		bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
+-	if (bios->data[legacy_i2c_offset + 5])
+-		bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
+-	if (bios->data[legacy_i2c_offset + 6])
+-		bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
+-	if (bios->data[legacy_i2c_offset + 7])
+-		bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
+ 
+ 	if (bmplength > 74) {
+ 		bios->fmaxvco = ROM32(bmp[67]);
+@@ -5767,286 +5613,128 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
+ 	return 0;
+ }
+ 
+-static struct dcb_gpio_entry *
+-new_gpio_entry(struct nvbios *bios)
+-{
+-	struct drm_device *dev = bios->dev;
+-	struct dcb_gpio_table *gpio = &bios->dcb.gpio;
+-
+-	if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) {
+-		NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n");
+-		return NULL;
+-	}
+-
+-	return &gpio->entry[gpio->entries++];
+-}
+-
+-struct dcb_gpio_entry *
+-nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
++void *
++dcb_table(struct drm_device *dev)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nvbios *bios = &dev_priv->vbios;
+-	int i;
+-
+-	for (i = 0; i < bios->dcb.gpio.entries; i++) {
+-		if (bios->dcb.gpio.entry[i].tag != tag)
+-			continue;
++	u8 *dcb = NULL;
+ 
+-		return &bios->dcb.gpio.entry[i];
++	if (dev_priv->card_type > NV_04)
++		dcb = ROMPTR(dev, dev_priv->vbios.data[0x36]);
++	if (!dcb) {
++		NV_WARNONCE(dev, "No DCB data found in VBIOS\n");
++		return NULL;
+ 	}
+ 
+-	return NULL;
+-}
+-
+-static void
+-parse_dcb_gpio_table(struct nvbios *bios)
+-{
+-	struct drm_device *dev = bios->dev;
+-	struct dcb_gpio_entry *e;
+-	u8 headerlen, entries, recordlen;
+-	u8 *dcb, *gpio = NULL, *entry;
+-	int i;
+-
+-	dcb = ROMPTR(bios, bios->data[0x36]);
++	if (dcb[0] >= 0x41) {
++		NV_WARNONCE(dev, "DCB version 0x%02x unknown\n", dcb[0]);
++		return NULL;
++	} else
+ 	if (dcb[0] >= 0x30) {
+-		gpio = ROMPTR(bios, dcb[10]);
+-		if (!gpio)
+-			goto no_table;
+-
+-		headerlen = gpio[1];
+-		entries   = gpio[2];
+-		recordlen = gpio[3];
++		if (ROM32(dcb[6]) == 0x4edcbdcb)
++			return dcb;
+ 	} else
+-	if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) {
+-		gpio = ROMPTR(bios, dcb[-15]);
+-		if (!gpio)
+-			goto no_table;
+-
+-		headerlen = 3;
+-		entries   = gpio[2];
+-		recordlen = gpio[1];
++	if (dcb[0] >= 0x20) {
++		if (ROM32(dcb[4]) == 0x4edcbdcb)
++			return dcb;
+ 	} else
+-	if (dcb[0] >= 0x22) {
+-		/* No GPIO table present, parse the TVDAC GPIO data. */
+-		uint8_t *tvdac_gpio = &dcb[-5];
+-
+-		if (tvdac_gpio[0] & 1) {
+-			e = new_gpio_entry(bios);
+-			e->tag = DCB_GPIO_TVDAC0;
+-			e->line = tvdac_gpio[1] >> 4;
+-			e->invert = tvdac_gpio[0] & 2;
+-		}
+-
+-		goto no_table;
++	if (dcb[0] >= 0x15) {
++		if (!memcmp(&dcb[-7], "DEV_REC", 7))
++			return dcb;
+ 	} else {
+-		NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]);
+-		goto no_table;
+-	}
+-
+-	entry = gpio + headerlen;
+-	for (i = 0; i < entries; i++, entry += recordlen) {
+-		e = new_gpio_entry(bios);
+-		if (!e)
+-			break;
+-
+-		if (gpio[0] < 0x40) {
+-			e->entry = ROM16(entry[0]);
+-			e->tag = (e->entry & 0x07e0) >> 5;
+-			if (e->tag == 0x3f) {
+-				bios->dcb.gpio.entries--;
+-				continue;
+-			}
+-
+-			e->line = (e->entry & 0x001f);
+-			e->invert = ((e->entry & 0xf800) >> 11) != 4;
+-		} else {
+-			e->entry = ROM32(entry[0]);
+-			e->tag = (e->entry & 0x0000ff00) >> 8;
+-			if (e->tag == 0xff) {
+-				bios->dcb.gpio.entries--;
+-				continue;
+-			}
+-
+-			e->line = (e->entry & 0x0000001f) >> 0;
+-			if (gpio[0] == 0x40) {
+-				e->state_default = (e->entry & 0x01000000) >> 24;
+-				e->state[0] = (e->entry & 0x18000000) >> 27;
+-				e->state[1] = (e->entry & 0x60000000) >> 29;
+-			} else {
+-				e->state_default = (e->entry & 0x00000080) >> 7;
+-				e->state[0] = (entry[4] >> 4) & 3;
+-				e->state[1] = (entry[4] >> 6) & 3;
+-			}
+-		}
+-	}
+-
+-no_table:
+-	/* Apple iMac G4 NV18 */
+-	if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+-		e = new_gpio_entry(bios);
+-		if (e) {
+-			e->tag = DCB_GPIO_TVDAC0;
+-			e->line = 4;
+-		}
+-	}
+-}
+-
+-struct dcb_connector_table_entry *
+-nouveau_bios_connector_entry(struct drm_device *dev, int index)
+-{
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nvbios *bios = &dev_priv->vbios;
+-	struct dcb_connector_table_entry *cte;
+-
+-	if (index >= bios->dcb.connector.entries)
+-		return NULL;
+-
+-	cte = &bios->dcb.connector.entry[index];
+-	if (cte->type == 0xff)
++		/*
++		 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
++		 * always has the same single (crt) entry, even when tv-out
++		 * present, so the conclusion is this version cannot really
++		 * be used.
++		 *
++		 * v1.2 tables (some NV6/10, and NV15+) normally have the
++		 * same 5 entries, which are not specific to the card and so
++		 * no use.
++		 *
++		 * v1.2 does have an I2C table that read_dcb_i2c_table can
++		 * handle, but cards exist (nv11 in #14821) with a bad i2c
++		 * table pointer, so use the indices parsed in
++		 * parse_bmp_structure.
++		 *
++		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
++		 */
++		NV_WARNONCE(dev, "No useful DCB data in VBIOS\n");
+ 		return NULL;
++	}
+ 
+-	return cte;
++	NV_WARNONCE(dev, "DCB header validation failed\n");
++	return NULL;
+ }
+ 
+-static enum dcb_connector_type
+-divine_connector_type(struct nvbios *bios, int index)
++void *
++dcb_outp(struct drm_device *dev, u8 idx)
+ {
+-	struct dcb_table *dcb = &bios->dcb;
+-	unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
+-	int i;
+-
+-	for (i = 0; i < dcb->entries; i++) {
+-		if (dcb->entry[i].connector == index)
+-			encoders |= (1 << dcb->entry[i].type);
+-	}
+-
+-	if (encoders & (1 << OUTPUT_DP)) {
+-		if (encoders & (1 << OUTPUT_TMDS))
+-			type = DCB_CONNECTOR_DP;
+-		else
+-			type = DCB_CONNECTOR_eDP;
++	u8 *dcb = dcb_table(dev);
++	if (dcb && dcb[0] >= 0x30) {
++		if (idx < dcb[2])
++			return dcb + dcb[1] + (idx * dcb[3]);
+ 	} else
+-	if (encoders & (1 << OUTPUT_TMDS)) {
+-		if (encoders & (1 << OUTPUT_ANALOG))
+-			type = DCB_CONNECTOR_DVI_I;
+-		else
+-			type = DCB_CONNECTOR_DVI_D;
+-	} else
+-	if (encoders & (1 << OUTPUT_ANALOG)) {
+-		type = DCB_CONNECTOR_VGA;
+-	} else
+-	if (encoders & (1 << OUTPUT_LVDS)) {
+-		type = DCB_CONNECTOR_LVDS;
++	if (dcb && dcb[0] >= 0x20) {
++		u8 *i2c = ROMPTR(dev, dcb[2]);
++		u8 *ent = dcb + 8 + (idx * 8);
++		if (i2c && ent < i2c)
++			return ent;
+ 	} else
+-	if (encoders & (1 << OUTPUT_TV)) {
+-		type = DCB_CONNECTOR_TV_0;
++	if (dcb && dcb[0] >= 0x15) {
++		u8 *i2c = ROMPTR(dev, dcb[2]);
++		u8 *ent = dcb + 4 + (idx * 10);
++		if (i2c && ent < i2c)
++			return ent;
+ 	}
+ 
+-	return type;
++	return NULL;
+ }
+ 
+-static void
+-apply_dcb_connector_quirks(struct nvbios *bios, int idx)
+-{
+-	struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
+-	struct drm_device *dev = bios->dev;
++int
++dcb_outp_foreach(struct drm_device *dev, void *data,
++		 int (*exec)(struct drm_device *, void *, int idx, u8 *outp))
++{
++	int ret, idx = -1;
++	u8 *outp = NULL;
++	while ((outp = dcb_outp(dev, ++idx))) {
++		if (ROM32(outp[0]) == 0x00000000)
++			break; /* seen on an NV11 with DCB v1.5 */
++		if (ROM32(outp[0]) == 0xffffffff)
++			break; /* seen on an NV17 with DCB v2.0 */
++
++		if ((outp[0] & 0x0f) == OUTPUT_UNUSED)
++			continue;
++		if ((outp[0] & 0x0f) == OUTPUT_EOL)
++			break;
+ 
+-	/* Gigabyte NX85T */
+-	if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
+-		if (cte->type == DCB_CONNECTOR_HDMI_1)
+-			cte->type = DCB_CONNECTOR_DVI_I;
++		ret = exec(dev, data, idx, outp);
++		if (ret)
++			return ret;
+ 	}
+ 
+-	/* Gigabyte GV-NX86T512H */
+-	if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
+-		if (cte->type == DCB_CONNECTOR_HDMI_1)
+-			cte->type = DCB_CONNECTOR_DVI_I;
+-	}
++	return 0;
+ }
+ 
+-static const u8 hpd_gpio[16] = {
+-	0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
+-	0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
+-};
+-
+-static void
+-parse_dcb_connector_table(struct nvbios *bios)
++u8 *
++dcb_conntab(struct drm_device *dev)
+ {
+-	struct drm_device *dev = bios->dev;
+-	struct dcb_connector_table *ct = &bios->dcb.connector;
+-	struct dcb_connector_table_entry *cte;
+-	uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
+-	uint8_t *entry;
+-	int i;
+-
+-	if (!bios->dcb.connector_table_ptr) {
+-		NV_DEBUG_KMS(dev, "No DCB connector table present\n");
+-		return;
+-	}
+-
+-	NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
+-		conntab[0], conntab[1], conntab[2], conntab[3]);
+-	if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
+-	    (conntab[3] != 2 && conntab[3] != 4)) {
+-		NV_ERROR(dev, "  Unknown!  Please report.\n");
+-		return;
++	u8 *dcb = dcb_table(dev);
++	if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) {
++		u8 *conntab = ROMPTR(dev, dcb[0x14]);
++		if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40)
++			return conntab;
+ 	}
++	return NULL;
++}
+ 
+-	ct->entries = conntab[2];
+-
+-	entry = conntab + conntab[1];
+-	cte = &ct->entry[0];
+-	for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
+-		cte->index = i;
+-		if (conntab[3] == 2)
+-			cte->entry = ROM16(entry[0]);
+-		else
+-			cte->entry = ROM32(entry[0]);
+-
+-		cte->type  = (cte->entry & 0x000000ff) >> 0;
+-		cte->index2 = (cte->entry & 0x00000f00) >> 8;
+-
+-		cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
+-		cte->gpio_tag = hpd_gpio[cte->gpio_tag];
+-
+-		if (cte->type == 0xff)
+-			continue;
+-
+-		apply_dcb_connector_quirks(bios, i);
+-
+-		NV_INFO(dev, "  %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
+-			i, cte->entry, cte->type, cte->index, cte->gpio_tag);
+-
+-		/* check for known types, fallback to guessing the type
+-		 * from attached encoders if we hit an unknown.
+-		 */
+-		switch (cte->type) {
+-		case DCB_CONNECTOR_VGA:
+-		case DCB_CONNECTOR_TV_0:
+-		case DCB_CONNECTOR_TV_1:
+-		case DCB_CONNECTOR_TV_3:
+-		case DCB_CONNECTOR_DVI_I:
+-		case DCB_CONNECTOR_DVI_D:
+-		case DCB_CONNECTOR_LVDS:
+-		case DCB_CONNECTOR_LVDS_SPWG:
+-		case DCB_CONNECTOR_DP:
+-		case DCB_CONNECTOR_eDP:
+-		case DCB_CONNECTOR_HDMI_0:
+-		case DCB_CONNECTOR_HDMI_1:
+-			break;
+-		default:
+-			cte->type = divine_connector_type(bios, cte->index);
+-			NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
+-			break;
+-		}
+-
+-		if (nouveau_override_conntype) {
+-			int type = divine_connector_type(bios, cte->index);
+-			if (type != cte->type)
+-				NV_WARN(dev, " -> type 0x%02x\n", cte->type);
+-		}
+-
+-	}
++u8 *
++dcb_conn(struct drm_device *dev, u8 idx)
++{
++	u8 *conntab = dcb_conntab(dev);
++	if (conntab && idx < conntab[2])
++		return conntab + conntab[1] + (idx * conntab[3]);
++	return NULL;
+ }
+ 
+ static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
+@@ -6079,8 +5767,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
+ 	entry->type = conn & 0xf;
+ 	entry->i2c_index = (conn >> 4) & 0xf;
+ 	entry->heads = (conn >> 8) & 0xf;
+-	if (dcb->version >= 0x40)
+-		entry->connector = (conn >> 12) & 0xf;
++	entry->connector = (conn >> 12) & 0xf;
+ 	entry->bus = (conn >> 16) & 0xf;
+ 	entry->location = (conn >> 20) & 0x3;
+ 	entry->or = (conn >> 24) & 0xf;
+@@ -6252,25 +5939,6 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
+ 	return true;
+ }
+ 
+-static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
+-			    uint32_t conn, uint32_t conf)
+-{
+-	struct dcb_entry *entry = new_dcb_entry(dcb);
+-	bool ret;
+-
+-	if (dcb->version >= 0x20)
+-		ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
+-	else
+-		ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
+-	if (!ret)
+-		return ret;
+-
+-	read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+-			   entry->i2c_index, &dcb->i2c[entry->i2c_index]);
+-
+-	return true;
+-}
+-
+ static
+ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
+ {
+@@ -6431,154 +6099,122 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
+ #endif
+ 
+ 	/* Make up some sane defaults */
+-	fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
++	fabricate_dcb_output(dcb, OUTPUT_ANALOG,
++			     bios->legacy.i2c_indices.crt, 1, 1);
+ 
+ 	if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
+-		fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
++		fabricate_dcb_output(dcb, OUTPUT_TV,
++				     bios->legacy.i2c_indices.tv,
+ 				     all_heads, 0);
+ 
+ 	else if (bios->tmds.output0_script_ptr ||
+ 		 bios->tmds.output1_script_ptr)
+-		fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
++		fabricate_dcb_output(dcb, OUTPUT_TMDS,
++				     bios->legacy.i2c_indices.panel,
+ 				     all_heads, 1);
+ }
+ 
+ static int
+-parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
++parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct dcb_table *dcb = &bios->dcb;
+-	uint16_t dcbptr = 0, i2ctabptr = 0;
+-	uint8_t *dcbtable;
+-	uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
+-	bool configblock = true;
+-	int recordlength = 8, confofs = 4;
+-	int i;
+-
+-	/* get the offset from 0x36 */
+-	if (dev_priv->card_type > NV_04) {
+-		dcbptr = ROM16(bios->data[0x36]);
+-		if (dcbptr == 0x0000)
+-			NV_WARN(dev, "No output data (DCB) found in BIOS\n");
+-	}
+-
+-	/* this situation likely means a really old card, pre DCB */
+-	if (dcbptr == 0x0) {
+-		fabricate_dcb_encoder_table(dev, bios);
+-		return 0;
+-	}
+-
+-	dcbtable = &bios->data[dcbptr];
+-
+-	/* get DCB version */
+-	dcb->version = dcbtable[0];
+-	NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
+-		 dcb->version >> 4, dcb->version & 0xf);
+-
+-	if (dcb->version >= 0x20) { /* NV17+ */
+-		uint32_t sig;
++	struct dcb_table *dcb = &dev_priv->vbios.dcb;
++	u32 conf = (dcb->version >= 0x20) ? ROM32(outp[4]) : ROM32(outp[6]);
++	u32 conn = ROM32(outp[0]);
++	bool ret;
+ 
+-		if (dcb->version >= 0x30) { /* NV40+ */
+-			headerlen = dcbtable[1];
+-			entries = dcbtable[2];
+-			recordlength = dcbtable[3];
+-			i2ctabptr = ROM16(dcbtable[4]);
+-			sig = ROM32(dcbtable[6]);
+-			dcb->gpio_table_ptr = ROM16(dcbtable[10]);
+-			dcb->connector_table_ptr = ROM16(dcbtable[20]);
+-		} else {
+-			i2ctabptr = ROM16(dcbtable[2]);
+-			sig = ROM32(dcbtable[4]);
+-			headerlen = 8;
+-		}
++	if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) {
++		struct dcb_entry *entry = new_dcb_entry(dcb);
+ 
+-		if (sig != 0x4edcbdcb) {
+-			NV_ERROR(dev, "Bad Display Configuration Block "
+-					"signature (%08X)\n", sig);
+-			return -EINVAL;
+-		}
+-	} else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
+-		char sig[8] = { 0 };
++		NV_TRACEWARN(dev, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
+ 
+-		strncpy(sig, (char *)&dcbtable[-7], 7);
+-		i2ctabptr = ROM16(dcbtable[2]);
+-		recordlength = 10;
+-		confofs = 6;
++		if (dcb->version >= 0x20)
++			ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
++		else
++			ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
++		if (!ret)
++			return 1; /* stop parsing */
+ 
+-		if (strcmp(sig, "DEV_REC")) {
+-			NV_ERROR(dev, "Bad Display Configuration Block "
+-					"signature (%s)\n", sig);
+-			return -EINVAL;
+-		}
+-	} else {
+-		/*
+-		 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
+-		 * has the same single (crt) entry, even when tv-out present, so
+-		 * the conclusion is this version cannot really be used.
+-		 * v1.2 tables (some NV6/10, and NV15+) normally have the same
+-		 * 5 entries, which are not specific to the card and so no use.
+-		 * v1.2 does have an I2C table that read_dcb_i2c_table can
+-		 * handle, but cards exist (nv11 in #14821) with a bad i2c table
+-		 * pointer, so use the indices parsed in parse_bmp_structure.
+-		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
++		/* Ignore the I2C index for on-chip TV-out, as there
++		 * are cards with bogus values (nv31m in bug 23212),
++		 * and it's otherwise useless.
+ 		 */
+-		NV_TRACEWARN(dev, "No useful information in BIOS output table; "
+-				  "adding all possible outputs\n");
+-		fabricate_dcb_encoder_table(dev, bios);
+-		return 0;
++		if (entry->type == OUTPUT_TV &&
++		    entry->location == DCB_LOC_ON_CHIP)
++			entry->i2c_index = 0x0f;
+ 	}
+ 
+-	if (!i2ctabptr)
+-		NV_WARN(dev, "No pointer to DCB I2C port table\n");
+-	else {
+-		dcb->i2c_table = &bios->data[i2ctabptr];
+-		if (dcb->version >= 0x30)
+-			dcb->i2c_default_indices = dcb->i2c_table[4];
++	return 0;
++}
+ 
+-		/*
+-		 * Parse the "management" I2C bus, used for hardware
+-		 * monitoring and some external TMDS transmitters.
+-		 */
+-		if (dcb->version >= 0x22) {
+-			int idx = (dcb->version >= 0x40 ?
+-				   dcb->i2c_default_indices & 0xf :
+-				   2);
++static void
++dcb_fake_connectors(struct nvbios *bios)
++{
++	struct dcb_table *dcbt = &bios->dcb;
++	u8 map[16] = { };
++	int i, idx = 0;
+ 
+-			read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+-					   idx, &dcb->i2c[idx]);
++	/* heuristic: if we ever get a non-zero connector field, assume
++	 * that all the indices are valid and we don't need fake them.
++	 *
++	 * and, as usual, a blacklist of boards with bad bios data..
++	 */
++	if (!nv_match_device(bios->dev, 0x0392, 0x107d, 0x20a2)) {
++		for (i = 0; i < dcbt->entries; i++) {
++			if (dcbt->entry[i].connector)
++				return;
+ 		}
+ 	}
+ 
+-	if (entries > DCB_MAX_NUM_ENTRIES)
+-		entries = DCB_MAX_NUM_ENTRIES;
+-
+-	for (i = 0; i < entries; i++) {
+-		uint32_t connection, config = 0;
+-
+-		connection = ROM32(dcbtable[headerlen + recordlength * i]);
+-		if (configblock)
+-			config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);
+-
+-		/* seen on an NV11 with DCB v1.5 */
+-		if (connection == 0x00000000)
+-			break;
++	/* no useful connector info available, we need to make it up
++	 * ourselves.  the rule here is: anything on the same i2c bus
++	 * is considered to be on the same connector.  any output
++	 * without an associated i2c bus is assigned its own unique
++	 * connector index.
++	 */
++	for (i = 0; i < dcbt->entries; i++) {
++		u8 i2c = dcbt->entry[i].i2c_index;
++		if (i2c == 0x0f) {
++			dcbt->entry[i].connector = idx++;
++		} else {
++			if (!map[i2c])
++				map[i2c] = ++idx;
++			dcbt->entry[i].connector = map[i2c] - 1;
++		}
++	}
+ 
+-		/* seen on an NV17 with DCB v2.0 */
+-		if (connection == 0xffffffff)
+-			break;
++	/* if we created more than one connector, destroy the connector
++	 * table - just in case it has random, rather than stub, entries.
++	 */
++	if (i > 1) {
++		u8 *conntab = dcb_conntab(bios->dev);
++		if (conntab)
++			conntab[0] = 0x00;
++	}
++}
+ 
+-		if ((connection & 0x0000000f) == 0x0000000f)
+-			continue;
++static int
++parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
++{
++	struct dcb_table *dcb = &bios->dcb;
++	u8 *dcbt, *conn;
++	int idx;
++
++	dcbt = dcb_table(dev);
++	if (!dcbt) {
++		/* handle pre-DCB boards */
++		if (bios->type == NVBIOS_BMP) {
++			fabricate_dcb_encoder_table(dev, bios);
++			return 0;
++		}
+ 
+-		if (!apply_dcb_encoder_quirks(dev, i, &connection, &config))
+-			continue;
++		return -EINVAL;
++	}
+ 
+-		NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
+-			     dcb->entries, connection, config);
++	NV_TRACE(dev, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
+ 
+-		if (!parse_dcb_entry(dev, dcb, connection, config))
+-			break;
+-	}
++	dcb->version = dcbt[0];
++	dcb_outp_foreach(dev, NULL, parse_dcb_entry);
+ 
+ 	/*
+ 	 * apart for v2.1+ not being known for requiring merging, this
+@@ -6590,77 +6226,19 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
+ 	if (!dcb->entries)
+ 		return -ENXIO;
+ 
+-	parse_dcb_gpio_table(bios);
+-	parse_dcb_connector_table(bios);
+-	return 0;
+-}
+-
+-static void
+-fixup_legacy_connector(struct nvbios *bios)
+-{
+-	struct dcb_table *dcb = &bios->dcb;
+-	int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
+-
+-	/*
+-	 * DCB 3.0 also has the table in most cases, but there are some cards
+-	 * where the table is filled with stub entries, and the DCB entriy
+-	 * indices are all 0.  We don't need the connector indices on pre-G80
+-	 * chips (yet?) so limit the use to DCB 4.0 and above.
+-	 */
+-	if (dcb->version >= 0x40)
+-		return;
+-
+-	dcb->connector.entries = 0;
+-
+-	/*
+-	 * No known connector info before v3.0, so make it up.  the rule here
+-	 * is: anything on the same i2c bus is considered to be on the same
+-	 * connector.  any output without an associated i2c bus is assigned
+-	 * its own unique connector index.
+-	 */
+-	for (i = 0; i < dcb->entries; i++) {
+-		/*
+-		 * Ignore the I2C index for on-chip TV-out, as there
+-		 * are cards with bogus values (nv31m in bug 23212),
+-		 * and it's otherwise useless.
+-		 */
+-		if (dcb->entry[i].type == OUTPUT_TV &&
+-		    dcb->entry[i].location == DCB_LOC_ON_CHIP)
+-			dcb->entry[i].i2c_index = 0xf;
+-		i2c = dcb->entry[i].i2c_index;
+-
+-		if (i2c_conn[i2c]) {
+-			dcb->entry[i].connector = i2c_conn[i2c] - 1;
+-			continue;
++	/* dump connector table entries to log, if any exist */
++	idx = -1;
++	while ((conn = dcb_conn(dev, ++idx))) {
++		if (conn[0] != 0xff) {
++			NV_TRACE(dev, "DCB conn %02d: ", idx);
++			if (dcb_conntab(dev)[3] < 4)
++				printk("%04x\n", ROM16(conn[0]));
++			else
++				printk("%08x\n", ROM32(conn[0]));
+ 		}
+-
+-		dcb->entry[i].connector = dcb->connector.entries++;
+-		if (i2c != 0xf)
+-			i2c_conn[i2c] = dcb->connector.entries;
+-	}
+-
+-	/* Fake the connector table as well as just connector indices */
+-	for (i = 0; i < dcb->connector.entries; i++) {
+-		dcb->connector.entry[i].index = i;
+-		dcb->connector.entry[i].type = divine_connector_type(bios, i);
+-		dcb->connector.entry[i].gpio_tag = 0xff;
+-	}
+-}
+-
+-static void
+-fixup_legacy_i2c(struct nvbios *bios)
+-{
+-	struct dcb_table *dcb = &bios->dcb;
+-	int i;
+-
+-	for (i = 0; i < dcb->entries; i++) {
+-		if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
+-			dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
+-		if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
+-			dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
+-		if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
+-			dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
+ 	}
++	dcb_fake_connectors(bios);
++	return 0;
+ }
+ 
+ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
+@@ -6799,11 +6377,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
+ 	spin_lock_init(&bios->lock);
+ 	bios->dev = dev;
+ 
+-	if (!NVShadowVBIOS(dev, bios->data))
+-		return false;
+-
+-	bios->length = NV_PROM_SIZE;
+-	return true;
++	return bios_shadow(dev);
+ }
+ 
+ static int nouveau_parse_vbios_struct(struct drm_device *dev)
+@@ -6879,19 +6453,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
+ 	return ret;
+ }
+ 
+-static void
+-nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
+-{
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nvbios *bios = &dev_priv->vbios;
+-	struct dcb_i2c_entry *entry;
+-	int i;
+-
+-	entry = &bios->dcb.i2c[0];
+-	for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
+-		nouveau_i2c_fini(dev, entry);
+-}
+-
+ static bool
+ nouveau_bios_posted(struct drm_device *dev)
+ {
+@@ -6928,12 +6489,17 @@ nouveau_bios_init(struct drm_device *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = parse_dcb_table(dev, bios);
++	ret = nouveau_i2c_init(dev);
+ 	if (ret)
+ 		return ret;
+ 
+-	fixup_legacy_i2c(bios);
+-	fixup_legacy_connector(bios);
++	ret = nouveau_mxm_init(dev);
++	if (ret)
++		return ret;
++
++	ret = parse_dcb_table(dev, bios);
++	if (ret)
++		return ret;
+ 
+ 	if (!bios->major_version)	/* we don't run version 0 bios */
+ 		return 0;
+@@ -6971,5 +6537,10 @@ nouveau_bios_init(struct drm_device *dev)
+ void
+ nouveau_bios_takedown(struct drm_device *dev)
+ {
+-	nouveau_bios_i2c_devices_takedown(dev);
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	nouveau_mxm_fini(dev);
++	nouveau_i2c_fini(dev);
++
++	kfree(dev_priv->vbios.data);
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
+index 8adb69e..298a3af 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
+@@ -34,9 +34,14 @@
+ 
+ #define DCB_LOC_ON_CHIP 0
+ 
+-#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
+-#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
+-#define ROMPTR(bios, x) (ROM16(x) ? &(bios)->data[ROM16(x)] : NULL)
++#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
++#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
++#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
++#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
++#define ROMPTR(d,x) ({            \
++	struct drm_nouveau_private *dev_priv = (d)->dev_private; \
++	ROM16(x) ? &dev_priv->vbios.data[ROM16(x)] : NULL; \
++})
+ 
+ struct bit_entry {
+ 	uint8_t  id;
+@@ -48,30 +53,13 @@ struct bit_entry {
+ 
+ int bit_table(struct drm_device *, u8 id, struct bit_entry *);
+ 
+-struct dcb_i2c_entry {
+-	uint32_t entry;
+-	uint8_t port_type;
+-	uint8_t read, write;
+-	struct nouveau_i2c_chan *chan;
+-};
+-
+ enum dcb_gpio_tag {
+-	DCB_GPIO_TVDAC0 = 0xc,
++	DCB_GPIO_PANEL_POWER = 0x01,
++	DCB_GPIO_TVDAC0 = 0x0c,
+ 	DCB_GPIO_TVDAC1 = 0x2d,
+-};
+-
+-struct dcb_gpio_entry {
+-	enum dcb_gpio_tag tag;
+-	int line;
+-	bool invert;
+-	uint32_t entry;
+-	uint8_t state_default;
+-	uint8_t state[2];
+-};
+-
+-struct dcb_gpio_table {
+-	int entries;
+-	struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
++	DCB_GPIO_PWM_FAN = 0x09,
++	DCB_GPIO_FAN_SENSE = 0x3d,
++	DCB_GPIO_UNUSED = 0xff
+ };
+ 
+ enum dcb_connector_type {
+@@ -81,29 +69,19 @@ enum dcb_connector_type {
+ 	DCB_CONNECTOR_TV_3 = 0x13,
+ 	DCB_CONNECTOR_DVI_I = 0x30,
+ 	DCB_CONNECTOR_DVI_D = 0x31,
++	DCB_CONNECTOR_DMS59_0 = 0x38,
++	DCB_CONNECTOR_DMS59_1 = 0x39,
+ 	DCB_CONNECTOR_LVDS = 0x40,
+ 	DCB_CONNECTOR_LVDS_SPWG = 0x41,
+ 	DCB_CONNECTOR_DP = 0x46,
+ 	DCB_CONNECTOR_eDP = 0x47,
+ 	DCB_CONNECTOR_HDMI_0 = 0x60,
+ 	DCB_CONNECTOR_HDMI_1 = 0x61,
++	DCB_CONNECTOR_DMS59_DP0 = 0x64,
++	DCB_CONNECTOR_DMS59_DP1 = 0x65,
+ 	DCB_CONNECTOR_NONE = 0xff
+ };
+ 
+-struct dcb_connector_table_entry {
+-	uint8_t index;
+-	uint32_t entry;
+-	enum dcb_connector_type type;
+-	uint8_t index2;
+-	uint8_t gpio_tag;
+-	void *drm;
+-};
+-
+-struct dcb_connector_table {
+-	int entries;
+-	struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
+-};
+-
+ enum dcb_type {
+ 	OUTPUT_ANALOG = 0,
+ 	OUTPUT_TV = 1,
+@@ -111,6 +89,7 @@ enum dcb_type {
+ 	OUTPUT_LVDS = 3,
+ 	OUTPUT_DP = 6,
+ 	OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
++	OUTPUT_UNUSED = 15,
+ 	OUTPUT_ANY = -1
+ };
+ 
+@@ -155,18 +134,8 @@ struct dcb_entry {
+ 
+ struct dcb_table {
+ 	uint8_t version;
+-
+ 	int entries;
+ 	struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
+-
+-	uint8_t *i2c_table;
+-	uint8_t i2c_default_indices;
+-	struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
+-
+-	uint16_t gpio_table_ptr;
+-	struct dcb_gpio_table gpio;
+-	uint16_t connector_table_ptr;
+-	struct dcb_connector_table connector;
+ };
+ 
+ enum nouveau_or {
+@@ -195,7 +164,7 @@ enum pll_types {
+ 	PLL_SHADER = 0x02,
+ 	PLL_UNK03  = 0x03,
+ 	PLL_MEMORY = 0x04,
+-	PLL_UNK05  = 0x05,
++	PLL_VDEC   = 0x05,
+ 	PLL_UNK40  = 0x40,
+ 	PLL_UNK41  = 0x41,
+ 	PLL_UNK42  = 0x42,
+@@ -244,6 +213,8 @@ struct nvbios {
+ 		NVBIOS_BIT
+ 	} type;
+ 	uint16_t offset;
++	uint32_t length;
++	uint8_t *data;
+ 
+ 	uint8_t chip_version;
+ 
+@@ -254,8 +225,6 @@ struct nvbios {
+ 
+ 	spinlock_t lock;
+ 
+-	uint8_t data[NV_PROM_SIZE];
+-	unsigned int length;
+ 	bool execute;
+ 
+ 	uint8_t major_version;
+@@ -333,4 +302,11 @@ struct nvbios {
+ 	} legacy;
+ };
+ 
++void *dcb_table(struct drm_device *);
++void *dcb_outp(struct drm_device *, u8 idx);
++int dcb_outp_foreach(struct drm_device *, void *data,
++		     int (*)(struct drm_device *, void *, int idx, u8 *outp));
++u8 *dcb_conntab(struct drm_device *);
++u8 *dcb_conn(struct drm_device *, u8 idx);
++
+ #endif
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+index 2bb29c9..12ce044 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -28,6 +28,7 @@
+  */
+ 
+ #include "drmP.h"
++#include "ttm/ttm_page_alloc.h"
+ 
+ #include "nouveau_drm.h"
+ #include "nouveau_drv.h"
+@@ -92,6 +93,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_bo *nvbo;
++	size_t acc_size;
+ 	int ret;
+ 
+ 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
+@@ -114,9 +116,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
+ 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
+ 	nouveau_bo_placement_set(nvbo, flags, 0);
+ 
++	acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
++				       sizeof(struct nouveau_bo));
++
+ 	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
+ 			  ttm_bo_type_device, &nvbo->placement,
+-			  align >> PAGE_SHIFT, 0, false, NULL, size,
++			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
+ 			  nouveau_bo_del_ttm);
+ 	if (ret) {
+ 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
+@@ -343,8 +348,10 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
+ 		*mem = val;
+ }
+ 
+-static struct ttm_backend *
+-nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
++static struct ttm_tt *
++nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
++		      unsigned long size, uint32_t page_flags,
++		      struct page *dummy_read_page)
+ {
+ 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ 	struct drm_device *dev = dev_priv->dev;
+@@ -352,11 +359,13 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+ 	switch (dev_priv->gart_info.type) {
+ #if __OS_HAS_AGP
+ 	case NOUVEAU_GART_AGP:
+-		return ttm_agp_backend_init(bdev, dev->agp->bridge);
++		return ttm_agp_tt_create(bdev, dev->agp->bridge,
++					 size, page_flags, dummy_read_page);
+ #endif
+ 	case NOUVEAU_GART_PDMA:
+ 	case NOUVEAU_GART_HW:
+-		return nouveau_sgdma_init_ttm(dev);
++		return nouveau_sgdma_create_ttm(bdev, size, page_flags,
++						dummy_read_page);
+ 	default:
+ 		NV_ERROR(dev, "Unknown GART type %d\n",
+ 			 dev_priv->gart_info.type);
+@@ -673,8 +682,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
+ 	if (mem->mem_type == TTM_PL_VRAM)
+ 		nouveau_vm_map(vma, node);
+ 	else
+-		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
+-				  node, node->pages);
++		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
+ 
+ 	return 0;
+ }
+@@ -685,16 +693,12 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
+ 		     struct ttm_mem_reg *new_mem)
+ {
+ 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
++	struct nouveau_channel *chan = chan = dev_priv->channel;
+ 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+ 	struct ttm_mem_reg *old_mem = &bo->mem;
+-	struct nouveau_channel *chan;
+ 	int ret;
+ 
+-	chan = nvbo->channel;
+-	if (!chan) {
+-		chan = dev_priv->channel;
+-		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
+-	}
++	mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
+ 
+ 	/* create temporary vmas for the transfer and attach them to the
+ 	 * old nouveau_mem node, these will get cleaned up after ttm has
+@@ -726,8 +730,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
+ 	}
+ 
+ out:
+-	if (chan == dev_priv->channel)
+-		mutex_unlock(&chan->mutex);
++	mutex_unlock(&chan->mutex);
+ 	return ret;
+ }
+ 
+@@ -801,19 +804,22 @@ out:
+ static void
+ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
+ {
+-	struct nouveau_mem *node = new_mem->mm_node;
+ 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+ 	struct nouveau_vma *vma;
+ 
++	/* ttm can now (stupidly) pass the driver bos it didn't create... */
++	if (bo->destroy != nouveau_bo_del_ttm)
++		return;
++
+ 	list_for_each_entry(vma, &nvbo->vma_list, head) {
+-		if (new_mem->mem_type == TTM_PL_VRAM) {
++		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
+ 			nouveau_vm_map(vma, new_mem->mm_node);
+ 		} else
+-		if (new_mem->mem_type == TTM_PL_TT &&
++		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
+ 		    nvbo->page_shift == vma->vm->spg_shift) {
+ 			nouveau_vm_map_sg(vma, 0, new_mem->
+ 					  num_pages << PAGE_SHIFT,
+-					  node, node->pages);
++					  new_mem->mm_node);
+ 		} else {
+ 			nouveau_vm_unmap(vma);
+ 		}
+@@ -940,7 +946,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+ 		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ 			mem->bus.offset = mem->start << PAGE_SHIFT;
+ 			mem->bus.base = dev_priv->gart_info.aper_base;
+-			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
++			mem->bus.is_iomem = true;
+ 		}
+ #endif
+ 		break;
+@@ -1044,8 +1050,94 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
+ 	nouveau_fence_unref(&old_fence);
+ }
+ 
++static int
++nouveau_ttm_tt_populate(struct ttm_tt *ttm)
++{
++	struct ttm_dma_tt *ttm_dma = (void *)ttm;
++	struct drm_nouveau_private *dev_priv;
++	struct drm_device *dev;
++	unsigned i;
++	int r;
++
++	if (ttm->state != tt_unpopulated)
++		return 0;
++
++	dev_priv = nouveau_bdev(ttm->bdev);
++	dev = dev_priv->dev;
++
++#if __OS_HAS_AGP
++	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
++		return ttm_agp_tt_populate(ttm);
++	}
++#endif
++
++#ifdef CONFIG_SWIOTLB
++	if (swiotlb_nr_tbl()) {
++		return ttm_dma_populate((void *)ttm, dev->dev);
++	}
++#endif
++
++	r = ttm_pool_populate(ttm);
++	if (r) {
++		return r;
++	}
++
++	for (i = 0; i < ttm->num_pages; i++) {
++		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
++						   0, PAGE_SIZE,
++						   PCI_DMA_BIDIRECTIONAL);
++		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
++			while (--i) {
++				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
++					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++				ttm_dma->dma_address[i] = 0;
++			}
++			ttm_pool_unpopulate(ttm);
++			return -EFAULT;
++		}
++	}
++	return 0;
++}
++
++static void
++nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
++{
++	struct ttm_dma_tt *ttm_dma = (void *)ttm;
++	struct drm_nouveau_private *dev_priv;
++	struct drm_device *dev;
++	unsigned i;
++
++	dev_priv = nouveau_bdev(ttm->bdev);
++	dev = dev_priv->dev;
++
++#if __OS_HAS_AGP
++	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
++		ttm_agp_tt_unpopulate(ttm);
++		return;
++	}
++#endif
++
++#ifdef CONFIG_SWIOTLB
++	if (swiotlb_nr_tbl()) {
++		ttm_dma_unpopulate((void *)ttm, dev->dev);
++		return;
++	}
++#endif
++
++	for (i = 0; i < ttm->num_pages; i++) {
++		if (ttm_dma->dma_address[i]) {
++			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
++				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++		}
++	}
++
++	ttm_pool_unpopulate(ttm);
++}
++
+ struct ttm_bo_driver nouveau_bo_driver = {
+-	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
++	.ttm_tt_create = &nouveau_ttm_tt_create,
++	.ttm_tt_populate = &nouveau_ttm_tt_populate,
++	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
+ 	.invalidate_caches = nouveau_bo_invalidate_caches,
+ 	.init_mem_type = nouveau_bo_init_mem_type,
+ 	.evict_flags = nouveau_bo_evict_flags,
+@@ -1091,7 +1183,7 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
+ 		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
+ 	else
+ 	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+-		nouveau_vm_map_sg(vma, 0, size, node, node->pages);
++		nouveau_vm_map_sg(vma, 0, size, node);
+ 
+ 	list_add_tail(&vma->head, &nvbo->vma_list);
+ 	vma->refcount = 1;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
+index bb6ec9e..846afb0 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
++++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
+@@ -122,7 +122,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
+ 	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+ 	struct nouveau_channel *chan;
+ 	unsigned long flags;
+-	int ret;
++	int ret, i;
+ 
+ 	/* allocate and lock channel structure */
+ 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+@@ -184,9 +184,11 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
+ 		return ret;
+ 	}
+ 
+-	nouveau_dma_pre_init(chan);
++	nouveau_dma_init(chan);
+ 	chan->user_put = 0x40;
+ 	chan->user_get = 0x44;
++	if (dev_priv->card_type >= NV_50)
++                chan->user_get_hi = 0x60;
+ 
+ 	/* disable the fifo caches */
+ 	pfifo->reassign(dev, false);
+@@ -200,9 +202,18 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
+ 
+ 	pfifo->reassign(dev, true);
+ 
+-	ret = nouveau_dma_init(chan);
+-	if (!ret)
+-		ret = nouveau_fence_channel_init(chan);
++	/* Insert NOPs for NOUVEAU_DMA_SKIPS */
++	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
++	if (ret) {
++		nouveau_channel_put(&chan);
++		return ret;
++	}
++
++	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
++		OUT_RING  (chan, 0x00000000);
++	FIRE_RING(chan);
++
++	ret = nouveau_fence_channel_init(chan);
+ 	if (ret) {
+ 		nouveau_channel_put(&chan);
+ 		return ret;
+@@ -425,18 +436,11 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
+ 	}
+ 
+ 	if (dev_priv->card_type < NV_C0) {
+-		init->subchan[0].handle = NvM2MF;
+-		if (dev_priv->card_type < NV_50)
+-			init->subchan[0].grclass = 0x0039;
+-		else
+-			init->subchan[0].grclass = 0x5039;
++		init->subchan[0].handle = 0x00000000;
++		init->subchan[0].grclass = 0x0000;
+ 		init->subchan[1].handle = NvSw;
+ 		init->subchan[1].grclass = NV_SW;
+ 		init->nr_subchan = 2;
+-	} else {
+-		init->subchan[0].handle  = 0x9039;
+-		init->subchan[0].grclass = 0x9039;
+-		init->nr_subchan = 1;
+ 	}
+ 
+ 	/* Named memory object area */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 1e72db5..7b11edb 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -35,6 +35,7 @@
+ #include "nouveau_encoder.h"
+ #include "nouveau_crtc.h"
+ #include "nouveau_connector.h"
++#include "nouveau_gpio.h"
+ #include "nouveau_hw.h"
+ 
+ static void nouveau_connector_hotplug(void *, int);
+@@ -78,29 +79,11 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
+ 	return NULL;
+ }
+ 
+-/*TODO: This could use improvement, and learn to handle the fixed
+- *      BIOS tables etc.  It's fine currently, for its only user.
+- */
+-int
+-nouveau_connector_bpp(struct drm_connector *connector)
+-{
+-	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+-
+-	if (nv_connector->edid && nv_connector->edid->revision >= 4) {
+-		u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4;
+-		if (bpc > 4)
+-			return bpc;
+-	}
+-
+-	return 18;
+-}
+-
+ static void
+ nouveau_connector_destroy(struct drm_connector *connector)
+ {
+ 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ 	struct drm_nouveau_private *dev_priv;
+-	struct nouveau_gpio_engine *pgpio;
+ 	struct drm_device *dev;
+ 
+ 	if (!nv_connector)
+@@ -110,10 +93,9 @@ nouveau_connector_destroy(struct drm_connector *connector)
+ 	dev_priv = dev->dev_private;
+ 	NV_DEBUG_KMS(dev, "\n");
+ 
+-	pgpio = &dev_priv->engine.gpio;
+-	if (pgpio->irq_unregister) {
+-		pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
+-				      nouveau_connector_hotplug, connector);
++	if (nv_connector->hpd != DCB_GPIO_UNUSED) {
++		nouveau_gpio_isr_del(dev, 0, nv_connector->hpd, 0xff,
++				     nouveau_connector_hotplug, connector);
+ 	}
+ 
+ 	kfree(nv_connector->edid);
+@@ -198,6 +180,10 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
+ 		return;
+ 	nv_connector->detected_encoder = nv_encoder;
+ 
++	if (dev_priv->card_type >= NV_50) {
++		connector->interlace_allowed = true;
++		connector->doublescan_allowed = true;
++	} else
+ 	if (nv_encoder->dcb->type == OUTPUT_LVDS ||
+ 	    nv_encoder->dcb->type == OUTPUT_TMDS) {
+ 		connector->doublescan_allowed = false;
+@@ -214,7 +200,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
+ 			connector->interlace_allowed = true;
+ 	}
+ 
+-	if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
++	if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
+ 		drm_connector_property_set_value(connector,
+ 			dev->mode_config.dvi_i_subconnector_property,
+ 			nv_encoder->dcb->type == OUTPUT_TMDS ?
+@@ -397,7 +383,7 @@ nouveau_connector_force(struct drm_connector *connector)
+ 	struct nouveau_encoder *nv_encoder;
+ 	int type;
+ 
+-	if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
++	if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
+ 		if (connector->force == DRM_FORCE_ON_DIGITAL)
+ 			type = OUTPUT_TMDS;
+ 		else
+@@ -420,15 +406,21 @@ static int
+ nouveau_connector_set_property(struct drm_connector *connector,
+ 			       struct drm_property *property, uint64_t value)
+ {
++	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
++	struct nouveau_display_engine *disp = &dev_priv->engine.display;
+ 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ 	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ 	struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
+ 	struct drm_device *dev = connector->dev;
++	struct nouveau_crtc *nv_crtc;
+ 	int ret;
+ 
++	nv_crtc = NULL;
++	if (connector->encoder && connector->encoder->crtc)
++		nv_crtc = nouveau_crtc(connector->encoder->crtc);
++
+ 	/* Scaling mode */
+ 	if (property == dev->mode_config.scaling_mode_property) {
+-		struct nouveau_crtc *nv_crtc = NULL;
+ 		bool modeset = false;
+ 
+ 		switch (value) {
+@@ -454,8 +446,6 @@ nouveau_connector_set_property(struct drm_connector *connector,
+ 			modeset = true;
+ 		nv_connector->scaling_mode = value;
+ 
+-		if (connector->encoder && connector->encoder->crtc)
+-			nv_crtc = nouveau_crtc(connector->encoder->crtc);
+ 		if (!nv_crtc)
+ 			return 0;
+ 
+@@ -467,7 +457,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
+ 			if (!ret)
+ 				return -EINVAL;
+ 		} else {
+-			ret = nv_crtc->set_scale(nv_crtc, value, true);
++			ret = nv_crtc->set_scale(nv_crtc, true);
+ 			if (ret)
+ 				return ret;
+ 		}
+@@ -475,23 +465,71 @@ nouveau_connector_set_property(struct drm_connector *connector,
+ 		return 0;
+ 	}
+ 
+-	/* Dithering */
+-	if (property == dev->mode_config.dithering_mode_property) {
+-		struct nouveau_crtc *nv_crtc = NULL;
++	/* Underscan */
++	if (property == disp->underscan_property) {
++		if (nv_connector->underscan != value) {
++			nv_connector->underscan = value;
++			if (!nv_crtc || !nv_crtc->set_scale)
++				return 0;
+ 
+-		if (value == DRM_MODE_DITHERING_ON)
+-			nv_connector->use_dithering = true;
+-		else
+-			nv_connector->use_dithering = false;
++			return nv_crtc->set_scale(nv_crtc, true);
++		}
++
++		return 0;
++	}
++
++	if (property == disp->underscan_hborder_property) {
++		if (nv_connector->underscan_hborder != value) {
++			nv_connector->underscan_hborder = value;
++			if (!nv_crtc || !nv_crtc->set_scale)
++				return 0;
++
++			return nv_crtc->set_scale(nv_crtc, true);
++		}
++
++		return 0;
++	}
++
++	if (property == disp->underscan_vborder_property) {
++		if (nv_connector->underscan_vborder != value) {
++			nv_connector->underscan_vborder = value;
++			if (!nv_crtc || !nv_crtc->set_scale)
++				return 0;
++
++			return nv_crtc->set_scale(nv_crtc, true);
++		}
++
++		return 0;
++	}
+ 
+-		if (connector->encoder && connector->encoder->crtc)
+-			nv_crtc = nouveau_crtc(connector->encoder->crtc);
++	/* Dithering */
++	if (property == disp->dithering_mode) {
++		nv_connector->dithering_mode = value;
++		if (!nv_crtc || !nv_crtc->set_dither)
++			return 0;
++
++		return nv_crtc->set_dither(nv_crtc, true);
++	}
+ 
++	if (property == disp->dithering_depth) {
++		nv_connector->dithering_depth = value;
+ 		if (!nv_crtc || !nv_crtc->set_dither)
+ 			return 0;
+ 
+-		return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
+-					   true);
++		return nv_crtc->set_dither(nv_crtc, true);
++	}
++
++	if (nv_crtc && nv_crtc->set_color_vibrance) {
++		/* Hue */
++		if (property == disp->vibrant_hue_property) {
++			nv_crtc->vibrant_hue = value - 90;
++			return nv_crtc->set_color_vibrance(nv_crtc, true);
++		}
++		/* Saturation */
++		if (property == disp->color_vibrance_property) {
++			nv_crtc->color_vibrance = value - 100;
++			return nv_crtc->set_color_vibrance(nv_crtc, true);
++		}
+ 	}
+ 
+ 	if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
+@@ -617,7 +655,7 @@ nouveau_connector_detect_depth(struct drm_connector *connector)
+ 		return;
+ 
+ 	/* EDID 1.4 is *supposed* to be supported on eDP, but, Apple... */
+-	if (nv_connector->dcb->type == DCB_CONNECTOR_eDP) {
++	if (nv_connector->type == DCB_CONNECTOR_eDP) {
+ 		connector->display_info.bpc = 6;
+ 		return;
+ 	}
+@@ -641,7 +679,7 @@ nouveau_connector_detect_depth(struct drm_connector *connector)
+ 	 * know which if_is_24bit flag to check...
+ 	 */
+ 	if (nv_connector->edid &&
+-	    nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG)
++	    nv_connector->type == DCB_CONNECTOR_LVDS_SPWG)
+ 		duallink = ((u8 *)nv_connector->edid)[121] == 2;
+ 	else
+ 		duallink = mode->clock >= bios->fp.duallink_transition_clk;
+@@ -680,6 +718,12 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+ 		nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
+ 	}
+ 
++	/* Determine display colour depth for everything except LVDS now,
++	 * DP requires this before mode_valid() is called.
++	 */
++	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
++		nouveau_connector_detect_depth(connector);
++
+ 	/* Find the native mode if this is a digital panel, if we didn't
+ 	 * find any modes through DDC previously add the native mode to
+ 	 * the list of modes.
+@@ -695,18 +739,19 @@ nouveau_connector_get_modes(struct drm_connector *connector)
+ 		ret = 1;
+ 	}
+ 
+-	/* Attempt to determine display colour depth, this has to happen after
+-	 * we've determined the "native" mode for LVDS, as the VBIOS tables
+-	 * require us to compare against a pixel clock in some cases..
++	/* Determine LVDS colour depth, must happen after determining
++	 * "native" mode as some VBIOS tables require us to use the
++	 * pixel clock as part of the lookup...
+ 	 */
+-	nouveau_connector_detect_depth(connector);
++	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
++		nouveau_connector_detect_depth(connector);
+ 
+ 	if (nv_encoder->dcb->type == OUTPUT_TV)
+ 		ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
+ 
+-	if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS ||
+-	    nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG ||
+-	    nv_connector->dcb->type == DCB_CONNECTOR_eDP)
++	if (nv_connector->type == DCB_CONNECTOR_LVDS ||
++	    nv_connector->type == DCB_CONNECTOR_LVDS_SPWG ||
++	    nv_connector->type == DCB_CONNECTOR_eDP)
+ 		ret += nouveau_connector_scaler_modes_add(connector);
+ 
+ 	return ret;
+@@ -765,7 +810,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
+ 	case OUTPUT_DP:
+ 		max_clock  = nv_encoder->dp.link_nr;
+ 		max_clock *= nv_encoder->dp.link_bw;
+-		clock = clock * nouveau_connector_bpp(connector) / 10;
++		clock = clock * (connector->display_info.bpc * 3) / 10;
+ 		break;
+ 	default:
+ 		BUG_ON(1);
+@@ -823,96 +868,188 @@ nouveau_connector_funcs_lvds = {
+ 	.force = nouveau_connector_force
+ };
+ 
++static int
++drm_conntype_from_dcb(enum dcb_connector_type dcb)
++{
++	switch (dcb) {
++	case DCB_CONNECTOR_VGA      : return DRM_MODE_CONNECTOR_VGA;
++	case DCB_CONNECTOR_TV_0     :
++	case DCB_CONNECTOR_TV_1     :
++	case DCB_CONNECTOR_TV_3     : return DRM_MODE_CONNECTOR_TV;
++	case DCB_CONNECTOR_DMS59_0  :
++	case DCB_CONNECTOR_DMS59_1  :
++	case DCB_CONNECTOR_DVI_I    : return DRM_MODE_CONNECTOR_DVII;
++	case DCB_CONNECTOR_DVI_D    : return DRM_MODE_CONNECTOR_DVID;
++	case DCB_CONNECTOR_LVDS     :
++	case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
++	case DCB_CONNECTOR_DMS59_DP0:
++	case DCB_CONNECTOR_DMS59_DP1:
++	case DCB_CONNECTOR_DP       : return DRM_MODE_CONNECTOR_DisplayPort;
++	case DCB_CONNECTOR_eDP      : return DRM_MODE_CONNECTOR_eDP;
++	case DCB_CONNECTOR_HDMI_0   :
++	case DCB_CONNECTOR_HDMI_1   : return DRM_MODE_CONNECTOR_HDMIA;
++	default:
++		break;
++	}
++
++	return DRM_MODE_CONNECTOR_Unknown;
++}
++
+ struct drm_connector *
+ nouveau_connector_create(struct drm_device *dev, int index)
+ {
+ 	const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
++	struct nouveau_display_engine *disp = &dev_priv->engine.display;
+ 	struct nouveau_connector *nv_connector = NULL;
+-	struct dcb_connector_table_entry *dcb = NULL;
+ 	struct drm_connector *connector;
+ 	int type, ret = 0;
++	bool dummy;
+ 
+ 	NV_DEBUG_KMS(dev, "\n");
+ 
+-	if (index >= dev_priv->vbios.dcb.connector.entries)
+-		return ERR_PTR(-EINVAL);
+-
+-	dcb = &dev_priv->vbios.dcb.connector.entry[index];
+-	if (dcb->drm)
+-		return dcb->drm;
+-
+-	switch (dcb->type) {
+-	case DCB_CONNECTOR_VGA:
+-		type = DRM_MODE_CONNECTOR_VGA;
+-		break;
+-	case DCB_CONNECTOR_TV_0:
+-	case DCB_CONNECTOR_TV_1:
+-	case DCB_CONNECTOR_TV_3:
+-		type = DRM_MODE_CONNECTOR_TV;
+-		break;
+-	case DCB_CONNECTOR_DVI_I:
+-		type = DRM_MODE_CONNECTOR_DVII;
+-		break;
+-	case DCB_CONNECTOR_DVI_D:
+-		type = DRM_MODE_CONNECTOR_DVID;
+-		break;
+-	case DCB_CONNECTOR_HDMI_0:
+-	case DCB_CONNECTOR_HDMI_1:
+-		type = DRM_MODE_CONNECTOR_HDMIA;
+-		break;
+-	case DCB_CONNECTOR_LVDS:
+-	case DCB_CONNECTOR_LVDS_SPWG:
+-		type = DRM_MODE_CONNECTOR_LVDS;
+-		funcs = &nouveau_connector_funcs_lvds;
+-		break;
+-	case DCB_CONNECTOR_DP:
+-		type = DRM_MODE_CONNECTOR_DisplayPort;
+-		break;
+-	case DCB_CONNECTOR_eDP:
+-		type = DRM_MODE_CONNECTOR_eDP;
+-		break;
+-	default:
+-		NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
+-		return ERR_PTR(-EINVAL);
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		nv_connector = nouveau_connector(connector);
++		if (nv_connector->index == index)
++			return connector;
+ 	}
+ 
+ 	nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
+ 	if (!nv_connector)
+ 		return ERR_PTR(-ENOMEM);
+-	nv_connector->dcb = dcb;
++
+ 	connector = &nv_connector->base;
++	nv_connector->index = index;
++
++	/* attempt to parse vbios connector type and hotplug gpio */
++	nv_connector->dcb = dcb_conn(dev, index);
++	if (nv_connector->dcb) {
++		static const u8 hpd[16] = {
++			0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
++			0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
++		};
++
++		u32 entry = ROM16(nv_connector->dcb[0]);
++		if (dcb_conntab(dev)[3] >= 4)
++			entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
++
++		nv_connector->hpd = ffs((entry & 0x07033000) >> 12);
++		nv_connector->hpd = hpd[nv_connector->hpd];
++
++		nv_connector->type = nv_connector->dcb[0];
++		if (drm_conntype_from_dcb(nv_connector->type) ==
++					  DRM_MODE_CONNECTOR_Unknown) {
++			NV_WARN(dev, "unknown connector type %02x\n",
++				nv_connector->type);
++			nv_connector->type = DCB_CONNECTOR_NONE;
++		}
+ 
+-	/* defaults, will get overridden in detect() */
+-	connector->interlace_allowed = false;
+-	connector->doublescan_allowed = false;
++		/* Gigabyte NX85T */
++		if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
++			if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
++				nv_connector->type = DCB_CONNECTOR_DVI_I;
++		}
+ 
+-	drm_connector_init(dev, connector, funcs, type);
+-	drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
++		/* Gigabyte GV-NX86T512H */
++		if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
++			if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
++				nv_connector->type = DCB_CONNECTOR_DVI_I;
++		}
++	} else {
++		nv_connector->type = DCB_CONNECTOR_NONE;
++		nv_connector->hpd = DCB_GPIO_UNUSED;
++	}
++
++	/* no vbios data, or an unknown dcb connector type - attempt to
++	 * figure out something suitable ourselves
++	 */
++	if (nv_connector->type == DCB_CONNECTOR_NONE) {
++		struct drm_nouveau_private *dev_priv = dev->dev_private;
++		struct dcb_table *dcbt = &dev_priv->vbios.dcb;
++		u32 encoders = 0;
++		int i;
++
++		for (i = 0; i < dcbt->entries; i++) {
++			if (dcbt->entry[i].connector == nv_connector->index)
++				encoders |= (1 << dcbt->entry[i].type);
++		}
+ 
+-	/* Check if we need dithering enabled */
+-	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+-		bool dummy, is_24bit = false;
++		if (encoders & (1 << OUTPUT_DP)) {
++			if (encoders & (1 << OUTPUT_TMDS))
++				nv_connector->type = DCB_CONNECTOR_DP;
++			else
++				nv_connector->type = DCB_CONNECTOR_eDP;
++		} else
++		if (encoders & (1 << OUTPUT_TMDS)) {
++			if (encoders & (1 << OUTPUT_ANALOG))
++				nv_connector->type = DCB_CONNECTOR_DVI_I;
++			else
++				nv_connector->type = DCB_CONNECTOR_DVI_D;
++		} else
++		if (encoders & (1 << OUTPUT_ANALOG)) {
++			nv_connector->type = DCB_CONNECTOR_VGA;
++		} else
++		if (encoders & (1 << OUTPUT_LVDS)) {
++			nv_connector->type = DCB_CONNECTOR_LVDS;
++		} else
++		if (encoders & (1 << OUTPUT_TV)) {
++			nv_connector->type = DCB_CONNECTOR_TV_0;
++		}
++	}
+ 
+-		ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit);
++	type = drm_conntype_from_dcb(nv_connector->type);
++	if (type == DRM_MODE_CONNECTOR_LVDS) {
++		ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
+ 		if (ret) {
+-			NV_ERROR(dev, "Error parsing LVDS table, disabling "
+-				 "LVDS\n");
+-			goto fail;
++			NV_ERROR(dev, "Error parsing LVDS table, disabling\n");
++			kfree(nv_connector);
++			return ERR_PTR(ret);
+ 		}
+ 
+-		nv_connector->use_dithering = !is_24bit;
++		funcs = &nouveau_connector_funcs_lvds;
++	} else {
++		funcs = &nouveau_connector_funcs;
+ 	}
+ 
++	/* defaults, will get overridden in detect() */
++	connector->interlace_allowed = false;
++	connector->doublescan_allowed = false;
++
++	drm_connector_init(dev, connector, funcs, type);
++	drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
++
+ 	/* Init DVI-I specific properties */
+-	if (dcb->type == DCB_CONNECTOR_DVI_I) {
+-		drm_mode_create_dvi_i_properties(dev);
++	if (nv_connector->type == DCB_CONNECTOR_DVI_I)
+ 		drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
+-		drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
++
++	/* Add overscan compensation options to digital outputs */
++	if (disp->underscan_property &&
++	    (type == DRM_MODE_CONNECTOR_DVID ||
++	     type == DRM_MODE_CONNECTOR_DVII ||
++	     type == DRM_MODE_CONNECTOR_HDMIA ||
++	     type == DRM_MODE_CONNECTOR_DisplayPort)) {
++		drm_connector_attach_property(connector,
++					      disp->underscan_property,
++					      UNDERSCAN_OFF);
++		drm_connector_attach_property(connector,
++					      disp->underscan_hborder_property,
++					      0);
++		drm_connector_attach_property(connector,
++					      disp->underscan_vborder_property,
++					      0);
+ 	}
+ 
+-	switch (dcb->type) {
++	/* Add hue and saturation options */
++	if (disp->vibrant_hue_property)
++		drm_connector_attach_property(connector,
++					      disp->vibrant_hue_property,
++					      90);
++	if (disp->color_vibrance_property)
++		drm_connector_attach_property(connector,
++					      disp->color_vibrance_property,
++					      150);
++
++	switch (nv_connector->type) {
+ 	case DCB_CONNECTOR_VGA:
+ 		if (dev_priv->card_type >= NV_50) {
+ 			drm_connector_attach_property(connector,
+@@ -931,32 +1068,32 @@ nouveau_connector_create(struct drm_device *dev, int index)
+ 		drm_connector_attach_property(connector,
+ 				dev->mode_config.scaling_mode_property,
+ 				nv_connector->scaling_mode);
+-		drm_connector_attach_property(connector,
+-				dev->mode_config.dithering_mode_property,
+-				nv_connector->use_dithering ?
+-				DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
++		if (disp->dithering_mode) {
++			nv_connector->dithering_mode = DITHERING_MODE_AUTO;
++			drm_connector_attach_property(connector,
++						disp->dithering_mode,
++						nv_connector->dithering_mode);
++		}
++		if (disp->dithering_depth) {
++			nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
++			drm_connector_attach_property(connector,
++						disp->dithering_depth,
++						nv_connector->dithering_depth);
++		}
+ 		break;
+ 	}
+ 
+-	if (nv_connector->dcb->gpio_tag != 0xff && pgpio->irq_register) {
+-		pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
+-				    nouveau_connector_hotplug, connector);
+-
+-		connector->polled = DRM_CONNECTOR_POLL_HPD;
+-	} else {
+-		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++	if (nv_connector->hpd != DCB_GPIO_UNUSED) {
++		ret = nouveau_gpio_isr_add(dev, 0, nv_connector->hpd, 0xff,
++					   nouveau_connector_hotplug,
++					   connector);
++		if (ret == 0)
++			connector->polled = DRM_CONNECTOR_POLL_HPD;
+ 	}
+ 
+ 	drm_sysfs_connector_add(connector);
+-
+-	dcb->drm = connector;
+-	return dcb->drm;
+-
+-fail:
+-	drm_connector_cleanup(connector);
+-	kfree(connector);
+-	return ERR_PTR(ret);
+-
++	return connector;
+ }
+ 
+ static void
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
+index 711b1e9..e485702 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
+@@ -30,13 +30,43 @@
+ #include "drm_edid.h"
+ #include "nouveau_i2c.h"
+ 
++enum nouveau_underscan_type {
++	UNDERSCAN_OFF,
++	UNDERSCAN_ON,
++	UNDERSCAN_AUTO,
++};
++
++/* the enum values specifically defined here match nv50/nvd0 hw values, and
++ * the code relies on this
++ */
++enum nouveau_dithering_mode {
++	DITHERING_MODE_OFF = 0x00,
++	DITHERING_MODE_ON = 0x01,
++	DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
++	DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
++	DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
++	DITHERING_MODE_AUTO
++};
++
++enum nouveau_dithering_depth {
++	DITHERING_DEPTH_6BPC = 0x00,
++	DITHERING_DEPTH_8BPC = 0x02,
++	DITHERING_DEPTH_AUTO
++};
++
+ struct nouveau_connector {
+ 	struct drm_connector base;
++	enum dcb_connector_type type;
++	u8 index;
++	u8 *dcb;
++	u8 hpd;
+ 
+-	struct dcb_connector_table_entry *dcb;
+-
++	int dithering_mode;
++	int dithering_depth;
+ 	int scaling_mode;
+-	bool use_dithering;
++	enum nouveau_underscan_type underscan;
++	u32 underscan_hborder;
++	u32 underscan_vborder;
+ 
+ 	struct nouveau_encoder *detected_encoder;
+ 	struct edid *edid;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
+index bf8e128..e6d0d1e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
++++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
+@@ -32,11 +32,11 @@ struct nouveau_crtc {
+ 
+ 	int index;
+ 
+-	struct drm_display_mode *mode;
+-
+ 	uint32_t dpms_saved_fp_control;
+ 	uint32_t fp_users;
+ 	int saturation;
++	int color_vibrance;
++	int vibrant_hue;
+ 	int sharpness;
+ 	int last_dpms;
+ 
+@@ -67,8 +67,9 @@ struct nouveau_crtc {
+ 		int depth;
+ 	} lut;
+ 
+-	int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update);
+-	int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update);
++	int (*set_dither)(struct nouveau_crtc *crtc, bool update);
++	int (*set_scale)(struct nouveau_crtc *crtc, bool update);
++	int (*set_color_vibrance)(struct nouveau_crtc *crtc, bool update);
+ };
+ 
+ static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+index 8e15923..fa2ec49 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
++++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+@@ -44,7 +44,7 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
+ 	seq_printf(m, "channel id    : %d\n", chan->id);
+ 
+ 	seq_printf(m, "cpu fifo state:\n");
+-	seq_printf(m, "          base: 0x%08x\n", chan->pushbuf_base);
++	seq_printf(m, "          base: 0x%10llx\n", chan->pushbuf_base);
+ 	seq_printf(m, "           max: 0x%08x\n", chan->dma.max << 2);
+ 	seq_printf(m, "           cur: 0x%08x\n", chan->dma.cur << 2);
+ 	seq_printf(m, "           put: 0x%08x\n", chan->dma.put << 2);
+@@ -178,6 +178,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
+ 	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
+ 	{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+ 	{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
++	{ "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
+ };
+ #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
+index 6adef06..f233b8f 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_display.c
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -32,6 +32,8 @@
+ #include "nouveau_hw.h"
+ #include "nouveau_crtc.h"
+ #include "nouveau_dma.h"
++#include "nouveau_connector.h"
++#include "nouveau_gpio.h"
+ #include "nv50_display.h"
+ 
+ static void
+@@ -64,7 +66,7 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
+ int
+ nouveau_framebuffer_init(struct drm_device *dev,
+ 			 struct nouveau_framebuffer *nv_fb,
+-			 struct drm_mode_fb_cmd *mode_cmd,
++			 struct drm_mode_fb_cmd2 *mode_cmd,
+ 			 struct nouveau_bo *nvbo)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+@@ -107,14 +109,14 @@ nouveau_framebuffer_init(struct drm_device *dev,
+ 
+ 		if (!tile_flags) {
+ 			if (dev_priv->card_type < NV_D0)
+-				nv_fb->r_pitch = 0x00100000 | fb->pitch;
++				nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
+ 			else
+-				nv_fb->r_pitch = 0x01000000 | fb->pitch;
++				nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
+ 		} else {
+ 			u32 mode = nvbo->tile_mode;
+ 			if (dev_priv->card_type >= NV_C0)
+ 				mode >>= 4;
+-			nv_fb->r_pitch = ((fb->pitch / 4) << 4) | mode;
++			nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
+ 		}
+ 	}
+ 
+@@ -124,13 +126,13 @@ nouveau_framebuffer_init(struct drm_device *dev,
+ static struct drm_framebuffer *
+ nouveau_user_framebuffer_create(struct drm_device *dev,
+ 				struct drm_file *file_priv,
+-				struct drm_mode_fb_cmd *mode_cmd)
++				struct drm_mode_fb_cmd2 *mode_cmd)
+ {
+ 	struct nouveau_framebuffer *nouveau_fb;
+ 	struct drm_gem_object *gem;
+ 	int ret;
+ 
+-	gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
++	gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ 	if (!gem)
+ 		return ERR_PTR(-ENOENT);
+ 
+@@ -147,11 +149,207 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
+ 	return &nouveau_fb->base;
+ }
+ 
+-const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
++static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
+ 	.fb_create = nouveau_user_framebuffer_create,
+ 	.output_poll_changed = nouveau_fbcon_output_poll_changed,
+ };
+ 
++
++struct nouveau_drm_prop_enum_list {
++	u8 gen_mask;
++	int type;
++	char *name;
++};
++
++static struct nouveau_drm_prop_enum_list underscan[] = {
++	{ 6, UNDERSCAN_AUTO, "auto" },
++	{ 6, UNDERSCAN_OFF, "off" },
++	{ 6, UNDERSCAN_ON, "on" },
++	{}
++};
++
++static struct nouveau_drm_prop_enum_list dither_mode[] = {
++	{ 7, DITHERING_MODE_AUTO, "auto" },
++	{ 7, DITHERING_MODE_OFF, "off" },
++	{ 1, DITHERING_MODE_ON, "on" },
++	{ 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
++	{ 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
++	{ 4, DITHERING_MODE_TEMPORAL, "temporal" },
++	{}
++};
++
++static struct nouveau_drm_prop_enum_list dither_depth[] = {
++	{ 6, DITHERING_DEPTH_AUTO, "auto" },
++	{ 6, DITHERING_DEPTH_6BPC, "6 bpc" },
++	{ 6, DITHERING_DEPTH_8BPC, "8 bpc" },
++	{}
++};
++
++#define PROP_ENUM(p,gen,n,list) do {                                           \
++	struct nouveau_drm_prop_enum_list *l = (list);                         \
++	int c = 0;                                                             \
++	while (l->gen_mask) {                                                  \
++		if (l->gen_mask & (1 << (gen)))                                \
++			c++;                                                   \
++		l++;                                                           \
++	}                                                                      \
++	if (c) {                                                               \
++		p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c);        \
++		l = (list);                                                    \
++		c = 0;                                                         \
++		while (p && l->gen_mask) {                                     \
++			if (l->gen_mask & (1 << (gen))) {                      \
++				drm_property_add_enum(p, c, l->type, l->name); \
++				c++;                                           \
++			}                                                      \
++			l++;                                                   \
++		}                                                              \
++	}                                                                      \
++} while(0)
++
++int
++nouveau_display_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_display_engine *disp = &dev_priv->engine.display;
++	struct drm_connector *connector;
++	int ret;
++
++	ret = disp->init(dev);
++	if (ret)
++		return ret;
++
++	/* power on internal panel if it's not already.  the init tables of
++	 * some vbios default this to off for some reason, causing the
++	 * panel to not work after resume
++	 */
++	if (nouveau_gpio_func_get(dev, DCB_GPIO_PANEL_POWER) == 0) {
++		nouveau_gpio_func_set(dev, DCB_GPIO_PANEL_POWER, true);
++		msleep(300);
++	}
++
++	/* enable polling for external displays */
++	drm_kms_helper_poll_enable(dev);
++
++	/* enable hotplug interrupts */
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		struct nouveau_connector *conn = nouveau_connector(connector);
++		nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, true);
++	}
++
++	return ret;
++}
++
++void
++nouveau_display_fini(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_display_engine *disp = &dev_priv->engine.display;
++	struct drm_connector *connector;
++
++	/* disable hotplug interrupts */
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		struct nouveau_connector *conn = nouveau_connector(connector);
++		nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, false);
++	}
++
++	drm_kms_helper_poll_disable(dev);
++	disp->fini(dev);
++}
++
++int
++nouveau_display_create(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_display_engine *disp = &dev_priv->engine.display;
++	int ret, gen;
++
++	drm_mode_config_init(dev);
++	drm_mode_create_scaling_mode_property(dev);
++	drm_mode_create_dvi_i_properties(dev);
++
++	if (dev_priv->card_type < NV_50)
++		gen = 0;
++	else
++	if (dev_priv->card_type < NV_D0)
++		gen = 1;
++	else
++		gen = 2;
++
++	PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
++	PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
++	PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);
++
++	disp->underscan_hborder_property =
++		drm_property_create_range(dev, 0, "underscan hborder", 0, 128);
++
++	disp->underscan_vborder_property =
++		drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
++
++	if (gen == 1) {
++		disp->vibrant_hue_property =
++			drm_property_create(dev, DRM_MODE_PROP_RANGE,
++					    "vibrant hue", 2);
++		disp->vibrant_hue_property->values[0] = 0;
++		disp->vibrant_hue_property->values[1] = 180; /* -90..+90 */
++
++		disp->color_vibrance_property =
++			drm_property_create(dev, DRM_MODE_PROP_RANGE,
++					    "color vibrance", 2);
++		disp->color_vibrance_property->values[0] = 0;
++		disp->color_vibrance_property->values[1] = 200; /* -100..+100 */
++	}
++
++	dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
++	dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
++
++	dev->mode_config.min_width = 0;
++	dev->mode_config.min_height = 0;
++	if (dev_priv->card_type < NV_10) {
++		dev->mode_config.max_width = 2048;
++		dev->mode_config.max_height = 2048;
++	} else
++	if (dev_priv->card_type < NV_50) {
++		dev->mode_config.max_width = 4096;
++		dev->mode_config.max_height = 4096;
++	} else {
++		dev->mode_config.max_width = 8192;
++		dev->mode_config.max_height = 8192;
++	}
++
++	dev->mode_config.preferred_depth = 24;
++	dev->mode_config.prefer_shadow = 1;
++
++	drm_kms_helper_poll_init(dev);
++	drm_kms_helper_poll_disable(dev);
++
++	ret = disp->create(dev);
++	if (ret)
++		return ret;
++
++	if (dev->mode_config.num_crtc) {
++		ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
++		if (ret)
++			return ret;
++	}
++
++	return ret;
++}
++
++void
++nouveau_display_destroy(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_display_engine *disp = &dev_priv->engine.display;
++
++	drm_vblank_cleanup(dev);
++
++	disp->destroy(dev);
++
++	drm_kms_helper_poll_fini(dev);
++	drm_mode_config_cleanup(dev);
++}
++
+ int
+ nouveau_vblank_enable(struct drm_device *dev, int crtc)
+ {
+@@ -243,15 +441,19 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
+ 		goto fail;
+ 
+ 	/* Emit the pageflip */
+-	ret = RING_SPACE(chan, 2);
++	ret = RING_SPACE(chan, 3);
+ 	if (ret)
+ 		goto fail;
+ 
+-	if (dev_priv->card_type < NV_C0)
++	if (dev_priv->card_type < NV_C0) {
+ 		BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+-	else
+-		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0500, 1);
+-	OUT_RING  (chan, 0);
++		OUT_RING  (chan, 0x00000000);
++		OUT_RING  (chan, 0x00000000);
++	} else {
++		BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1);
++		OUT_RING  (chan, ++chan->fence.sequence);
++		BEGIN_NVC0(chan, 8, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
++	}
+ 	FIRE_RING (chan);
+ 
+ 	ret = nouveau_fence_new(chan, pfence, true);
+@@ -294,7 +496,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ 	/* Initialize a page flip struct */
+ 	*s = (struct nouveau_page_flip_state)
+ 		{ { }, event, nouveau_crtc(crtc)->index,
+-		  fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
++		  fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
+ 		  new_bo->bo.offset };
+ 
+ 	/* Choose the channel the flip will be handled in */
+@@ -305,7 +507,10 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ 
+ 	/* Emit a page flip */
+ 	if (dev_priv->card_type >= NV_50) {
+-		ret = nv50_display_flip_next(crtc, fb, chan);
++		if (dev_priv->card_type >= NV_D0)
++			ret = nvd0_display_flip_next(crtc, fb, chan, 0);
++		else
++			ret = nv50_display_flip_next(crtc, fb, chan);
+ 		if (ret) {
+ 			nouveau_channel_put(&chan);
+ 			goto fail_unreserve;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
+index 00bc6ea..295932e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
++++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
+@@ -31,7 +31,7 @@
+ #include "nouveau_ramht.h"
+ 
+ void
+-nouveau_dma_pre_init(struct nouveau_channel *chan)
++nouveau_dma_init(struct nouveau_channel *chan)
+ {
+ 	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ 	struct nouveau_bo *pushbuf = chan->pushbuf_bo;
+@@ -54,65 +54,6 @@ nouveau_dma_pre_init(struct nouveau_channel *chan)
+ 	chan->dma.free = chan->dma.max - chan->dma.cur;
+ }
+ 
+-int
+-nouveau_dma_init(struct nouveau_channel *chan)
+-{
+-	struct drm_device *dev = chan->dev;
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	int ret, i;
+-
+-	if (dev_priv->card_type >= NV_C0) {
+-		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
+-		if (ret)
+-			return ret;
+-
+-		ret = RING_SPACE(chan, 2);
+-		if (ret)
+-			return ret;
+-
+-		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
+-		OUT_RING  (chan, 0x00009039);
+-		FIRE_RING (chan);
+-		return 0;
+-	}
+-
+-	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
+-	ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
+-				    0x0039 : 0x5039);
+-	if (ret)
+-		return ret;
+-
+-	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
+-	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+-				     &chan->m2mf_ntfy);
+-	if (ret)
+-		return ret;
+-
+-	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
+-	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
+-	if (ret)
+-		return ret;
+-
+-	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+-		OUT_RING(chan, 0);
+-
+-	/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
+-	ret = RING_SPACE(chan, 6);
+-	if (ret)
+-		return ret;
+-	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
+-	OUT_RING  (chan, NvM2MF);
+-	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
+-	OUT_RING  (chan, NvNotify0);
+-	OUT_RING  (chan, chan->vram_handle);
+-	OUT_RING  (chan, chan->gart_handle);
+-
+-	/* Sit back and pray the channel works.. */
+-	FIRE_RING(chan);
+-
+-	return 0;
+-}
+-
+ void
+ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
+ {
+@@ -134,11 +75,13 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
+  *  -EBUSY if timeout exceeded
+  */
+ static inline int
+-READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
++READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
+ {
+-	uint32_t val;
++	uint64_t val;
+ 
+ 	val = nvchan_rd32(chan, chan->user_get);
++        if (chan->user_get_hi)
++                val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;
+ 
+ 	/* reset counter as long as GET is still advancing, this is
+ 	 * to avoid misdetecting a GPU lockup if the GPU happens to
+@@ -218,8 +161,8 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
+ static int
+ nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
+ {
+-	uint32_t cnt = 0, prev_get = 0;
+-	int ret;
++	uint64_t prev_get = 0;
++	int ret, cnt = 0;
+ 
+ 	ret = nv50_dma_push_wait(chan, slots + 1);
+ 	if (unlikely(ret))
+@@ -261,8 +204,8 @@ nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
+ int
+ nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
+ {
+-	uint32_t prev_get = 0, cnt = 0;
+-	int get;
++	uint64_t prev_get = 0;
++	int cnt = 0, get;
+ 
+ 	if (chan->dma.ib_max)
+ 		return nv50_dma_wait(chan, slots, size);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
+index de5efe7..d996134 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
++++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
+@@ -29,6 +29,7 @@
+ #include "nouveau_connector.h"
+ #include "nouveau_encoder.h"
+ #include "nouveau_crtc.h"
++#include "nouveau_gpio.h"
+ 
+ /******************************************************************************
+  * aux channel util functions
+@@ -160,121 +161,9 @@ out:
+ 	return ret;
+ }
+ 
+-static u32
+-dp_link_bw_get(struct drm_device *dev, int or, int link)
+-{
+-	u32 ctrl = nv_rd32(dev, 0x614300 + (or * 0x800));
+-	if (!(ctrl & 0x000c0000))
+-		return 162000;
+-	return 270000;
+-}
+-
+-static int
+-dp_lane_count_get(struct drm_device *dev, int or, int link)
+-{
+-	u32 ctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+-	switch (ctrl & 0x000f0000) {
+-	case 0x00010000: return 1;
+-	case 0x00030000: return 2;
+-	default:
+-		return 4;
+-	}
+-}
+-
+-void
+-nouveau_dp_tu_update(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
+-{
+-	const u32 symbol = 100000;
+-	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
+-	int TU, VTUi, VTUf, VTUa;
+-	u64 link_data_rate, link_ratio, unk;
+-	u32 best_diff = 64 * symbol;
+-	u32 link_nr, link_bw, r;
+-
+-	/* calculate packed data rate for each lane */
+-	link_nr = dp_lane_count_get(dev, or, link);
+-	link_data_rate = (clk * bpp / 8) / link_nr;
+-
+-	/* calculate ratio of packed data rate to link symbol rate */
+-	link_bw = dp_link_bw_get(dev, or, link);
+-	link_ratio = link_data_rate * symbol;
+-	r = do_div(link_ratio, link_bw);
+-
+-	for (TU = 64; TU >= 32; TU--) {
+-		/* calculate average number of valid symbols in each TU */
+-		u32 tu_valid = link_ratio * TU;
+-		u32 calc, diff;
+-
+-		/* find a hw representation for the fraction.. */
+-		VTUi = tu_valid / symbol;
+-		calc = VTUi * symbol;
+-		diff = tu_valid - calc;
+-		if (diff) {
+-			if (diff >= (symbol / 2)) {
+-				VTUf = symbol / (symbol - diff);
+-				if (symbol - (VTUf * diff))
+-					VTUf++;
+-
+-				if (VTUf <= 15) {
+-					VTUa  = 1;
+-					calc += symbol - (symbol / VTUf);
+-				} else {
+-					VTUa  = 0;
+-					VTUf  = 1;
+-					calc += symbol;
+-				}
+-			} else {
+-				VTUa  = 0;
+-				VTUf  = min((int)(symbol / diff), 15);
+-				calc += symbol / VTUf;
+-			}
+-
+-			diff = calc - tu_valid;
+-		} else {
+-			/* no remainder, but the hw doesn't like the fractional
+-			 * part to be zero.  decrement the integer part and
+-			 * have the fraction add a whole symbol back
+-			 */
+-			VTUa = 0;
+-			VTUf = 1;
+-			VTUi--;
+-		}
+-
+-		if (diff < best_diff) {
+-			best_diff = diff;
+-			bestTU = TU;
+-			bestVTUa = VTUa;
+-			bestVTUf = VTUf;
+-			bestVTUi = VTUi;
+-			if (diff == 0)
+-				break;
+-		}
+-	}
+-
+-	if (!bestTU) {
+-		NV_ERROR(dev, "DP: unable to find suitable config\n");
+-		return;
+-	}
+-
+-	/* XXX close to vbios numbers, but not right */
+-	unk  = (symbol - link_ratio) * bestTU;
+-	unk *= link_ratio;
+-	r = do_div(unk, symbol);
+-	r = do_div(unk, symbol);
+-	unk += 6;
+-
+-	nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
+-	nv_mask(dev, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
+-							     bestVTUf << 16 |
+-							     bestVTUi << 8 |
+-							     unk);
+-}
+-
+ u8 *
+ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
+ {
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nvbios *bios = &dev_priv->vbios;
+ 	struct bit_entry d;
+ 	u8 *table;
+ 	int i;
+@@ -289,7 +178,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
+ 		return NULL;
+ 	}
+ 
+-	table = ROMPTR(bios, d.data[0]);
++	table = ROMPTR(dev, d.data[0]);
+ 	if (!table) {
+ 		NV_ERROR(dev, "displayport table pointer invalid\n");
+ 		return NULL;
+@@ -299,6 +188,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
+ 	case 0x20:
+ 	case 0x21:
+ 	case 0x30:
++	case 0x40:
+ 		break;
+ 	default:
+ 		NV_ERROR(dev, "displayport table 0x%02x unknown\n", table[0]);
+@@ -306,7 +196,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
+ 	}
+ 
+ 	for (i = 0; i < table[3]; i++) {
+-		*entry = ROMPTR(bios, table[table[1] + (i * table[2])]);
++		*entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
+ 		if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
+ 			return table;
+ 	}
+@@ -319,13 +209,10 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
+  * link training
+  *****************************************************************************/
+ struct dp_state {
++	struct dp_train_func *func;
+ 	struct dcb_entry *dcb;
+-	u8 *table;
+-	u8 *entry;
+ 	int auxch;
+ 	int crtc;
+-	int or;
+-	int link;
+ 	u8 *dpcd;
+ 	int link_nr;
+ 	u32 link_bw;
+@@ -336,143 +223,58 @@ struct dp_state {
+ static void
+ dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
+ {
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	int or = dp->or, link = dp->link;
+-	u8 *entry, sink[2];
+-	u32 dp_ctrl;
+-	u16 script;
++	u8 sink[2];
+ 
+ 	NV_DEBUG_KMS(dev, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
+ 
+-	/* set selected link rate on source */
+-	switch (dp->link_bw) {
+-	case 270000:
+-		nv_mask(dev, 0x614300 + (or * 0x800), 0x000c0000, 0x00040000);
+-		sink[0] = DP_LINK_BW_2_7;
+-		break;
+-	default:
+-		nv_mask(dev, 0x614300 + (or * 0x800), 0x000c0000, 0x00000000);
+-		sink[0] = DP_LINK_BW_1_62;
+-		break;
+-	}
+-
+-	/* offset +0x0a of each dp encoder table entry is a pointer to another
+-	 * table, that has (among other things) pointers to more scripts that
+-	 * need to be executed, this time depending on link speed.
+-	 */
+-	entry = ROMPTR(&dev_priv->vbios, dp->entry[10]);
+-	if (entry) {
+-		if (dp->table[0] < 0x30) {
+-			while (dp->link_bw < (ROM16(entry[0]) * 10))
+-				entry += 4;
+-			script = ROM16(entry[2]);
+-		} else {
+-			while (dp->link_bw < (entry[0] * 27000))
+-				entry += 3;
+-			script = ROM16(entry[1]);
+-		}
+-
+-		nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
+-	}
++	/* set desired link configuration on the source */
++	dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw,
++			   dp->dpcd[2] & DP_ENHANCED_FRAME_CAP);
+ 
+-	/* configure lane count on the source */
+-	dp_ctrl = ((1 << dp->link_nr) - 1) << 16;
++	/* inform the sink of the new configuration */
++	sink[0] = dp->link_bw / 27000;
+ 	sink[1] = dp->link_nr;
+-	if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP) {
+-		dp_ctrl |= 0x00004000;
++	if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
+ 		sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+-	}
+-
+-	nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x001f4000, dp_ctrl);
+ 
+-	/* inform the sink of the new configuration */
+ 	auxch_tx(dev, dp->auxch, 8, DP_LINK_BW_SET, sink, 2);
+ }
+ 
+ static void
+-dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 tp)
++dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
+ {
+ 	u8 sink_tp;
+ 
+-	NV_DEBUG_KMS(dev, "training pattern %d\n", tp);
++	NV_DEBUG_KMS(dev, "training pattern %d\n", pattern);
+ 
+-	nv_mask(dev, NV50_SOR_DP_CTRL(dp->or, dp->link), 0x0f000000, tp << 24);
++	dp->func->train_set(dev, dp->dcb, pattern);
+ 
+ 	auxch_tx(dev, dp->auxch, 9, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
+ 	sink_tp &= ~DP_TRAINING_PATTERN_MASK;
+-	sink_tp |= tp;
++	sink_tp |= pattern;
+ 	auxch_tx(dev, dp->auxch, 8, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
+ }
+ 
+-static const u8 nv50_lane_map[] = { 16, 8, 0, 24 };
+-static const u8 nvaf_lane_map[] = { 24, 16, 8, 0 };
+-
+ static int
+ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
+ {
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	u32 mask = 0, drv = 0, pre = 0, unk = 0;
+-	const u8 *shifts;
+-	int link = dp->link;
+-	int or = dp->or;
+ 	int i;
+ 
+-	if (dev_priv->chipset != 0xaf)
+-		shifts = nv50_lane_map;
+-	else
+-		shifts = nvaf_lane_map;
+-
+ 	for (i = 0; i < dp->link_nr; i++) {
+-		u8 *conf = dp->entry + dp->table[4];
+ 		u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
+ 		u8 lpre = (lane & 0x0c) >> 2;
+ 		u8 lvsw = (lane & 0x03) >> 0;
+ 
+-		mask |= 0xff << shifts[i];
+-		unk |= 1 << (shifts[i] >> 3);
+-
+ 		dp->conf[i] = (lpre << 3) | lvsw;
+ 		if (lvsw == DP_TRAIN_VOLTAGE_SWING_1200)
+ 			dp->conf[i] |= DP_TRAIN_MAX_SWING_REACHED;
+-		if (lpre == DP_TRAIN_PRE_EMPHASIS_9_5)
++		if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5)
+ 			dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ 
+ 		NV_DEBUG_KMS(dev, "config lane %d %02x\n", i, dp->conf[i]);
+-
+-		if (dp->table[0] < 0x30) {
+-			u8 *last = conf + (dp->entry[4] * dp->table[5]);
+-			while (lvsw != conf[0] || lpre != conf[1]) {
+-				conf += dp->table[5];
+-				if (conf >= last)
+-					return -EINVAL;
+-			}
+-
+-			conf += 2;
+-		} else {
+-			/* no lookup table anymore, set entries for each
+-			 * combination of voltage swing and pre-emphasis
+-			 * level allowed by the DP spec.
+-			 */
+-			switch (lvsw) {
+-			case 0: lpre += 0; break;
+-			case 1: lpre += 4; break;
+-			case 2: lpre += 7; break;
+-			case 3: lpre += 9; break;
+-			}
+-
+-			conf = conf + (lpre * dp->table[5]);
+-			conf++;
+-		}
+-
+-		drv |= conf[0] << shifts[i];
+-		pre |= conf[1] << shifts[i];
+-		unk  = (unk & ~0x0000ff00) | (conf[2] << 8);
++		dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre);
+ 	}
+ 
+-	nv_mask(dev, NV50_SOR_DP_UNK118(or, link), mask, drv);
+-	nv_mask(dev, NV50_SOR_DP_UNK120(or, link), mask, pre);
+-	nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000ff0f, unk);
+-
+ 	return auxch_tx(dev, dp->auxch, 8, DP_TRAINING_LANE0_SET, dp->conf, 4);
+ }
+ 
+@@ -556,11 +358,61 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
+ 	return eq_done ? 0 : -1;
+ }
+ 
++static void
++dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable)
++{
++	u16 script = 0x0000;
++	u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
++	if (table) {
++		if (table[0] >= 0x20 && table[0] <= 0x30) {
++			if (enable) script = ROM16(entry[12]);
++			else        script = ROM16(entry[14]);
++		} else
++		if (table[0] == 0x40) {
++			if (enable) script = ROM16(entry[11]);
++			else        script = ROM16(entry[13]);
++		}
++	}
++
++	nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
++}
++
++static void
++dp_link_train_init(struct drm_device *dev, struct dp_state *dp)
++{
++	u16 script = 0x0000;
++	u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
++	if (table) {
++		if (table[0] >= 0x20 && table[0] <= 0x30)
++			script = ROM16(entry[6]);
++		else
++		if (table[0] == 0x40)
++			script = ROM16(entry[5]);
++	}
++
++	nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
++}
++
++static void
++dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
++{
++	u16 script = 0x0000;
++	u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
++	if (table) {
++		if (table[0] >= 0x20 && table[0] <= 0x30)
++			script = ROM16(entry[8]);
++		else
++		if (table[0] == 0x40)
++			script = ROM16(entry[7]);
++	}
++
++	nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
++}
++
+ bool
+-nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
++nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
++		      struct dp_train_func *func)
+ {
+-	struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ 	struct nouveau_connector *nv_connector =
+@@ -575,34 +427,26 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
+ 	if (!auxch)
+ 		return false;
+ 
+-	dp.table = nouveau_dp_bios_data(dev, nv_encoder->dcb, &dp.entry);
+-	if (!dp.table)
+-		return -EINVAL;
+-
++	dp.func = func;
+ 	dp.dcb = nv_encoder->dcb;
+ 	dp.crtc = nv_crtc->index;
+-	dp.auxch = auxch->rd;
+-	dp.or = nv_encoder->or;
+-	dp.link = !(nv_encoder->dcb->sorconf.link & 1);
++	dp.auxch = auxch->drive;
+ 	dp.dpcd = nv_encoder->dp.dpcd;
+ 
++	/* adjust required bandwidth for 8B/10B coding overhead */
++	datarate = (datarate / 8) * 10;
++
+ 	/* some sinks toggle hotplug in response to some of the actions
+ 	 * we take during link training (DP_SET_POWER is one), we need
+ 	 * to ignore them for the moment to avoid races.
+ 	 */
+-	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
++	nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false);
+ 
+ 	/* enable down-spreading, if possible */
+-	if (dp.table[1] >= 16) {
+-		u16 script = ROM16(dp.entry[14]);
+-		if (nv_encoder->dp.dpcd[3] & 1)
+-			script = ROM16(dp.entry[12]);
+-
+-		nouveau_bios_run_init_table(dev, script, dp.dcb, dp.crtc);
+-	}
++	dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
+ 
+ 	/* execute pre-train script from vbios */
+-	nouveau_bios_run_init_table(dev, ROM16(dp.entry[6]), dp.dcb, dp.crtc);
++	dp_link_train_init(dev, &dp);
+ 
+ 	/* start off at highest link rate supported by encoder and display */
+ 	while (*link_bw > nv_encoder->dp.link_bw)
+@@ -636,13 +480,36 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
+ 	dp_set_training_pattern(dev, &dp, DP_TRAINING_PATTERN_DISABLE);
+ 
+ 	/* execute post-train script from vbios */
+-	nouveau_bios_run_init_table(dev, ROM16(dp.entry[8]), dp.dcb, dp.crtc);
++	dp_link_train_fini(dev, &dp);
+ 
+ 	/* re-enable hotplug detect */
+-	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
++	nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true);
+ 	return true;
+ }
+ 
++void
++nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
++		struct dp_train_func *func)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct nouveau_i2c_chan *auxch;
++	u8 status;
++
++	auxch = nouveau_i2c_find(encoder->dev, nv_encoder->dcb->i2c_index);
++	if (!auxch)
++		return;
++
++	if (mode == DRM_MODE_DPMS_ON)
++		status = DP_SET_POWER_D0;
++	else
++		status = DP_SET_POWER_D3;
++
++	nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
++
++	if (mode == DRM_MODE_DPMS_ON)
++		nouveau_dp_link_train(encoder, datarate, func);
++}
++
+ bool
+ nouveau_dp_detect(struct drm_encoder *encoder)
+ {
+@@ -656,7 +523,7 @@ nouveau_dp_detect(struct drm_encoder *encoder)
+ 	if (!auxch)
+ 		return false;
+ 
+-	ret = auxch_tx(dev, auxch->rd, 9, DP_DPCD_REV, dpcd, 8);
++	ret = auxch_tx(dev, auxch->drive, 9, DP_DPCD_REV, dpcd, 8);
+ 	if (ret)
+ 		return false;
+ 
+@@ -684,7 +551,7 @@ int
+ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
+ 		 uint8_t *data, int data_nr)
+ {
+-	return auxch_tx(auxch->dev, auxch->rd, cmd, addr, data, data_nr);
++	return auxch_tx(auxch->dev, auxch->drive, cmd, addr, data, data_nr);
+ }
+ 
+ static int
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
+index 8c084c0..05091c2 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
+@@ -57,6 +57,10 @@ MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
+ int nouveau_vram_notify = 0;
+ module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
+ 
++MODULE_PARM_DESC(vram_type, "Override detected VRAM type");
++char *nouveau_vram_type;
++module_param_named(vram_type, nouveau_vram_type, charp, 0400);
++
+ MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
+ int nouveau_duallink = 1;
+ module_param_named(duallink, nouveau_duallink, int, 0400);
+@@ -89,7 +93,7 @@ MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
+ int nouveau_override_conntype = 0;
+ module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
+ 
+-MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n");
++MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
+ int nouveau_tv_disable = 0;
+ module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
+ 
+@@ -104,26 +108,30 @@ module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
+ MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
+ 		"\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n"
+ 		"\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n"
+-		"\t\t0x100 vgaattr, 0x200 EVO (G80+). ");
++		"\t\t0x100 vgaattr, 0x200 EVO (G80+)");
+ int nouveau_reg_debug;
+ module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
+ 
+-MODULE_PARM_DESC(perflvl, "Performance level (default: boot)\n");
++MODULE_PARM_DESC(perflvl, "Performance level (default: boot)");
+ char *nouveau_perflvl;
+ module_param_named(perflvl, nouveau_perflvl, charp, 0400);
+ 
+-MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
++MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)");
+ int nouveau_perflvl_wr;
+ module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
+ 
+-MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n");
++MODULE_PARM_DESC(msi, "Enable MSI (default: off)");
+ int nouveau_msi;
+ module_param_named(msi, nouveau_msi, int, 0400);
+ 
+-MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
++MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)");
+ int nouveau_ctxfw;
+ module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
+ 
++MODULE_PARM_DESC(mxmdcb, "Santise DCB table according to MXM-SIS");
++int nouveau_mxmdcb = 1;
++module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400);
++
+ int nouveau_fbpercrtc;
+ #if 0
+ module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
+@@ -179,8 +187,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+ 		return 0;
+ 
+ 	if (dev->mode_config.num_crtc) {
+-		NV_INFO(dev, "Disabling fbcon acceleration...\n");
+-		nouveau_fbcon_save_disable_accel(dev);
++		NV_INFO(dev, "Disabling display...\n");
++		nouveau_display_fini(dev);
++
++		NV_INFO(dev, "Disabling fbcon...\n");
++		nouveau_fbcon_set_suspend(dev, 1);
+ 	}
+ 
+ 	NV_INFO(dev, "Unpinning framebuffer(s)...\n");
+@@ -222,7 +233,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+ 
+ 		ret = dev_priv->eng[e]->fini(dev, e, true);
+ 		if (ret) {
+-			NV_ERROR(dev, "... engine %d failed: %d\n", i, ret);
++			NV_ERROR(dev, "... engine %d failed: %d\n", e, ret);
+ 			goto out_abort;
+ 		}
+ 	}
+@@ -248,12 +259,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
+ 		pci_set_power_state(pdev, PCI_D3hot);
+ 	}
+ 
+-	if (dev->mode_config.num_crtc) {
+-		console_lock();
+-		nouveau_fbcon_set_suspend(dev, 1);
+-		console_unlock();
+-		nouveau_fbcon_restore_accel(dev);
+-	}
+ 	return 0;
+ 
+ out_abort:
+@@ -279,9 +284,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
+ 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ 		return 0;
+ 
+-	if (dev->mode_config.num_crtc)
+-		nouveau_fbcon_save_disable_accel(dev);
+-
+ 	NV_INFO(dev, "We're back, enabling device...\n");
+ 	pci_set_power_state(pdev, PCI_D0);
+ 	pci_restore_state(pdev);
+@@ -301,8 +303,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
+ 	if (ret)
+ 		return ret;
+ 
+-	nouveau_pm_resume(dev);
+-
+ 	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ 		ret = nouveau_mem_init_agp(dev);
+ 		if (ret) {
+@@ -342,6 +342,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
+ 		}
+ 	}
+ 
++	nouveau_pm_resume(dev);
++
+ 	NV_INFO(dev, "Restoring mode...\n");
+ 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ 		struct nouveau_framebuffer *nouveau_fb;
+@@ -363,15 +365,11 @@ nouveau_pci_resume(struct pci_dev *pdev)
+ 			NV_ERROR(dev, "Could not pin/map cursor.\n");
+ 	}
+ 
+-	engine->display.init(dev);
+-
+-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+-		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+-		u32 offset = nv_crtc->cursor.nvbo->bo.offset;
++	if (dev->mode_config.num_crtc) {
++		nouveau_fbcon_set_suspend(dev, 0);
++		nouveau_fbcon_zfill_all(dev);
+ 
+-		nv_crtc->cursor.set_offset(nv_crtc, offset);
+-		nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+-						 nv_crtc->cursor_saved_y);
++		nouveau_display_init(dev);
+ 	}
+ 
+ 	/* Force CLUT to get re-loaded during modeset */
+@@ -381,21 +379,35 @@ nouveau_pci_resume(struct pci_dev *pdev)
+ 		nv_crtc->lut.depth = 0;
+ 	}
+ 
+-	if (dev->mode_config.num_crtc) {
+-		console_lock();
+-		nouveau_fbcon_set_suspend(dev, 0);
+-		console_unlock();
++	drm_helper_resume_force_mode(dev);
+ 
+-		nouveau_fbcon_zfill_all(dev);
+-	}
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++		u32 offset = nv_crtc->cursor.nvbo->bo.offset;
+ 
+-	drm_helper_resume_force_mode(dev);
++		nv_crtc->cursor.set_offset(nv_crtc, offset);
++		nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
++						 nv_crtc->cursor_saved_y);
++	}
+ 
+-	if (dev->mode_config.num_crtc)
+-		nouveau_fbcon_restore_accel(dev);
+ 	return 0;
+ }
+ 
++static const struct file_operations nouveau_driver_fops = {
++	.owner = THIS_MODULE,
++	.open = drm_open,
++	.release = drm_release,
++	.unlocked_ioctl = drm_ioctl,
++	.mmap = nouveau_ttm_mmap,
++	.poll = drm_poll,
++	.fasync = drm_fasync,
++	.read = drm_read,
++#if defined(CONFIG_COMPAT)
++	.compat_ioctl = nouveau_compat_ioctl,
++#endif
++	.llseek = noop_llseek,
++};
++
+ static struct drm_driver driver = {
+ 	.driver_features =
+ 		DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
+@@ -421,21 +433,7 @@ static struct drm_driver driver = {
+ 	.disable_vblank = nouveau_vblank_disable,
+ 	.reclaim_buffers = drm_core_reclaim_buffers,
+ 	.ioctls = nouveau_ioctls,
+-	.fops = {
+-		.owner = THIS_MODULE,
+-		.open = drm_open,
+-		.release = drm_release,
+-		.unlocked_ioctl = drm_ioctl,
+-		.mmap = nouveau_ttm_mmap,
+-		.poll = drm_poll,
+-		.fasync = drm_fasync,
+-		.read = drm_read,
+-#if defined(CONFIG_COMPAT)
+-		.compat_ioctl = nouveau_compat_ioctl,
+-#endif
+-		.llseek = noop_llseek,
+-	},
+-
++	.fops = &nouveau_driver_fops,
+ 	.gem_init_object = nouveau_gem_object_new,
+ 	.gem_free_object = nouveau_gem_object_del,
+ 	.gem_open_object = nouveau_gem_object_open,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
+index 4c0be3a..3aef353 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
+@@ -26,15 +26,15 @@
+ #define __NOUVEAU_DRV_H__
+ 
+ #define DRIVER_AUTHOR		"Stephane Marchesin"
+-#define DRIVER_EMAIL		"dri-devel at lists.sourceforge.net"
++#define DRIVER_EMAIL		"nouveau at lists.freedesktop.org"
+ 
+ #define DRIVER_NAME		"nouveau"
+ #define DRIVER_DESC		"nVidia Riva/TNT/GeForce"
+-#define DRIVER_DATE		"20090420"
++#define DRIVER_DATE		"20120316"
+ 
+-#define DRIVER_MAJOR		0
++#define DRIVER_MAJOR		1
+ #define DRIVER_MINOR		0
+-#define DRIVER_PATCHLEVEL	16
++#define DRIVER_PATCHLEVEL	0
+ 
+ #define NOUVEAU_FAMILY   0x0000FFFF
+ #define NOUVEAU_FLAGS    0xFFFF0000
+@@ -113,8 +113,6 @@ struct nouveau_bo {
+ 	int pbbo_index;
+ 	bool validate_mapped;
+ 
+-	struct nouveau_channel *channel;
+-
+ 	struct list_head vma_list;
+ 	unsigned page_shift;
+ 
+@@ -163,6 +161,9 @@ enum nouveau_flags {
+ #define NVOBJ_ENGINE_COPY0	3
+ #define NVOBJ_ENGINE_COPY1	4
+ #define NVOBJ_ENGINE_MPEG	5
++#define NVOBJ_ENGINE_PPP	NVOBJ_ENGINE_MPEG
++#define NVOBJ_ENGINE_BSP	6
++#define NVOBJ_ENGINE_VP		7
+ #define NVOBJ_ENGINE_DISPLAY	15
+ #define NVOBJ_ENGINE_NR		16
+ 
+@@ -229,6 +230,7 @@ struct nouveau_channel {
+ 	/* mapping of the regs controlling the fifo */
+ 	void __iomem *user;
+ 	uint32_t user_get;
++	uint32_t user_get_hi;
+ 	uint32_t user_put;
+ 
+ 	/* Fencing */
+@@ -246,7 +248,7 @@ struct nouveau_channel {
+ 	struct nouveau_gpuobj *pushbuf;
+ 	struct nouveau_bo     *pushbuf_bo;
+ 	struct nouveau_vma     pushbuf_vma;
+-	uint32_t               pushbuf_base;
++	uint64_t               pushbuf_base;
+ 
+ 	/* Notifier memory */
+ 	struct nouveau_bo *notifier_bo;
+@@ -292,7 +294,7 @@ struct nouveau_channel {
+ 
+ 	uint32_t sw_subchannel[8];
+ 
+-	struct nouveau_vma dispc_vma[2];
++	struct nouveau_vma dispc_vma[4];
+ 	struct {
+ 		struct nouveau_gpuobj *vblsem;
+ 		uint32_t vblsem_head;
+@@ -393,24 +395,28 @@ struct nouveau_display_engine {
+ 	int (*early_init)(struct drm_device *);
+ 	void (*late_takedown)(struct drm_device *);
+ 	int (*create)(struct drm_device *);
+-	int (*init)(struct drm_device *);
+ 	void (*destroy)(struct drm_device *);
++	int (*init)(struct drm_device *);
++	void (*fini)(struct drm_device *);
++
++	struct drm_property *dithering_mode;
++	struct drm_property *dithering_depth;
++	struct drm_property *underscan_property;
++	struct drm_property *underscan_hborder_property;
++	struct drm_property *underscan_vborder_property;
++	/* not really hue and saturation: */
++	struct drm_property *vibrant_hue_property;
++	struct drm_property *color_vibrance_property;
+ };
+ 
+ struct nouveau_gpio_engine {
+-	void *priv;
+-
+-	int  (*init)(struct drm_device *);
+-	void (*takedown)(struct drm_device *);
+-
+-	int  (*get)(struct drm_device *, enum dcb_gpio_tag);
+-	int  (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
+-
+-	int  (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
+-			     void (*)(void *, int), void *);
+-	void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
+-			       void (*)(void *, int), void *);
+-	bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
++	spinlock_t lock;
++	struct list_head isr;
++	int (*init)(struct drm_device *);
++	void (*fini)(struct drm_device *);
++	int (*drive)(struct drm_device *, int line, int dir, int out);
++	int (*sense)(struct drm_device *, int line);
++	void (*irq_enable)(struct drm_device *, int line, bool);
+ };
+ 
+ struct nouveau_pm_voltage_level {
+@@ -427,64 +433,91 @@ struct nouveau_pm_voltage {
+ 	int nr_level;
+ };
+ 
++/* Exclusive upper limits */
++#define NV_MEM_CL_DDR2_MAX 8
++#define NV_MEM_WR_DDR2_MAX 9
++#define NV_MEM_CL_DDR3_MAX 17
++#define NV_MEM_WR_DDR3_MAX 17
++#define NV_MEM_CL_GDDR3_MAX 16
++#define NV_MEM_WR_GDDR3_MAX 18
++#define NV_MEM_CL_GDDR5_MAX 21
++#define NV_MEM_WR_GDDR5_MAX 20
++
+ struct nouveau_pm_memtiming {
+ 	int id;
+-	u32 reg_0; /* 0x10f290 on Fermi, 0x100220 for older */
+-	u32 reg_1;
+-	u32 reg_2;
+-	u32 reg_3;
+-	u32 reg_4;
+-	u32 reg_5;
+-	u32 reg_6;
+-	u32 reg_7;
+-	u32 reg_8;
+-	/* To be written to 0x1002c0 */
+-	u8 CL;
+-	u8 WR;
++
++	u32 reg[9];
++	u32 mr[4];
++
++	u8 tCWL;
++
++	u8 odt;
++	u8 drive_strength;
+ };
+ 
+-struct nouveau_pm_tbl_header{
++struct nouveau_pm_tbl_header {
+ 	u8 version;
+ 	u8 header_len;
+ 	u8 entry_cnt;
+ 	u8 entry_len;
+ };
+ 
+-struct nouveau_pm_tbl_entry{
++struct nouveau_pm_tbl_entry {
+ 	u8 tWR;
+-	u8 tUNK_1;
++	u8 tWTR;
+ 	u8 tCL;
+-	u8 tRP;		/* Byte 3 */
++	u8 tRC;
+ 	u8 empty_4;
+-	u8 tRAS;	/* Byte 5 */
++	u8 tRFC;	/* Byte 5 */
+ 	u8 empty_6;
+-	u8 tRFC;	/* Byte 7 */
++	u8 tRAS;	/* Byte 7 */
+ 	u8 empty_8;
+-	u8 tRC;		/* Byte 9 */
+-	u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
+-	u8 empty_15,empty_16,empty_17;
+-	u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
++	u8 tRP;		/* Byte 9 */
++	u8 tRCDRD;
++	u8 tRCDWR;
++	u8 tRRD;
++	u8 tUNK_13;
++	u8 RAM_FT1;		/* 14, a bitmask of random RAM features */
++	u8 empty_15;
++	u8 tUNK_16;
++	u8 empty_17;
++	u8 tUNK_18;
++	u8 tCWL;
++	u8 tUNK_20, tUNK_21;
+ };
+ 
+-/* nouveau_mem.c */
+-void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
+-							struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
+-							struct nouveau_pm_memtiming *timing);
++struct nouveau_pm_profile;
++struct nouveau_pm_profile_func {
++	void (*destroy)(struct nouveau_pm_profile *);
++	void (*init)(struct nouveau_pm_profile *);
++	void (*fini)(struct nouveau_pm_profile *);
++	struct nouveau_pm_level *(*select)(struct nouveau_pm_profile *);
++};
++
++struct nouveau_pm_profile {
++	const struct nouveau_pm_profile_func *func;
++	struct list_head head;
++	char name[8];
++};
+ 
+ #define NOUVEAU_PM_MAX_LEVEL 8
+ struct nouveau_pm_level {
++	struct nouveau_pm_profile profile;
+ 	struct device_attribute dev_attr;
+ 	char name[32];
+ 	int id;
+ 
+-	u32 core;
++	struct nouveau_pm_memtiming timing;
+ 	u32 memory;
++	u16 memscript;
++
++	u32 core;
+ 	u32 shader;
+ 	u32 rop;
+ 	u32 copy;
+ 	u32 daemon;
+ 	u32 vdec;
+-	u32 unk05;	/* nv50:nva3, roughly.. */
++	u32 dom6;
+ 	u32 unka0;	/* nva3:nvc0 */
+ 	u32 hub01;	/* nvc0- */
+ 	u32 hub06;	/* nvc0- */
+@@ -493,9 +526,6 @@ struct nouveau_pm_level {
+ 	u32 volt_min; /* microvolts */
+ 	u32 volt_max;
+ 	u8  fanspeed;
+-
+-	u16 memscript;
+-	struct nouveau_pm_memtiming *timing;
+ };
+ 
+ struct nouveau_pm_temp_sensor_constants {
+@@ -512,19 +542,26 @@ struct nouveau_pm_threshold_temp {
+ 	s16 fan_boost;
+ };
+ 
+-struct nouveau_pm_memtimings {
+-	bool supported;
+-	struct nouveau_pm_memtiming *timing;
+-	int nr_timing;
++struct nouveau_pm_fan {
++	u32 percent;
++	u32 min_duty;
++	u32 max_duty;
++	u32 pwm_freq;
++	u32 pwm_divisor;
+ };
+ 
+ struct nouveau_pm_engine {
+ 	struct nouveau_pm_voltage voltage;
+ 	struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
+ 	int nr_perflvl;
+-	struct nouveau_pm_memtimings memtimings;
+ 	struct nouveau_pm_temp_sensor_constants sensor_constants;
+ 	struct nouveau_pm_threshold_temp threshold_temp;
++	struct nouveau_pm_fan fan;
++
++	struct nouveau_pm_profile *profile_ac;
++	struct nouveau_pm_profile *profile_dc;
++	struct nouveau_pm_profile *profile;
++	struct list_head profiles;
+ 
+ 	struct nouveau_pm_level boot;
+ 	struct nouveau_pm_level *cur;
+@@ -532,19 +569,14 @@ struct nouveau_pm_engine {
+ 	struct device *hwmon;
+ 	struct notifier_block acpi_nb;
+ 
+-	int (*clock_get)(struct drm_device *, u32 id);
+-	void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
+-			   u32 id, int khz);
+-	void (*clock_set)(struct drm_device *, void *);
+-
+ 	int  (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
+ 	void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
+-	void (*clocks_set)(struct drm_device *, void *);
++	int (*clocks_set)(struct drm_device *, void *);
+ 
+ 	int (*voltage_get)(struct drm_device *);
+ 	int (*voltage_set)(struct drm_device *, int voltage);
+-	int (*fanspeed_get)(struct drm_device *);
+-	int (*fanspeed_set)(struct drm_device *, int fanspeed);
++	int (*pwm_get)(struct drm_device *, int line, u32*, u32*);
++	int (*pwm_set)(struct drm_device *, int line, u32, u32);
+ 	int (*temp_get)(struct drm_device *);
+ };
+ 
+@@ -661,14 +693,15 @@ struct nv04_mode_state {
+ };
+ 
+ enum nouveau_card_type {
+-	NV_04      = 0x00,
++	NV_04      = 0x04,
+ 	NV_10      = 0x10,
+ 	NV_20      = 0x20,
+ 	NV_30      = 0x30,
+ 	NV_40      = 0x40,
+ 	NV_50      = 0x50,
+ 	NV_C0      = 0xc0,
+-	NV_D0      = 0xd0
++	NV_D0      = 0xd0,
++	NV_E0      = 0xe0,
+ };
+ 
+ struct drm_nouveau_private {
+@@ -764,8 +797,22 @@ struct drm_nouveau_private {
+ 	} tile;
+ 
+ 	/* VRAM/fb configuration */
++	enum {
++		NV_MEM_TYPE_UNKNOWN = 0,
++		NV_MEM_TYPE_STOLEN,
++		NV_MEM_TYPE_SGRAM,
++		NV_MEM_TYPE_SDRAM,
++		NV_MEM_TYPE_DDR1,
++		NV_MEM_TYPE_DDR2,
++		NV_MEM_TYPE_DDR3,
++		NV_MEM_TYPE_GDDR2,
++		NV_MEM_TYPE_GDDR3,
++		NV_MEM_TYPE_GDDR4,
++		NV_MEM_TYPE_GDDR5
++	} vram_type;
+ 	uint64_t vram_size;
+ 	uint64_t vram_sys_base;
++	bool vram_rank_B;
+ 
+ 	uint64_t fb_available_size;
+ 	uint64_t fb_mappable_pages;
+@@ -780,6 +827,8 @@ struct drm_nouveau_private {
+ 	struct nouveau_vm *chan_vm;
+ 
+ 	struct nvbios vbios;
++	u8 *mxms;
++	struct list_head i2c_ports;
+ 
+ 	struct nv04_mode_state mode_reg;
+ 	struct nv04_mode_state saved_reg;
+@@ -836,6 +885,7 @@ extern int nouveau_uscript_lvds;
+ extern int nouveau_uscript_tmds;
+ extern int nouveau_vram_pushbuf;
+ extern int nouveau_vram_notify;
++extern char *nouveau_vram_type;
+ extern int nouveau_fbpercrtc;
+ extern int nouveau_tv_disable;
+ extern char *nouveau_tv_norm;
+@@ -850,6 +900,7 @@ extern char *nouveau_perflvl;
+ extern int nouveau_perflvl_wr;
+ extern int nouveau_msi;
+ extern int nouveau_ctxfw;
++extern int nouveau_mxmdcb;
+ 
+ extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
+ extern int nouveau_pci_resume(struct pci_dev *pdev);
+@@ -883,8 +934,12 @@ extern void nouveau_mem_gart_fini(struct drm_device *);
+ extern int  nouveau_mem_init_agp(struct drm_device *);
+ extern int  nouveau_mem_reset_agp(struct drm_device *);
+ extern void nouveau_mem_close(struct drm_device *);
+-extern int  nouveau_mem_detect(struct drm_device *);
+ extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
++extern int  nouveau_mem_timing_calc(struct drm_device *, u32 freq,
++				    struct nouveau_pm_memtiming *);
++extern void nouveau_mem_timing_read(struct drm_device *,
++				    struct nouveau_pm_memtiming *);
++extern int nouveau_mem_vbios_type(struct drm_device *);
+ extern struct nouveau_tile_reg *nv10_mem_set_tiling(
+ 	struct drm_device *dev, uint32_t addr, uint32_t size,
+ 	uint32_t pitch, uint32_t flags);
+@@ -1000,7 +1055,10 @@ extern int nouveau_sgdma_init(struct drm_device *);
+ extern void nouveau_sgdma_takedown(struct drm_device *);
+ extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
+ 					   uint32_t offset);
+-extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
++extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
++					       unsigned long size,
++					       uint32_t page_flags,
++					       struct page *dummy_read_page);
+ 
+ /* nouveau_debugfs.c */
+ #if defined(CONFIG_DRM_NOUVEAU_DEBUG)
+@@ -1032,8 +1090,7 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
+ #endif
+ 
+ /* nouveau_dma.c */
+-extern void nouveau_dma_pre_init(struct nouveau_channel *);
+-extern int  nouveau_dma_init(struct nouveau_channel *);
++extern void nouveau_dma_init(struct nouveau_channel *);
+ extern int  nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
+ 
+ /* nouveau_acpi.c */
+@@ -1041,12 +1098,14 @@ extern int  nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
+ #if defined(CONFIG_ACPI)
+ void nouveau_register_dsm_handler(void);
+ void nouveau_unregister_dsm_handler(void);
++void nouveau_switcheroo_optimus_dsm(void);
+ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
+ bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+ int nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
+ #else
+ static inline void nouveau_register_dsm_handler(void) {}
+ static inline void nouveau_unregister_dsm_handler(void) {}
++static inline void nouveau_switcheroo_optimus_dsm(void) {}
+ static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
+ static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
+ static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return -EINVAL; }
+@@ -1072,8 +1131,6 @@ extern int nouveau_run_vbios_init(struct drm_device *);
+ extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
+ 					struct dcb_entry *, int crtc);
+ extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table);
+-extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
+-						      enum dcb_gpio_tag);
+ extern struct dcb_connector_table_entry *
+ nouveau_bios_connector_entry(struct drm_device *, int index);
+ extern u32 get_pll_register(struct drm_device *, enum pll_types);
+@@ -1091,24 +1148,26 @@ extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
+ 			    enum LVDS_script, int pxclk);
+ bool bios_encoder_match(struct dcb_entry *, u32 hash);
+ 
++/* nouveau_mxm.c */
++int  nouveau_mxm_init(struct drm_device *dev);
++void nouveau_mxm_fini(struct drm_device *dev);
++
+ /* nouveau_ttm.c */
+ int nouveau_ttm_global_init(struct drm_nouveau_private *);
+ void nouveau_ttm_global_release(struct drm_nouveau_private *);
+ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
+ 
+-/* nouveau_dp.c */
+-int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
+-		     uint8_t *data, int data_nr);
+-bool nouveau_dp_detect(struct drm_encoder *);
+-bool nouveau_dp_link_train(struct drm_encoder *, u32 datarate);
+-void nouveau_dp_tu_update(struct drm_device *, int, int, u32, u32);
+-u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_entry *, u8 **);
++/* nouveau_hdmi.c */
++void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+ 
+ /* nv04_fb.c */
++extern int  nv04_fb_vram_init(struct drm_device *);
+ extern int  nv04_fb_init(struct drm_device *);
+ extern void nv04_fb_takedown(struct drm_device *);
+ 
+ /* nv10_fb.c */
++extern int  nv10_fb_vram_init(struct drm_device *dev);
++extern int  nv1a_fb_vram_init(struct drm_device *dev);
+ extern int  nv10_fb_init(struct drm_device *);
+ extern void nv10_fb_takedown(struct drm_device *);
+ extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
+@@ -1117,6 +1176,16 @@ extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
+ extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
+ extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
+ 
++/* nv20_fb.c */
++extern int  nv20_fb_vram_init(struct drm_device *dev);
++extern int  nv20_fb_init(struct drm_device *);
++extern void nv20_fb_takedown(struct drm_device *);
++extern void nv20_fb_init_tile_region(struct drm_device *dev, int i,
++				     uint32_t addr, uint32_t size,
++				     uint32_t pitch, uint32_t flags);
++extern void nv20_fb_set_tile_region(struct drm_device *dev, int i);
++extern void nv20_fb_free_tile_region(struct drm_device *dev, int i);
++
+ /* nv30_fb.c */
+ extern int  nv30_fb_init(struct drm_device *);
+ extern void nv30_fb_takedown(struct drm_device *);
+@@ -1126,6 +1195,7 @@ extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
+ extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
+ 
+ /* nv40_fb.c */
++extern int  nv40_fb_vram_init(struct drm_device *dev);
+ extern int  nv40_fb_init(struct drm_device *);
+ extern void nv40_fb_takedown(struct drm_device *);
+ extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
+@@ -1222,6 +1292,9 @@ extern int  nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
+ /* nv84_crypt.c */
+ extern int  nv84_crypt_create(struct drm_device *);
+ 
++/* nv98_crypt.c */
++extern int  nv98_crypt_create(struct drm_device *dev);
++
+ /* nva3_copy.c */
+ extern int  nva3_copy_create(struct drm_device *dev);
+ 
+@@ -1234,6 +1307,17 @@ extern int  nv31_mpeg_create(struct drm_device *dev);
+ /* nv50_mpeg.c */
+ extern int  nv50_mpeg_create(struct drm_device *dev);
+ 
++/* nv84_bsp.c */
++/* nv98_bsp.c */
++extern int  nv84_bsp_create(struct drm_device *dev);
++
++/* nv84_vp.c */
++/* nv98_vp.c */
++extern int  nv84_vp_create(struct drm_device *dev);
++
++/* nv98_ppp.c */
++extern int  nv98_ppp_create(struct drm_device *dev);
++
+ /* nv04_instmem.c */
+ extern int  nv04_instmem_init(struct drm_device *);
+ extern void nv04_instmem_takedown(struct drm_device *);
+@@ -1311,13 +1395,19 @@ extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *);
+ extern int nv04_display_early_init(struct drm_device *);
+ extern void nv04_display_late_takedown(struct drm_device *);
+ extern int nv04_display_create(struct drm_device *);
+-extern int nv04_display_init(struct drm_device *);
+ extern void nv04_display_destroy(struct drm_device *);
++extern int nv04_display_init(struct drm_device *);
++extern void nv04_display_fini(struct drm_device *);
+ 
+ /* nvd0_display.c */
+ extern int nvd0_display_create(struct drm_device *);
+-extern int nvd0_display_init(struct drm_device *);
+ extern void nvd0_display_destroy(struct drm_device *);
++extern int nvd0_display_init(struct drm_device *);
++extern void nvd0_display_fini(struct drm_device *);
++struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int crtc);
++void nvd0_display_flip_stop(struct drm_crtc *);
++int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
++			   struct nouveau_channel *, u32 swap_interval);
+ 
+ /* nv04_crtc.c */
+ extern int nv04_crtc_create(struct drm_device *, int index);
+@@ -1412,6 +1502,10 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
+ 				  struct drm_file *);
+ 
+ /* nouveau_display.c */
++int nouveau_display_create(struct drm_device *dev);
++void nouveau_display_destroy(struct drm_device *dev);
++int nouveau_display_init(struct drm_device *dev);
++void nouveau_display_fini(struct drm_device *dev);
+ int nouveau_vblank_enable(struct drm_device *dev, int crtc);
+ void nouveau_vblank_disable(struct drm_device *dev, int crtc);
+ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+@@ -1426,23 +1520,22 @@ int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+ 				 uint32_t handle);
+ 
+ /* nv10_gpio.c */
+-int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
+-int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
++int nv10_gpio_init(struct drm_device *dev);
++void nv10_gpio_fini(struct drm_device *dev);
++int nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out);
++int nv10_gpio_sense(struct drm_device *dev, int line);
++void nv10_gpio_irq_enable(struct drm_device *, int line, bool on);
+ 
+ /* nv50_gpio.c */
+ int nv50_gpio_init(struct drm_device *dev);
+ void nv50_gpio_fini(struct drm_device *dev);
+-int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
+-int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+-int nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
+-int nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+-int  nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
+-			    void (*)(void *, int), void *);
+-void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
+-			      void (*)(void *, int), void *);
+-bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
+-
+-/* nv50_calc. */
++int nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out);
++int nv50_gpio_sense(struct drm_device *dev, int line);
++void nv50_gpio_irq_enable(struct drm_device *, int line, bool on);
++int nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out);
++int nvd0_gpio_sense(struct drm_device *dev, int line);
++
++/* nv50_calc.c */
+ int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
+ 		  int *N1, int *M1, int *N2, int *M2, int *P);
+ int nva3_calc_pll(struct drm_device *, struct pll_lims *,
+@@ -1565,6 +1658,13 @@ extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
+ #define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
+ #define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
+ #define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
++#define NV_WARNONCE(d, fmt, arg...) do {                                       \
++	static int _warned = 0;                                                \
++	if (!_warned) {                                                        \
++		NV_WARN(d, fmt, ##arg);                                        \
++		_warned = 1;                                                   \
++	}                                                                      \
++} while(0)
+ 
+ /* nouveau_reg_debug bitmask */
+ enum {
+@@ -1652,6 +1752,7 @@ nv44_graph_class(struct drm_device *dev)
+ #define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
+ #define NV_MEM_ACCESS_SYS 4
+ #define NV_MEM_ACCESS_VM  8
++#define NV_MEM_ACCESS_NOSNOOP 16
+ 
+ #define NV_MEM_TARGET_VRAM        0
+ #define NV_MEM_TARGET_PCI         1
+@@ -1662,13 +1763,27 @@ nv44_graph_class(struct drm_device *dev)
+ #define NV_MEM_TYPE_VM 0x7f
+ #define NV_MEM_COMP_VM 0x03
+ 
++/* FIFO methods */
++#define NV01_SUBCHAN_OBJECT                                          0x00000000
++#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH                          0x00000010
++#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_LOW                           0x00000014
++#define NV84_SUBCHAN_SEMAPHORE_SEQUENCE                              0x00000018
++#define NV84_SUBCHAN_SEMAPHORE_TRIGGER                               0x0000001c
++#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL                 0x00000001
++#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG                    0x00000002
++#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL                0x00000004
++#define NV84_SUBCHAN_NOTIFY_INTR                                     0x00000020
++#define NV84_SUBCHAN_WRCACHE_FLUSH                                   0x00000024
++#define NV10_SUBCHAN_REF_CNT                                         0x00000050
++#define NVSW_SUBCHAN_PAGE_FLIP                                       0x00000054
++#define NV11_SUBCHAN_DMA_SEMAPHORE                                   0x00000060
++#define NV11_SUBCHAN_SEMAPHORE_OFFSET                                0x00000064
++#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE                               0x00000068
++#define NV11_SUBCHAN_SEMAPHORE_RELEASE                               0x0000006c
++#define NV40_SUBCHAN_YIELD                                           0x00000080
++
+ /* NV_SW object class */
+ #define NV_SW                                                        0x0000506e
+-#define NV_SW_DMA_SEMAPHORE                                          0x00000060
+-#define NV_SW_SEMAPHORE_OFFSET                                       0x00000064
+-#define NV_SW_SEMAPHORE_ACQUIRE                                      0x00000068
+-#define NV_SW_SEMAPHORE_RELEASE                                      0x0000006c
+-#define NV_SW_YIELD                                                  0x00000080
+ #define NV_SW_DMA_VBLSEM                                             0x0000018c
+ #define NV_SW_VBLSEM_OFFSET                                          0x00000400
+ #define NV_SW_VBLSEM_RELEASE_VALUE                                   0x00000404
+diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
+index e5d6e3f..3dc14a3 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
++++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
+@@ -32,6 +32,14 @@
+ 
+ #define NV_DPMS_CLEARED 0x80
+ 
++struct dp_train_func {
++	void (*link_set)(struct drm_device *, struct dcb_entry *, int crtc,
++			 int nr, u32 bw, bool enhframe);
++	void (*train_set)(struct drm_device *, struct dcb_entry *, u8 pattern);
++	void (*train_adj)(struct drm_device *, struct dcb_entry *,
++			  u8 lane, u8 swing, u8 preem);
++};
++
+ struct nouveau_encoder {
+ 	struct drm_encoder_slave base;
+ 
+@@ -78,9 +86,19 @@ get_slave_funcs(struct drm_encoder *enc)
+ 	return to_encoder_slave(enc)->slave_funcs;
+ }
+ 
++/* nouveau_dp.c */
++int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
++		     uint8_t *data, int data_nr);
++bool nouveau_dp_detect(struct drm_encoder *);
++void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
++		     struct dp_train_func *);
++u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_entry *, u8 **);
++
+ struct nouveau_connector *
+ nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
+ int nv50_sor_create(struct drm_connector *, struct dcb_entry *);
++void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
+ int nv50_dac_create(struct drm_connector *, struct dcb_entry *);
+ 
++
+ #endif /* __NOUVEAU_ENCODER_H__ */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
+index 95c843e..f3fb649 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
++++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
+@@ -42,8 +42,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
+ 	return container_of(fb, struct nouveau_framebuffer, base);
+ }
+ 
+-extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
+-
+ int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
+-			     struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
++			     struct drm_mode_fb_cmd2 *mode_cmd, struct nouveau_bo *nvbo);
+ #endif /* __NOUVEAU_FB_H__ */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index cc0801d..6fd2211 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -36,6 +36,7 @@
+ #include <linux/init.h>
+ #include <linux/screen_info.h>
+ #include <linux/vga_switcheroo.h>
++#include <linux/console.h>
+ 
+ #include "drmP.h"
+ #include "drm.h"
+@@ -281,7 +282,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+ 	struct nouveau_framebuffer *nouveau_fb;
+ 	struct nouveau_channel *chan;
+ 	struct nouveau_bo *nvbo;
+-	struct drm_mode_fb_cmd mode_cmd;
++	struct drm_mode_fb_cmd2 mode_cmd;
+ 	struct pci_dev *pdev = dev->pdev;
+ 	struct device *device = &pdev->dev;
+ 	int size, ret;
+@@ -289,12 +290,13 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+ 	mode_cmd.width = sizes->surface_width;
+ 	mode_cmd.height = sizes->surface_height;
+ 
+-	mode_cmd.bpp = sizes->surface_bpp;
+-	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
+-	mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
+-	mode_cmd.depth = sizes->surface_depth;
++	mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
++	mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
+ 
+-	size = mode_cmd.pitch * mode_cmd.height;
++	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
++							  sizes->surface_depth);
++
++	size = mode_cmd.pitches[0] * mode_cmd.height;
+ 	size = roundup(size, PAGE_SIZE);
+ 
+ 	ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
+@@ -369,7 +371,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+ 	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
+ 	info->screen_size = size;
+ 
+-	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
++	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ 	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
+ 
+ 	/* Set aperture base/size for vesafb takeover */
+@@ -379,11 +381,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+ 		goto out_unref;
+ 	}
+ 
+-	info->pixmap.size = 64*1024;
+-	info->pixmap.buf_align = 8;
+-	info->pixmap.access_align = 32;
+-	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+-	info->pixmap.scan_align = 1;
++	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+ 
+ 	mutex_unlock(&dev->struct_mutex);
+ 
+@@ -547,7 +545,13 @@ void nouveau_fbcon_restore_accel(struct drm_device *dev)
+ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	console_lock();
++	if (state == 0)
++		nouveau_fbcon_save_disable_accel(dev);
+ 	fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
++	if (state == 1)
++		nouveau_fbcon_restore_accel(dev);
++	console_unlock();
+ }
+ 
+ void nouveau_fbcon_zfill_all(struct drm_device *dev)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
+index 2f6daae..c1dc20f 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
+@@ -93,18 +93,17 @@ nouveau_fence_update(struct nouveau_channel *chan)
+ 	}
+ 
+ 	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
+-		sequence = fence->sequence;
++		if (fence->sequence > chan->fence.sequence_ack)
++			break;
++
+ 		fence->signalled = true;
+ 		list_del(&fence->entry);
+-
+-		if (unlikely(fence->work))
++		if (fence->work)
+ 			fence->work(fence->priv, true);
+ 
+ 		kref_put(&fence->refcount, nouveau_fence_del);
+-
+-		if (sequence == chan->fence.sequence_ack)
+-			break;
+ 	}
++
+ out:
+ 	spin_unlock(&chan->fence.lock);
+ }
+@@ -165,9 +164,9 @@ nouveau_fence_emit(struct nouveau_fence *fence)
+ 
+ 	if (USE_REFCNT(dev)) {
+ 		if (dev_priv->card_type < NV_C0)
+-			BEGIN_RING(chan, NvSubSw, 0x0050, 1);
++			BEGIN_RING(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
+ 		else
+-			BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1);
++			BEGIN_NVC0(chan, 2, 0, NV10_SUBCHAN_REF_CNT, 1);
+ 	} else {
+ 		BEGIN_RING(chan, NvSubSw, 0x0150, 1);
+ 	}
+@@ -344,7 +343,7 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
+ 		if (ret)
+ 			return ret;
+ 
+-		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
++		BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
+ 		OUT_RING  (chan, NvSema);
+ 		OUT_RING  (chan, offset);
+ 		OUT_RING  (chan, 1);
+@@ -354,9 +353,9 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
+ 		if (ret)
+ 			return ret;
+ 
+-		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
++		BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+ 		OUT_RING  (chan, chan->vram_handle);
+-		BEGIN_RING(chan, NvSubSw, 0x0010, 4);
++		BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ 		OUT_RING  (chan, upper_32_bits(offset));
+ 		OUT_RING  (chan, lower_32_bits(offset));
+ 		OUT_RING  (chan, 1);
+@@ -366,7 +365,7 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
+ 		if (ret)
+ 			return ret;
+ 
+-		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
++		BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ 		OUT_RING  (chan, upper_32_bits(offset));
+ 		OUT_RING  (chan, lower_32_bits(offset));
+ 		OUT_RING  (chan, 1);
+@@ -397,10 +396,10 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
+ 		if (ret)
+ 			return ret;
+ 
+-		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
++		BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
+ 		OUT_RING  (chan, NvSema);
+ 		OUT_RING  (chan, offset);
+-		BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
++		BEGIN_RING(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+ 		OUT_RING  (chan, 1);
+ 	} else
+ 	if (dev_priv->chipset < 0xc0) {
+@@ -408,9 +407,9 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
+ 		if (ret)
+ 			return ret;
+ 
+-		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
++		BEGIN_RING(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+ 		OUT_RING  (chan, chan->vram_handle);
+-		BEGIN_RING(chan, NvSubSw, 0x0010, 4);
++		BEGIN_RING(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ 		OUT_RING  (chan, upper_32_bits(offset));
+ 		OUT_RING  (chan, lower_32_bits(offset));
+ 		OUT_RING  (chan, 1);
+@@ -420,7 +419,7 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
+ 		if (ret)
+ 			return ret;
+ 
+-		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
++		BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+ 		OUT_RING  (chan, upper_32_bits(offset));
+ 		OUT_RING  (chan, lower_32_bits(offset));
+ 		OUT_RING  (chan, 1);
+@@ -510,7 +509,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
+ 		if (ret)
+ 			return ret;
+ 
+-		BEGIN_RING(chan, NvSubSw, 0, 1);
++		BEGIN_RING(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
+ 		OUT_RING  (chan, NvSw);
+ 		FIRE_RING (chan);
+ 	}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index 7ce3fde..ed52a6f 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -426,9 +426,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
+ 			return ret;
+ 		}
+ 
+-		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
+ 		ret = nouveau_bo_validate(nvbo, true, false, false);
+-		nvbo->channel = NULL;
+ 		if (unlikely(ret)) {
+ 			if (ret != -ERESTARTSYS)
+ 				NV_ERROR(dev, "fail ttm_validate\n");
+@@ -678,19 +676,13 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
+ 		return PTR_ERR(bo);
+ 	}
+ 
+-	/* Mark push buffers as being used on PFIFO, the validation code
+-	 * will then make sure that if the pushbuf bo moves, that they
+-	 * happen on the kernel channel, which will in turn cause a sync
+-	 * to happen before we try and submit the push buffer.
+-	 */
++	/* Ensure all push buffers are on validate list */
+ 	for (i = 0; i < req->nr_push; i++) {
+ 		if (push[i].bo_index >= req->nr_buffers) {
+ 			NV_ERROR(dev, "push %d buffer not in list\n", i);
+ 			ret = -EINVAL;
+ 			goto out_prevalid;
+ 		}
+-
+-		bo[push[i].bo_index].read_domains |= (1 << 31);
+ 	}
+ 
+ 	/* Validate buffer list */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
+new file mode 100644
+index 0000000..a580cc6
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_gpio.c
+@@ -0,0 +1,400 @@
++/*
++ * Copyright 2011 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_i2c.h"
++#include "nouveau_gpio.h"
++
++static u8 *
++dcb_gpio_table(struct drm_device *dev)
++{
++	u8 *dcb = dcb_table(dev);
++	if (dcb) {
++		if (dcb[0] >= 0x30 && dcb[1] >= 0x0c)
++			return ROMPTR(dev, dcb[0x0a]);
++		if (dcb[0] >= 0x22 && dcb[-1] >= 0x13)
++			return ROMPTR(dev, dcb[-15]);
++	}
++	return NULL;
++}
++
++static u8 *
++dcb_gpio_entry(struct drm_device *dev, int idx, int ent, u8 *version)
++{
++	u8 *table = dcb_gpio_table(dev);
++	if (table) {
++		*version = table[0];
++		if (*version < 0x30 && ent < table[2])
++			return table + 3 + (ent * table[1]);
++		else if (ent < table[2])
++			return table + table[1] + (ent * table[3]);
++	}
++	return NULL;
++}
++
++int
++nouveau_gpio_drive(struct drm_device *dev, int idx, int line, int dir, int out)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
++
++	return pgpio->drive ? pgpio->drive(dev, line, dir, out) : -ENODEV;
++}
++
++int
++nouveau_gpio_sense(struct drm_device *dev, int idx, int line)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
++
++	return pgpio->sense ? pgpio->sense(dev, line) : -ENODEV;
++}
++
++int
++nouveau_gpio_find(struct drm_device *dev, int idx, u8 func, u8 line,
++		  struct gpio_func *gpio)
++{
++	u8 *table, *entry, version;
++	int i = -1;
++
++	if (line == 0xff && func == 0xff)
++		return -EINVAL;
++
++	while ((entry = dcb_gpio_entry(dev, idx, ++i, &version))) {
++		if (version < 0x40) {
++			u16 data = ROM16(entry[0]);
++			*gpio = (struct gpio_func) {
++				.line = (data & 0x001f) >> 0,
++				.func = (data & 0x07e0) >> 5,
++				.log[0] = (data & 0x1800) >> 11,
++				.log[1] = (data & 0x6000) >> 13,
++			};
++		} else
++		if (version < 0x41) {
++			*gpio = (struct gpio_func) {
++				.line = entry[0] & 0x1f,
++				.func = entry[1],
++				.log[0] = (entry[3] & 0x18) >> 3,
++				.log[1] = (entry[3] & 0x60) >> 5,
++			};
++		} else {
++			*gpio = (struct gpio_func) {
++				.line = entry[0] & 0x3f,
++				.func = entry[1],
++				.log[0] = (entry[4] & 0x30) >> 4,
++				.log[1] = (entry[4] & 0xc0) >> 6,
++			};
++		}
++
++		if ((line == 0xff || line == gpio->line) &&
++		    (func == 0xff || func == gpio->func))
++			return 0;
++	}
++
++	/* DCB 2.2, fixed TVDAC GPIO data */
++	if ((table = dcb_table(dev)) && table[0] >= 0x22) {
++		if (func == DCB_GPIO_TVDAC0) {
++			*gpio = (struct gpio_func) {
++				.func = DCB_GPIO_TVDAC0,
++				.line = table[-4] >> 4,
++				.log[0] = !!(table[-5] & 2),
++				.log[1] =  !(table[-5] & 2),
++			};
++			return 0;
++		}
++	}
++
++	/* Apple iMac G4 NV18 */
++	if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
++		if (func == DCB_GPIO_TVDAC0) {
++			*gpio = (struct gpio_func) {
++				.func = DCB_GPIO_TVDAC0,
++				.line = 4,
++				.log[0] = 0,
++				.log[1] = 1,
++			};
++			return 0;
++		}
++	}
++
++	return -EINVAL;
++}
++
++int
++nouveau_gpio_set(struct drm_device *dev, int idx, u8 tag, u8 line, int state)
++{
++	struct gpio_func gpio;
++	int ret;
++
++	ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
++	if (ret == 0) {
++		int dir = !!(gpio.log[state] & 0x02);
++		int out = !!(gpio.log[state] & 0x01);
++		ret = nouveau_gpio_drive(dev, idx, gpio.line, dir, out);
++	}
++
++	return ret;
++}
++
++int
++nouveau_gpio_get(struct drm_device *dev, int idx, u8 tag, u8 line)
++{
++	struct gpio_func gpio;
++	int ret;
++
++	ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
++	if (ret == 0) {
++		ret = nouveau_gpio_sense(dev, idx, gpio.line);
++		if (ret >= 0)
++			ret = (ret == (gpio.log[1] & 1));
++	}
++
++	return ret;
++}
++
++int
++nouveau_gpio_irq(struct drm_device *dev, int idx, u8 tag, u8 line, bool on)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
++	struct gpio_func gpio;
++	int ret;
++
++	ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
++	if (ret == 0) {
++		if (idx == 0 && pgpio->irq_enable)
++			pgpio->irq_enable(dev, gpio.line, on);
++		else
++			ret = -ENODEV;
++	}
++
++	return ret;
++}
++
++struct gpio_isr {
++	struct drm_device *dev;
++	struct list_head head;
++	struct work_struct work;
++	int idx;
++	struct gpio_func func;
++	void (*handler)(void *, int);
++	void *data;
++	bool inhibit;
++};
++
++static void
++nouveau_gpio_isr_bh(struct work_struct *work)
++{
++	struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
++	struct drm_device *dev = isr->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
++	unsigned long flags;
++	int state;
++
++	state = nouveau_gpio_get(dev, isr->idx, isr->func.func, isr->func.line);
++	if (state >= 0)
++		isr->handler(isr->data, state);
++
++	spin_lock_irqsave(&pgpio->lock, flags);
++	isr->inhibit = false;
++	spin_unlock_irqrestore(&pgpio->lock, flags);
++}
++
++void
++nouveau_gpio_isr(struct drm_device *dev, int idx, u32 line_mask)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
++	struct gpio_isr *isr;
++
++	if (idx != 0)
++		return;
++
++	spin_lock(&pgpio->lock);
++	list_for_each_entry(isr, &pgpio->isr, head) {
++		if (line_mask & (1 << isr->func.line)) {
++			if (isr->inhibit)
++				continue;
++			isr->inhibit = true;
++			schedule_work(&isr->work);
++		}
++	}
++	spin_unlock(&pgpio->lock);
++}
++
++int
++nouveau_gpio_isr_add(struct drm_device *dev, int idx, u8 tag, u8 line,
++		     void (*handler)(void *, int), void *data)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
++	struct gpio_isr *isr;
++	unsigned long flags;
++	int ret;
++
++	isr = kzalloc(sizeof(*isr), GFP_KERNEL);
++	if (!isr)
++		return -ENOMEM;
++
++	ret = nouveau_gpio_find(dev, idx, tag, line, &isr->func);
++	if (ret) {
++		kfree(isr);
++		return ret;
++	}
++
++	INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
++	isr->dev = dev;
++	isr->handler = handler;
++	isr->data = data;
++	isr->idx = idx;
++
++	spin_lock_irqsave(&pgpio->lock, flags);
++	list_add(&isr->head, &pgpio->isr);
++	spin_unlock_irqrestore(&pgpio->lock, flags);
++	return 0;
++}
++
++void
++nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
++		     void (*handler)(void *, int), void *data)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
++	struct gpio_isr *isr, *tmp;
++	struct gpio_func func;
++	unsigned long flags;
++	LIST_HEAD(tofree);
++	int ret;
++
++	ret = nouveau_gpio_find(dev, idx, tag, line, &func);
++	if (ret == 0) {
++		spin_lock_irqsave(&pgpio->lock, flags);
++		list_for_each_entry_safe(isr, tmp, &pgpio->isr, head) {
++			if (memcmp(&isr->func, &func, sizeof(func)) ||
++			    isr->idx != idx ||
++			    isr->handler != handler || isr->data != data)
++				continue;
++			list_move(&isr->head, &tofree);
++		}
++		spin_unlock_irqrestore(&pgpio->lock, flags);
++
++		list_for_each_entry_safe(isr, tmp, &tofree, head) {
++			flush_work_sync(&isr->work);
++			kfree(isr);
++		}
++	}
++}
++
++int
++nouveau_gpio_create(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
++
++	INIT_LIST_HEAD(&pgpio->isr);
++	spin_lock_init(&pgpio->lock);
++
++	return nouveau_gpio_init(dev);
++}
++
++void
++nouveau_gpio_destroy(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
++
++	nouveau_gpio_fini(dev);
++	BUG_ON(!list_empty(&pgpio->isr));
++}
++
++int
++nouveau_gpio_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
++	int ret = 0;
++
++	if (pgpio->init)
++		ret = pgpio->init(dev);
++
++	return ret;
++}
++
++void
++nouveau_gpio_fini(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
++
++	if (pgpio->fini)
++		pgpio->fini(dev);
++}
++
++void
++nouveau_gpio_reset(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	u8 *entry, version;
++	int ent = -1;
++
++	while ((entry = dcb_gpio_entry(dev, 0, ++ent, &version))) {
++		u8 func = 0xff, line, defs, unk0, unk1;
++		if (version >= 0x41) {
++			defs = !!(entry[0] & 0x80);
++			line = entry[0] & 0x3f;
++			func = entry[1];
++			unk0 = entry[2];
++			unk1 = entry[3] & 0x1f;
++		} else
++		if (version >= 0x40) {
++			line = entry[0] & 0x1f;
++			func = entry[1];
++			defs = !!(entry[3] & 0x01);
++			unk0 = !!(entry[3] & 0x02);
++			unk1 = !!(entry[3] & 0x04);
++		} else {
++			break;
++		}
++
++		if (func == 0xff)
++			continue;
++
++		nouveau_gpio_func_set(dev, func, defs);
++
++		if (dev_priv->card_type >= NV_D0) {
++			nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
++			if (unk1--)
++				nv_mask(dev, 0x00d640 + (unk1 * 4), 0xff, line);
++		} else
++		if (dev_priv->card_type >= NV_50) {
++			static const u32 regs[] = { 0xe100, 0xe28c };
++			u32 val = (unk1 << 16) | unk0;
++			u32 reg = regs[line >> 4]; line &= 0x0f;
++
++			nv_mask(dev, reg, 0x00010001 << line, val << line);
++		}
++	}
++}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.h b/drivers/gpu/drm/nouveau/nouveau_gpio.h
+new file mode 100644
+index 0000000..64c5cb0
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_gpio.h
+@@ -0,0 +1,71 @@
++/*
++ * Copyright 2011 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __NOUVEAU_GPIO_H__
++#define __NOUVEAU_GPIO_H__
++
++struct gpio_func {
++	u8 func;
++	u8 line;
++	u8 log[2];
++};
++
++/* nouveau_gpio.c */
++int  nouveau_gpio_create(struct drm_device *);
++void nouveau_gpio_destroy(struct drm_device *);
++int  nouveau_gpio_init(struct drm_device *);
++void nouveau_gpio_fini(struct drm_device *);
++void nouveau_gpio_reset(struct drm_device *);
++int  nouveau_gpio_drive(struct drm_device *, int idx, int line,
++			int dir, int out);
++int  nouveau_gpio_sense(struct drm_device *, int idx, int line);
++int  nouveau_gpio_find(struct drm_device *, int idx, u8 tag, u8 line,
++		       struct gpio_func *);
++int  nouveau_gpio_set(struct drm_device *, int idx, u8 tag, u8 line, int state);
++int  nouveau_gpio_get(struct drm_device *, int idx, u8 tag, u8 line);
++int  nouveau_gpio_irq(struct drm_device *, int idx, u8 tag, u8 line, bool on);
++void nouveau_gpio_isr(struct drm_device *, int idx, u32 mask);
++int  nouveau_gpio_isr_add(struct drm_device *, int idx, u8 tag, u8 line,
++			  void (*)(void *, int state), void *data);
++void nouveau_gpio_isr_del(struct drm_device *, int idx, u8 tag, u8 line,
++			  void (*)(void *, int state), void *data);
++
++static inline bool
++nouveau_gpio_func_valid(struct drm_device *dev, u8 tag)
++{
++	struct gpio_func func;
++	return (nouveau_gpio_find(dev, 0, tag, 0xff, &func)) == 0;
++}
++
++static inline int
++nouveau_gpio_func_set(struct drm_device *dev, u8 tag, int state)
++{
++	return nouveau_gpio_set(dev, 0, tag, 0xff, state);
++}
++
++static inline int
++nouveau_gpio_func_get(struct drm_device *dev, u8 tag)
++{
++	return nouveau_gpio_get(dev, 0, tag, 0xff);
++}
++
++#endif
+diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+new file mode 100644
+index 0000000..c3de363
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+@@ -0,0 +1,260 @@
++/*
++ * Copyright 2011 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_connector.h"
++#include "nouveau_encoder.h"
++#include "nouveau_crtc.h"
++
++static bool
++hdmi_sor(struct drm_encoder *encoder)
++{
++	struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
++	if (dev_priv->chipset <  0xa3 ||
++	    dev_priv->chipset == 0xaa ||
++	    dev_priv->chipset == 0xac)
++		return false;
++	return true;
++}
++
++static inline u32
++hdmi_base(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
++	if (!hdmi_sor(encoder))
++		return 0x616500 + (nv_crtc->index * 0x800);
++	return 0x61c500 + (nv_encoder->or * 0x800);
++}
++
++static void
++hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
++{
++	nv_wr32(encoder->dev, hdmi_base(encoder) + reg, val);
++}
++
++static u32
++hdmi_rd32(struct drm_encoder *encoder, u32 reg)
++{
++	return nv_rd32(encoder->dev, hdmi_base(encoder) + reg);
++}
++
++static u32
++hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
++{
++	u32 tmp = hdmi_rd32(encoder, reg);
++	hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
++	return tmp;
++}
++
++static void
++nouveau_audio_disconnect(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++	u32 or = nv_encoder->or * 0x800;
++
++	if (hdmi_sor(encoder)) {
++		nv_mask(dev, 0x61c448 + or, 0x00000003, 0x00000000);
++	}
++}
++
++static void
++nouveau_audio_mode_set(struct drm_encoder *encoder,
++		       struct drm_display_mode *mode)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct nouveau_connector *nv_connector;
++	struct drm_device *dev = encoder->dev;
++	u32 or = nv_encoder->or * 0x800;
++	int i;
++
++	nv_connector = nouveau_encoder_connector_get(nv_encoder);
++	if (!drm_detect_monitor_audio(nv_connector->edid)) {
++		nouveau_audio_disconnect(encoder);
++		return;
++	}
++
++	if (hdmi_sor(encoder)) {
++		nv_mask(dev, 0x61c448 + or, 0x00000001, 0x00000001);
++
++		drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
++		if (nv_connector->base.eld[0]) {
++			u8 *eld = nv_connector->base.eld;
++			for (i = 0; i < eld[2] * 4; i++)
++				nv_wr32(dev, 0x61c440 + or, (i << 8) | eld[i]);
++			for (i = eld[2] * 4; i < 0x60; i++)
++				nv_wr32(dev, 0x61c440 + or, (i << 8) | 0x00);
++			nv_mask(dev, 0x61c448 + or, 0x00000002, 0x00000002);
++		}
++	}
++}
++
++static void
++nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
++{
++	/* calculate checksum for the infoframe */
++	u8 sum = 0, i;
++	for (i = 0; i < frame[2]; i++)
++		sum += frame[i];
++	frame[3] = 256 - sum;
++
++	/* disable infoframe, and write header */
++	hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
++	hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
++
++	/* register scans tell me the audio infoframe has only one set of
++	 * subpack regs, according to tegra (gee nvidia, it'd be nice if we
++	 * could get those docs too!), the hdmi block pads out the rest of
++	 * the packet on its own.
++	 */
++	if (ctrl == 0x020)
++		frame[2] = 6;
++
++	/* write out checksum and data, weird weird 7 byte register pairs */
++	for (i = 0; i < frame[2] + 1; i += 7) {
++		u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
++		u32 *subpack = (u32 *)&frame[3 + i];
++		hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
++		hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
++	}
++
++	/* enable the infoframe */
++	hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
++}
++
++static void
++nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
++			     struct drm_display_mode *mode)
++{
++	const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
++	const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
++	const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
++	u8 frame[20];
++
++	frame[0x00] = 0x82; /* AVI infoframe */
++	frame[0x01] = 0x02; /* version */
++	frame[0x02] = 0x0d; /* length */
++	frame[0x03] = 0x00;
++	frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
++	frame[0x05] = (C << 6) | (M << 4) | R;
++	frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
++	frame[0x07] = VIC;
++	frame[0x08] = PR;
++	frame[0x09] = bar_top & 0xff;
++	frame[0x0a] = bar_top >> 8;
++	frame[0x0b] = bar_bottom & 0xff;
++	frame[0x0c] = bar_bottom >> 8;
++	frame[0x0d] = bar_left & 0xff;
++	frame[0x0e] = bar_left >> 8;
++	frame[0x0f] = bar_right & 0xff;
++	frame[0x10] = bar_right >> 8;
++	frame[0x11] = 0x00;
++	frame[0x12] = 0x00;
++	frame[0x13] = 0x00;
++
++	nouveau_hdmi_infoframe(encoder, 0x020, frame);
++}
++
++static void
++nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
++			     struct drm_display_mode *mode)
++{
++	const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
++	const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
++	u8 frame[12];
++
++	frame[0x00] = 0x84;	/* Audio infoframe */
++	frame[0x01] = 0x01;	/* version */
++	frame[0x02] = 0x0a;	/* length */
++	frame[0x03] = 0x00;
++	frame[0x04] = (CT << 4) | CC;
++	frame[0x05] = (SF << 2) | ceaSS;
++	frame[0x06] = FMT;
++	frame[0x07] = CA;
++	frame[0x08] = (DM_INH << 7) | (LSV << 3);
++	frame[0x09] = 0x00;
++	frame[0x0a] = 0x00;
++	frame[0x0b] = 0x00;
++
++	nouveau_hdmi_infoframe(encoder, 0x000, frame);
++}
++
++static void
++nouveau_hdmi_disconnect(struct drm_encoder *encoder)
++{
++	nouveau_audio_disconnect(encoder);
++
++	/* disable audio and avi infoframes */
++	hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
++	hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
++
++	/* disable hdmi */
++	hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
++}
++
++void
++nouveau_hdmi_mode_set(struct drm_encoder *encoder,
++		      struct drm_display_mode *mode)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct nouveau_connector *nv_connector;
++	struct drm_device *dev = encoder->dev;
++	u32 max_ac_packet, rekey;
++
++	nv_connector = nouveau_encoder_connector_get(nv_encoder);
++	if (!mode || !nv_connector || !nv_connector->edid ||
++	    !drm_detect_hdmi_monitor(nv_connector->edid)) {
++		nouveau_hdmi_disconnect(encoder);
++		return;
++	}
++
++	nouveau_hdmi_video_infoframe(encoder, mode);
++	nouveau_hdmi_audio_infoframe(encoder, mode);
++
++	hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
++	hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
++	hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
++
++	nv_mask(dev, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
++	nv_mask(dev, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
++	nv_mask(dev, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
++
++	/* value matches nvidia binary driver, and tegra constant */
++	rekey = 56;
++
++	max_ac_packet  = mode->htotal - mode->hdisplay;
++	max_ac_packet -= rekey;
++	max_ac_packet -= 18; /* constant from tegra */
++	max_ac_packet /= 32;
++
++	/* enable hdmi */
++	hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
++					      0x1f000000 | /* unknown */
++					      max_ac_packet << 16 |
++					      rekey);
++
++	nouveau_audio_mode_set(encoder, mode);
++}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
+new file mode 100644
+index 0000000..6976875
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
+@@ -0,0 +1,115 @@
++/*
++ * Copyright 2010 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs
++ */
++
++#ifndef __NOUVEAU_HWSQ_H__
++#define __NOUVEAU_HWSQ_H__
++
++struct hwsq_ucode {
++	u8 data[0x200];
++	union {
++		u8  *u08;
++		u16 *u16;
++		u32 *u32;
++	} ptr;
++	u16 len;
++
++	u32 reg;
++	u32 val;
++};
++
++static inline void
++hwsq_init(struct hwsq_ucode *hwsq)
++{
++	hwsq->ptr.u08 = hwsq->data;
++	hwsq->reg = 0xffffffff;
++	hwsq->val = 0xffffffff;
++}
++
++static inline void
++hwsq_fini(struct hwsq_ucode *hwsq)
++{
++	do {
++		*hwsq->ptr.u08++ = 0x7f;
++		hwsq->len = hwsq->ptr.u08 - hwsq->data;
++	} while (hwsq->len & 3);
++	hwsq->ptr.u08 = hwsq->data;
++}
++
++static inline void
++hwsq_usec(struct hwsq_ucode *hwsq, u8 usec)
++{
++	u32 shift = 0;
++	while (usec & ~3) {
++		usec >>= 2;
++		shift++;
++	}
++
++	*hwsq->ptr.u08++ = (shift << 2) | usec;
++}
++
++static inline void
++hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val)
++{
++	flag += 0x80;
++	if (val >= 0)
++		flag += 0x20;
++	if (val >= 1)
++		flag += 0x20;
++	*hwsq->ptr.u08++ = flag;
++}
++
++static inline void
++hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1)
++{
++	*hwsq->ptr.u08++ = 0x5f;
++	*hwsq->ptr.u08++ = v0;
++	*hwsq->ptr.u08++ = v1;
++}
++
++static inline void
++hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val)
++{
++	if (val != hwsq->val) {
++		if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) {
++			*hwsq->ptr.u08++ = 0x42;
++			*hwsq->ptr.u16++ = (val & 0x0000ffff);
++		} else {
++			*hwsq->ptr.u08++ = 0xe2;
++			*hwsq->ptr.u32++ = val;
++		}
++
++		hwsq->val = val;
++	}
++
++	if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) {
++		*hwsq->ptr.u08++ = 0x40;
++		*hwsq->ptr.u16++ = (reg & 0x0000ffff);
++	} else {
++		*hwsq->ptr.u08++ = 0xe0;
++		*hwsq->ptr.u32++ = reg;
++	}
++	hwsq->reg = reg;
++}
++
++#endif
+diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
+index d39b220..77e5646 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
++++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
+@@ -30,83 +30,83 @@
+ #include "nouveau_hw.h"
+ 
+ static void
+-nv04_i2c_setscl(void *data, int state)
++i2c_drive_scl(void *data, int state)
+ {
+-	struct nouveau_i2c_chan *i2c = data;
+-	struct drm_device *dev = i2c->dev;
+-	uint8_t val;
+-
+-	val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
+-	NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+-}
+-
+-static void
+-nv04_i2c_setsda(void *data, int state)
+-{
+-	struct nouveau_i2c_chan *i2c = data;
+-	struct drm_device *dev = i2c->dev;
+-	uint8_t val;
+-
+-	val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
+-	NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+-}
+-
+-static int
+-nv04_i2c_getscl(void *data)
+-{
+-	struct nouveau_i2c_chan *i2c = data;
+-	struct drm_device *dev = i2c->dev;
+-
+-	return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
+-}
+-
+-static int
+-nv04_i2c_getsda(void *data)
+-{
+-	struct nouveau_i2c_chan *i2c = data;
+-	struct drm_device *dev = i2c->dev;
+-
+-	return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
+-}
+-
+-static void
+-nv4e_i2c_setscl(void *data, int state)
+-{
+-	struct nouveau_i2c_chan *i2c = data;
+-	struct drm_device *dev = i2c->dev;
+-	uint8_t val;
+-
+-	val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
+-	nv_wr32(dev, i2c->wr, val | 0x01);
++	struct nouveau_i2c_chan *port = data;
++	if (port->type == 0) {
++		u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
++		if (state) val |= 0x20;
++		else	   val &= 0xdf;
++		NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
++	} else
++	if (port->type == 4) {
++		nv_mask(port->dev, port->drive, 0x2f, state ? 0x21 : 0x01);
++	} else
++	if (port->type == 5) {
++		if (state) port->state |= 0x01;
++		else	   port->state &= 0xfe;
++		nv_wr32(port->dev, port->drive, 4 | port->state);
++	}
+ }
+ 
+ static void
+-nv4e_i2c_setsda(void *data, int state)
++i2c_drive_sda(void *data, int state)
+ {
+-	struct nouveau_i2c_chan *i2c = data;
+-	struct drm_device *dev = i2c->dev;
+-	uint8_t val;
+-
+-	val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
+-	nv_wr32(dev, i2c->wr, val | 0x01);
++	struct nouveau_i2c_chan *port = data;
++	if (port->type == 0) {
++		u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
++		if (state) val |= 0x10;
++		else	   val &= 0xef;
++		NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
++	} else
++	if (port->type == 4) {
++		nv_mask(port->dev, port->drive, 0x1f, state ? 0x11 : 0x01);
++	} else
++	if (port->type == 5) {
++		if (state) port->state |= 0x02;
++		else	   port->state &= 0xfd;
++		nv_wr32(port->dev, port->drive, 4 | port->state);
++	}
+ }
+ 
+ static int
+-nv4e_i2c_getscl(void *data)
++i2c_sense_scl(void *data)
+ {
+-	struct nouveau_i2c_chan *i2c = data;
+-	struct drm_device *dev = i2c->dev;
+-
+-	return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
++	struct nouveau_i2c_chan *port = data;
++	struct drm_nouveau_private *dev_priv = port->dev->dev_private;
++	if (port->type == 0) {
++		return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x04);
++	} else
++	if (port->type == 4) {
++		return !!(nv_rd32(port->dev, port->sense) & 0x00040000);
++	} else
++	if (port->type == 5) {
++		if (dev_priv->card_type < NV_D0)
++			return !!(nv_rd32(port->dev, port->sense) & 0x01);
++		else
++			return !!(nv_rd32(port->dev, port->sense) & 0x10);
++	}
++	return 0;
+ }
+ 
+ static int
+-nv4e_i2c_getsda(void *data)
++i2c_sense_sda(void *data)
+ {
+-	struct nouveau_i2c_chan *i2c = data;
+-	struct drm_device *dev = i2c->dev;
+-
+-	return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
++	struct nouveau_i2c_chan *port = data;
++	struct drm_nouveau_private *dev_priv = port->dev->dev_private;
++	if (port->type == 0) {
++		return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x08);
++	} else
++	if (port->type == 4) {
++		return !!(nv_rd32(port->dev, port->sense) & 0x00080000);
++	} else
++	if (port->type == 5) {
++		if (dev_priv->card_type < NV_D0)
++			return !!(nv_rd32(port->dev, port->sense) & 0x02);
++		else
++			return !!(nv_rd32(port->dev, port->sense) & 0x20);
++	}
++	return 0;
+ }
+ 
+ static const uint32_t nv50_i2c_port[] = {
+@@ -114,177 +114,223 @@ static const uint32_t nv50_i2c_port[] = {
+ 	0x00e254, 0x00e274, 0x00e764, 0x00e780,
+ 	0x00e79c, 0x00e7b8
+ };
+-#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
+ 
+-static int
+-nv50_i2c_getscl(void *data)
+-{
+-	struct nouveau_i2c_chan *i2c = data;
+-	struct drm_device *dev = i2c->dev;
+-
+-	return !!(nv_rd32(dev, i2c->rd) & 1);
+-}
+-
+-
+-static int
+-nv50_i2c_getsda(void *data)
++static u8 *
++i2c_table(struct drm_device *dev, u8 *version)
+ {
+-	struct nouveau_i2c_chan *i2c = data;
+-	struct drm_device *dev = i2c->dev;
+-
+-	return !!(nv_rd32(dev, i2c->rd) & 2);
+-}
++	u8 *dcb = dcb_table(dev), *i2c = NULL;
++	if (dcb) {
++		if (dcb[0] >= 0x15)
++			i2c = ROMPTR(dev, dcb[2]);
++		if (dcb[0] >= 0x30)
++			i2c = ROMPTR(dev, dcb[4]);
++	}
+ 
+-static void
+-nv50_i2c_setscl(void *data, int state)
+-{
+-	struct nouveau_i2c_chan *i2c = data;
++	/* early revisions had no version number, use dcb version */
++	if (i2c) {
++		*version = dcb[0];
++		if (*version >= 0x30)
++			*version = i2c[0];
++	}
+ 
+-	nv_wr32(i2c->dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
++	return i2c;
+ }
+ 
+-static void
+-nv50_i2c_setsda(void *data, int state)
++int
++nouveau_i2c_init(struct drm_device *dev)
+ {
+-	struct nouveau_i2c_chan *i2c = data;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->vbios;
++	struct nouveau_i2c_chan *port;
++	u8 version = 0x00, entries, recordlen;
++	u8 *i2c, *entry, legacy[2][4] = {};
++	int ret, i;
++
++	INIT_LIST_HEAD(&dev_priv->i2c_ports);
++
++	i2c = i2c_table(dev, &version);
++	if (!i2c) {
++		u8 *bmp = &bios->data[bios->offset];
++		if (bios->type != NVBIOS_BMP)
++			return -ENODEV;
++
++		legacy[0][0] = NV_CIO_CRE_DDC_WR__INDEX;
++		legacy[0][1] = NV_CIO_CRE_DDC_STATUS__INDEX;
++		legacy[1][0] = NV_CIO_CRE_DDC0_WR__INDEX;
++		legacy[1][1] = NV_CIO_CRE_DDC0_STATUS__INDEX;
++
++		/* BMP (from v4.0) has i2c info in the structure, it's in a
++		 * fixed location on earlier VBIOS
++		 */
++		if (bmp[5] < 4)
++			i2c = &bios->data[0x48];
++		else
++			i2c = &bmp[0x36];
++
++		if (i2c[4]) legacy[0][0] = i2c[4];
++		if (i2c[5]) legacy[0][1] = i2c[5];
++		if (i2c[6]) legacy[1][0] = i2c[6];
++		if (i2c[7]) legacy[1][1] = i2c[7];
++	}
+ 
+-	nv_mask(i2c->dev, i2c->wr, 0x00000006, 4 | (state ? 2 : 0));
+-	i2c->data = state;
+-}
++	if (version >= 0x30) {
++		entry     = i2c[1] + i2c;
++		entries   = i2c[2];
++		recordlen = i2c[3];
++	} else
++	if (version) {
++		entry     = i2c;
++		entries   = 16;
++		recordlen = 4;
++	} else {
++		entry     = legacy[0];
++		entries   = 2;
++		recordlen = 4;
++	}
+ 
+-static int
+-nvd0_i2c_getscl(void *data)
+-{
+-	struct nouveau_i2c_chan *i2c = data;
+-	return !!(nv_rd32(i2c->dev, i2c->rd) & 0x10);
+-}
++	for (i = 0; i < entries; i++, entry += recordlen) {
++		port = kzalloc(sizeof(*port), GFP_KERNEL);
++		if (port == NULL) {
++			nouveau_i2c_fini(dev);
++			return -ENOMEM;
++		}
+ 
+-static int
+-nvd0_i2c_getsda(void *data)
+-{
+-	struct nouveau_i2c_chan *i2c = data;
+-	return !!(nv_rd32(i2c->dev, i2c->rd) & 0x20);
+-}
++		port->type = entry[3];
++		if (version < 0x30) {
++			port->type &= 0x07;
++			if (port->type == 0x07)
++				port->type = 0xff;
++		}
+ 
+-int
+-nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
+-{
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_i2c_chan *i2c;
+-	int ret;
++		if (port->type == 0xff) {
++			kfree(port);
++			continue;
++		}
+ 
+-	if (entry->chan)
+-		return -EEXIST;
++		switch (port->type) {
++		case 0: /* NV04:NV50 */
++			port->drive = entry[0];
++			port->sense = entry[1];
++			break;
++		case 4: /* NV4E */
++			port->drive = 0x600800 + entry[1];
++			port->sense = port->drive;
++			break;
++		case 5: /* NV50- */
++			port->drive = entry[0] & 0x0f;
++			if (dev_priv->card_type < NV_D0) {
++				if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
++					break;
++				port->drive = nv50_i2c_port[port->drive];
++				port->sense = port->drive;
++			} else {
++				port->drive = 0x00d014 + (port->drive * 0x20);
++				port->sense = port->drive;
++			}
++			break;
++		case 6: /* NV50- DP AUX */
++			port->drive = entry[0];
++			port->sense = port->drive;
++			port->adapter.algo = &nouveau_dp_i2c_algo;
++			break;
++		default:
++			break;
++		}
+ 
+-	if (dev_priv->card_type >= NV_50 &&
+-	    dev_priv->card_type <= NV_C0 && entry->read >= NV50_I2C_PORTS) {
+-		NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
+-		return -EINVAL;
+-	}
++		if (!port->adapter.algo && !port->drive) {
++			NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
++				 i, port->type, port->drive, port->sense);
++			kfree(port);
++			continue;
++		}
+ 
+-	i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
+-	if (i2c == NULL)
+-		return -ENOMEM;
+-
+-	switch (entry->port_type) {
+-	case 0:
+-		i2c->bit.setsda = nv04_i2c_setsda;
+-		i2c->bit.setscl = nv04_i2c_setscl;
+-		i2c->bit.getsda = nv04_i2c_getsda;
+-		i2c->bit.getscl = nv04_i2c_getscl;
+-		i2c->rd = entry->read;
+-		i2c->wr = entry->write;
+-		break;
+-	case 4:
+-		i2c->bit.setsda = nv4e_i2c_setsda;
+-		i2c->bit.setscl = nv4e_i2c_setscl;
+-		i2c->bit.getsda = nv4e_i2c_getsda;
+-		i2c->bit.getscl = nv4e_i2c_getscl;
+-		i2c->rd = 0x600800 + entry->read;
+-		i2c->wr = 0x600800 + entry->write;
+-		break;
+-	case 5:
+-		i2c->bit.setsda = nv50_i2c_setsda;
+-		i2c->bit.setscl = nv50_i2c_setscl;
+-		if (dev_priv->card_type < NV_D0) {
+-			i2c->bit.getsda = nv50_i2c_getsda;
+-			i2c->bit.getscl = nv50_i2c_getscl;
+-			i2c->rd = nv50_i2c_port[entry->read];
+-			i2c->wr = i2c->rd;
++		snprintf(port->adapter.name, sizeof(port->adapter.name),
++			 "nouveau-%s-%d", pci_name(dev->pdev), i);
++		port->adapter.owner = THIS_MODULE;
++		port->adapter.dev.parent = &dev->pdev->dev;
++		port->dev = dev;
++		port->index = i;
++		port->dcb = ROM32(entry[0]);
++		i2c_set_adapdata(&port->adapter, i2c);
++
++		if (port->adapter.algo != &nouveau_dp_i2c_algo) {
++			port->adapter.algo_data = &port->bit;
++			port->bit.udelay = 10;
++			port->bit.timeout = usecs_to_jiffies(2200);
++			port->bit.data = port;
++			port->bit.setsda = i2c_drive_sda;
++			port->bit.setscl = i2c_drive_scl;
++			port->bit.getsda = i2c_sense_sda;
++			port->bit.getscl = i2c_sense_scl;
++
++			i2c_drive_scl(port, 0);
++			i2c_drive_sda(port, 1);
++			i2c_drive_scl(port, 1);
++
++			ret = i2c_bit_add_bus(&port->adapter);
+ 		} else {
+-			i2c->bit.getsda = nvd0_i2c_getsda;
+-			i2c->bit.getscl = nvd0_i2c_getscl;
+-			i2c->rd = 0x00d014 + (entry->read * 0x20);
+-			i2c->wr = i2c->rd;
++			port->adapter.algo = &nouveau_dp_i2c_algo;
++			ret = i2c_add_adapter(&port->adapter);
+ 		}
+-		break;
+-	case 6:
+-		i2c->rd = entry->read;
+-		i2c->wr = entry->write;
+-		break;
+-	default:
+-		NV_ERROR(dev, "DCB I2C port type %d unknown\n",
+-			 entry->port_type);
+-		kfree(i2c);
+-		return -EINVAL;
+-	}
+ 
+-	snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+-		 "nouveau-%s-%d", pci_name(dev->pdev), index);
+-	i2c->adapter.owner = THIS_MODULE;
+-	i2c->adapter.dev.parent = &dev->pdev->dev;
+-	i2c->dev = dev;
+-	i2c_set_adapdata(&i2c->adapter, i2c);
+-
+-	if (entry->port_type < 6) {
+-		i2c->adapter.algo_data = &i2c->bit;
+-		i2c->bit.udelay = 40;
+-		i2c->bit.timeout = usecs_to_jiffies(5000);
+-		i2c->bit.data = i2c;
+-		ret = i2c_bit_add_bus(&i2c->adapter);
+-	} else {
+-		i2c->adapter.algo = &nouveau_dp_i2c_algo;
+-		ret = i2c_add_adapter(&i2c->adapter);
+-	}
++		if (ret) {
++			NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
++			kfree(port);
++			continue;
++		}
+ 
+-	if (ret) {
+-		NV_ERROR(dev, "Failed to register i2c %d\n", index);
+-		kfree(i2c);
+-		return ret;
++		list_add_tail(&port->head, &dev_priv->i2c_ports);
+ 	}
+ 
+-	entry->chan = i2c;
+ 	return 0;
+ }
+ 
+ void
+-nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry)
++nouveau_i2c_fini(struct drm_device *dev)
+ {
+-	if (!entry->chan)
+-		return;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_i2c_chan *port, *tmp;
+ 
+-	i2c_del_adapter(&entry->chan->adapter);
+-	kfree(entry->chan);
+-	entry->chan = NULL;
++	list_for_each_entry_safe(port, tmp, &dev_priv->i2c_ports, head) {
++		i2c_del_adapter(&port->adapter);
++		kfree(port);
++	}
+ }
+ 
+ struct nouveau_i2c_chan *
+-nouveau_i2c_find(struct drm_device *dev, int index)
++nouveau_i2c_find(struct drm_device *dev, u8 index)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
++	struct nouveau_i2c_chan *port;
++
++	if (index == NV_I2C_DEFAULT(0) ||
++	    index == NV_I2C_DEFAULT(1)) {
++		u8 version, *i2c = i2c_table(dev, &version);
++		if (i2c && version >= 0x30) {
++			if (index == NV_I2C_DEFAULT(0))
++				index = (i2c[4] & 0x0f);
++			else
++				index = (i2c[4] & 0xf0) >> 4;
++		} else {
++			index = 2;
++		}
++	}
+ 
+-	if (index >= DCB_MAX_NUM_I2C_ENTRIES)
+-		return NULL;
++	list_for_each_entry(port, &dev_priv->i2c_ports, head) {
++		if (port->index == index)
++			break;
++	}
+ 
+-	if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) {
+-		uint32_t reg = 0xe500, val;
++	if (&port->head == &dev_priv->i2c_ports)
++		return NULL;
+ 
+-		if (i2c->port_type == 6) {
+-			reg += i2c->read * 0x50;
++	if (dev_priv->card_type >= NV_50 && (port->dcb & 0x00000100)) {
++		u32 reg = 0x00e500, val;
++		if (port->type == 6) {
++			reg += port->drive * 0x50;
+ 			val  = 0x2002;
+ 		} else {
+-			reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
++			reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
+ 			val  = 0xe001;
+ 		}
+ 
+@@ -294,9 +340,7 @@ nouveau_i2c_find(struct drm_device *dev, int index)
+ 		nv_mask(dev, reg + 0x00, 0x0000f003, val);
+ 	}
+ 
+-	if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
+-		return NULL;
+-	return i2c->chan;
++	return port;
+ }
+ 
+ bool
+@@ -331,9 +375,13 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
+ 	struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
+ 	int i;
+ 
+-	NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
++	if (!i2c) {
++		NV_DEBUG(dev, "No bus when probing %s on %d\n", what, index);
++		return -ENODEV;
++	}
+ 
+-	for (i = 0; i2c && info[i].addr; i++) {
++	NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, i2c->index);
++	for (i = 0; info[i].addr; i++) {
+ 		if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
+ 		    (!match || match(i2c, &info[i]))) {
+ 			NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
+@@ -342,6 +390,5 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
+ 	}
+ 
+ 	NV_DEBUG(dev, "No devices found.\n");
+-
+ 	return -ENODEV;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
+index 422b62f..1d08389 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
++++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
+@@ -27,20 +27,26 @@
+ #include <linux/i2c-algo-bit.h>
+ #include "drm_dp_helper.h"
+ 
+-struct dcb_i2c_entry;
++#define NV_I2C_PORT(n)    (0x00 + (n))
++#define NV_I2C_PORT_NUM    0x10
++#define NV_I2C_DEFAULT(n) (0x80 + (n))
+ 
+ struct nouveau_i2c_chan {
+ 	struct i2c_adapter adapter;
+ 	struct drm_device *dev;
+ 	struct i2c_algo_bit_data bit;
+-	unsigned rd;
+-	unsigned wr;
+-	unsigned data;
++	struct list_head head;
++	u8  index;
++	u8  type;
++	u32 dcb;
++	u32 drive;
++	u32 sense;
++	u32 state;
+ };
+ 
+-int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
+-void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
+-struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
++int  nouveau_i2c_init(struct drm_device *);
++void nouveau_i2c_fini(struct drm_device *);
++struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, u8 index);
+ bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
+ int nouveau_i2c_identify(struct drm_device *dev, const char *what,
+ 			 struct i2c_board_info *info,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
+index 36bec48..b08065f 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
+@@ -26,7 +26,8 @@
+  * DEALINGS IN THE SOFTWARE.
+  *
+  * Authors:
+- *    Keith Whitwell <keith at tungstengraphics.com>
++ *    Ben Skeggs <bskeggs at redhat.com>
++ *    Roy Spliet <r.spliet at student.tudelft.nl>
+  */
+ 
+ 
+@@ -192,75 +193,6 @@ nouveau_mem_gart_fini(struct drm_device *dev)
+ 	}
+ }
+ 
+-static uint32_t
+-nouveau_mem_detect_nv04(struct drm_device *dev)
+-{
+-	uint32_t boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
+-
+-	if (boot0 & 0x00000100)
+-		return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
+-
+-	switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+-	case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
+-		return 32 * 1024 * 1024;
+-	case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
+-		return 16 * 1024 * 1024;
+-	case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
+-		return 8 * 1024 * 1024;
+-	case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
+-		return 4 * 1024 * 1024;
+-	}
+-
+-	return 0;
+-}
+-
+-static uint32_t
+-nouveau_mem_detect_nforce(struct drm_device *dev)
+-{
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct pci_dev *bridge;
+-	uint32_t mem;
+-
+-	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+-	if (!bridge) {
+-		NV_ERROR(dev, "no bridge device\n");
+-		return 0;
+-	}
+-
+-	if (dev_priv->flags & NV_NFORCE) {
+-		pci_read_config_dword(bridge, 0x7C, &mem);
+-		return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
+-	} else
+-	if (dev_priv->flags & NV_NFORCE2) {
+-		pci_read_config_dword(bridge, 0x84, &mem);
+-		return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
+-	}
+-
+-	NV_ERROR(dev, "impossible!\n");
+-	return 0;
+-}
+-
+-int
+-nouveau_mem_detect(struct drm_device *dev)
+-{
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-
+-	if (dev_priv->card_type == NV_04) {
+-		dev_priv->vram_size = nouveau_mem_detect_nv04(dev);
+-	} else
+-	if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
+-		dev_priv->vram_size = nouveau_mem_detect_nforce(dev);
+-	} else
+-	if (dev_priv->card_type < NV_50) {
+-		dev_priv->vram_size  = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+-		dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
+-	}
+-
+-	if (dev_priv->vram_size)
+-		return 0;
+-	return -ENOMEM;
+-}
+-
+ bool
+ nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
+ {
+@@ -385,11 +317,29 @@ nouveau_mem_init_agp(struct drm_device *dev)
+ 	return 0;
+ }
+ 
++static const struct vram_types {
++	int value;
++	const char *name;
++} vram_type_map[] = {
++	{ NV_MEM_TYPE_STOLEN , "stolen system memory" },
++	{ NV_MEM_TYPE_SGRAM  , "SGRAM" },
++	{ NV_MEM_TYPE_SDRAM  , "SDRAM" },
++	{ NV_MEM_TYPE_DDR1   , "DDR1" },
++	{ NV_MEM_TYPE_DDR2   , "DDR2" },
++	{ NV_MEM_TYPE_DDR3   , "DDR3" },
++	{ NV_MEM_TYPE_GDDR2  , "GDDR2" },
++	{ NV_MEM_TYPE_GDDR3  , "GDDR3" },
++	{ NV_MEM_TYPE_GDDR4  , "GDDR4" },
++	{ NV_MEM_TYPE_GDDR5  , "GDDR5" },
++	{ NV_MEM_TYPE_UNKNOWN, "unknown type" }
++};
++
+ int
+ nouveau_mem_vram_init(struct drm_device *dev)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
++	const struct vram_types *vram_type;
+ 	int ret, dma_bits;
+ 
+ 	dma_bits = 32;
+@@ -407,6 +357,12 @@ nouveau_mem_vram_init(struct drm_device *dev)
+ 	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
+ 	if (ret)
+ 		return ret;
++	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
++	if (ret) {
++		/* Reset to default value. */
++		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
++	}
++
+ 
+ 	ret = nouveau_ttm_global_init(dev_priv);
+ 	if (ret)
+@@ -421,7 +377,21 @@ nouveau_mem_vram_init(struct drm_device *dev)
+ 		return ret;
+ 	}
+ 
+-	NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
++	vram_type = vram_type_map;
++	while (vram_type->value != NV_MEM_TYPE_UNKNOWN) {
++		if (nouveau_vram_type) {
++			if (!strcasecmp(nouveau_vram_type, vram_type->name))
++				break;
++			dev_priv->vram_type = vram_type->value;
++		} else {
++			if (vram_type->value == dev_priv->vram_type)
++				break;
++		}
++		vram_type++;
++	}
++
++	NV_INFO(dev, "Detected %dMiB VRAM (%s)\n",
++		(int)(dev_priv->vram_size >> 20), vram_type->name);
+ 	if (dev_priv->vram_sys_base) {
+ 		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
+ 			dev_priv->vram_sys_base);
+@@ -502,216 +472,617 @@ nouveau_mem_gart_init(struct drm_device *dev)
+ 	return 0;
+ }
+ 
+-/* XXX: For now a dummy. More samples required, possibly even a card
+- * Called from nouveau_perf.c */
+-void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
+-							struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
+-							struct nouveau_pm_memtiming *timing) {
+-
+-	NV_DEBUG(dev,"Timing entry format unknown, please contact nouveau developers");
+-}
+-
+-void nv40_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
+-							struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
+-							struct nouveau_pm_memtiming *timing) {
+-
+-	timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP);
++static int
++nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
++		     struct nouveau_pm_tbl_entry *e, u8 len,
++		     struct nouveau_pm_memtiming *boot,
++		     struct nouveau_pm_memtiming *t)
++{
++	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
+ 
+ 	/* XXX: I don't trust the -1's and +1's... they must come
+ 	 *      from somewhere! */
+-	timing->reg_1 = (e->tWR + 2 + magic_number) << 24 |
+-				  1 << 16 |
+-				  (e->tUNK_1 + 2 + magic_number) << 8 |
+-				  (e->tCL + 2 - magic_number);
+-	timing->reg_2 = (magic_number << 24 | e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10);
+-	timing->reg_2 |= 0x20200000;
+-
+-	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", timing->id,
+-		 timing->reg_0, timing->reg_1,timing->reg_2);
++	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
++		    1 << 16 |
++		    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
++		    (e->tCL + 2 - (t->tCWL - 1));
++
++	t->reg[2] = 0x20200000 |
++		    ((t->tCWL - 1) << 24 |
++		     e->tRRD << 16 |
++		     e->tRCDWR << 8 |
++		     e->tRCDRD);
++
++	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
++		 t->reg[0], t->reg[1], t->reg[2]);
++	return 0;
+ }
+ 
+-void nv50_mem_timing_entry(struct drm_device *dev, struct bit_entry *P, struct nouveau_pm_tbl_header *hdr,
+-							struct nouveau_pm_tbl_entry *e, uint8_t magic_number,struct nouveau_pm_memtiming *timing) {
++static int
++nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
++		     struct nouveau_pm_tbl_entry *e, u8 len,
++		     struct nouveau_pm_memtiming *boot,
++		     struct nouveau_pm_memtiming *t)
++{
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct bit_entry P;
++	uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;
+ 
+-	uint8_t unk18 = 1,
+-		unk19 = 1,
+-		unk20 = 0,
+-		unk21 = 0;
++	if (bit_table(dev, 'P', &P))
++		return -EINVAL;
+ 
+-	switch (min(hdr->entry_len, (u8) 22)) {
++	switch (min(len, (u8) 22)) {
+ 	case 22:
+ 		unk21 = e->tUNK_21;
+ 	case 21:
+ 		unk20 = e->tUNK_20;
+ 	case 20:
+-		unk19 = e->tUNK_19;
++		if (e->tCWL > 0)
++			t->tCWL = e->tCWL;
+ 	case 19:
+ 		unk18 = e->tUNK_18;
+ 		break;
+ 	}
+ 
+-	timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP);
++	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);
+ 
+-	/* XXX: I don't trust the -1's and +1's... they must come
+-	 *      from somewhere! */
+-	timing->reg_1 = (e->tWR + unk19 + 1 + magic_number) << 24 |
+-				  max(unk18, (u8) 1) << 16 |
+-				  (e->tUNK_1 + unk19 + 1 + magic_number) << 8;
+-	if (dev_priv->chipset == 0xa8) {
+-		timing->reg_1 |= (e->tCL - 1);
+-	} else {
+-		timing->reg_1 |= (e->tCL + 2 - magic_number);
+-	}
+-	timing->reg_2 = (e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10);
+-
+-	timing->reg_5 = (e->tRAS << 24 | e->tRC);
+-	timing->reg_5 += max(e->tUNK_10, e->tUNK_11) << 16;
+-
+-	if (P->version == 1) {
+-		timing->reg_2 |= magic_number << 24;
+-		timing->reg_3 = (0x14 + e->tCL) << 24 |
+-						0x16 << 16 |
+-						(e->tCL - 1) << 8 |
+-						(e->tCL - 1);
+-		timing->reg_4 = (nv_rd32(dev,0x10022c) & 0xffff0000) | e->tUNK_13 << 8  | e->tUNK_13;
+-		timing->reg_5 |= (e->tCL + 2) << 8;
+-		timing->reg_7 = 0x4000202 | (e->tCL - 1) << 16;
++	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
++				max(unk18, (u8) 1) << 16 |
++				(e->tWTR + 2 + (t->tCWL - 1)) << 8;
++
++	t->reg[2] = ((t->tCWL - 1) << 24 |
++		    e->tRRD << 16 |
++		    e->tRCDWR << 8 |
++		    e->tRCDRD);
++
++	t->reg[4] = e->tUNK_13 << 8  | e->tUNK_13;
++
++	t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);
++
++	t->reg[8] = boot->reg[8] & 0xffffff00;
++
++	if (P.version == 1) {
++		t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));
++
++		t->reg[3] = (0x14 + e->tCL) << 24 |
++			    0x16 << 16 |
++			    (e->tCL - 1) << 8 |
++			    (e->tCL - 1);
++
++		t->reg[4] |= boot->reg[4] & 0xffff0000;
++
++		t->reg[6] = (0x33 - t->tCWL) << 16 |
++			    t->tCWL << 8 |
++			    (0x2e + e->tCL - t->tCWL);
++
++		t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;
++
++		/* XXX: P.version == 1 only has DDR2 and GDDR3? */
++		if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) {
++			t->reg[5] |= (e->tCL + 3) << 8;
++			t->reg[6] |= (t->tCWL - 2) << 8;
++			t->reg[8] |= (e->tCL - 4);
++		} else {
++			t->reg[5] |= (e->tCL + 2) << 8;
++			t->reg[6] |= t->tCWL << 8;
++			t->reg[8] |= (e->tCL - 2);
++		}
+ 	} else {
+-		timing->reg_2 |= (unk19 - 1) << 24;
+-		/* XXX: reg_10022c for recentish cards pretty much unknown*/
+-		timing->reg_3 = e->tCL - 1;
+-		timing->reg_4 = (unk20 << 24 | unk21 << 16 |
+-							e->tUNK_13 << 8  | e->tUNK_13);
++		t->reg[1] |= (5 + e->tCL - (t->tCWL));
++
++		/* XXX: 0xb? 0x30? */
++		t->reg[3] = (0x30 + e->tCL) << 24 |
++			    (boot->reg[3] & 0x00ff0000)|
++			    (0xb + e->tCL) << 8 |
++			    (e->tCL - 1);
++
++		t->reg[4] |= (unk20 << 24 | unk21 << 16);
++
+ 		/* XXX: +6? */
+-		timing->reg_5 |= (unk19 + 6) << 8;
++		t->reg[5] |= (t->tCWL + 6) << 8;
+ 
+-		/* XXX: reg_10023c currently unknown
+-		 * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
+-		timing->reg_7 = 0x202;
++		t->reg[6] = (0x5a + e->tCL) << 16 |
++			    (6 - e->tCL + t->tCWL) << 8 |
++			    (0x50 + e->tCL - t->tCWL);
++
++		tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
++		t->reg[7] = (tmp7_3 << 24) |
++			    ((tmp7_3 - 6 + e->tCL) << 16) |
++			    0x202;
+ 	}
+ 
+-	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", timing->id,
+-		 timing->reg_0, timing->reg_1,
+-		 timing->reg_2, timing->reg_3);
++	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
++		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
+ 	NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
+-		 timing->reg_4, timing->reg_5,
+-		 timing->reg_6, timing->reg_7);
+-	NV_DEBUG(dev, "         240: %08x\n", timing->reg_8);
+-}
+-
+-void nvc0_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
+-							struct nouveau_pm_tbl_entry *e, struct nouveau_pm_memtiming *timing) {
+-	timing->reg_0 = (e->tRC << 24 | (e->tRFC & 0x7f) << 17 | e->tRAS << 8 | e->tRP);
+-	timing->reg_1 = (nv_rd32(dev,0x10f294) & 0xff000000) | (e->tUNK_11&0x0f) << 20 | (e->tUNK_19 << 7) | (e->tCL & 0x0f);
+-	timing->reg_2 = (nv_rd32(dev,0x10f298) & 0xff0000ff) | e->tWR << 16 | e->tUNK_1 << 8;
+-	timing->reg_3 = e->tUNK_20 << 9 | e->tUNK_13;
+-	timing->reg_4 = (nv_rd32(dev,0x10f2a0) & 0xfff000ff) | e->tUNK_12 << 15;
+-	NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", timing->id,
+-		 timing->reg_0, timing->reg_1,
+-		 timing->reg_2, timing->reg_3);
+-	NV_DEBUG(dev, "         2a0: %08x %08x %08x %08x\n",
+-		 timing->reg_4, timing->reg_5,
+-		 timing->reg_6, timing->reg_7);
++		 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
++	NV_DEBUG(dev, "         240: %08x\n", t->reg[8]);
++	return 0;
++}
++
++static int
++nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
++		     struct nouveau_pm_tbl_entry *e, u8 len,
++		     struct nouveau_pm_memtiming *boot,
++		     struct nouveau_pm_memtiming *t)
++{
++	if (e->tCWL > 0)
++		t->tCWL = e->tCWL;
++
++	t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
++		     e->tRFC << 8 | e->tRC);
++
++	t->reg[1] = (boot->reg[1] & 0xff000000) |
++		    (e->tRCDWR & 0x0f) << 20 |
++		    (e->tRCDRD & 0x0f) << 14 |
++		    (t->tCWL << 7) |
++		    (e->tCL & 0x0f);
++
++	t->reg[2] = (boot->reg[2] & 0xff0000ff) |
++		    e->tWR << 16 | e->tWTR << 8;
++
++	t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
++		    (e->tUNK_21 & 0xf) << 5 |
++		    (e->tUNK_13 & 0x1f);
++
++	t->reg[4] = (boot->reg[4] & 0xfff00fff) |
++		    (e->tRRD&0x1f) << 15;
++
++	NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
++		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
++	NV_DEBUG(dev, "         2a0: %08x\n", t->reg[4]);
++	return 0;
+ }
+ 
+ /**
+- * Processes the Memory Timing BIOS table, stores generated
+- * register values
+- * @pre init scripts were run, memtiming regs are initialized
++ * MR generation methods
+  */
+-void
+-nouveau_mem_timing_init(struct drm_device *dev)
++
++static int
++nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
++		    struct nouveau_pm_tbl_entry *e, u8 len,
++		    struct nouveau_pm_memtiming *boot,
++		    struct nouveau_pm_memtiming *t)
++{
++	t->drive_strength = 0;
++	if (len < 15) {
++		t->odt = boot->odt;
++	} else {
++		t->odt = e->RAM_FT1 & 0x07;
++	}
++
++	if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
++		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
++		return -ERANGE;
++	}
++
++	if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
++		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
++		return -ERANGE;
++	}
++
++	if (t->odt > 3) {
++		NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",
++			t->id, t->odt);
++		t->odt = 0;
++	}
++
++	t->mr[0] = (boot->mr[0] & 0x100f) |
++		   (e->tCL) << 4 |
++		   (e->tWR - 1) << 9;
++	t->mr[1] = (boot->mr[1] & 0x101fbb) |
++		   (t->odt & 0x1) << 2 |
++		   (t->odt & 0x2) << 5;
++
++	NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
++	return 0;
++}
++
++uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
++	0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};
++
++static int
++nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
++		    struct nouveau_pm_tbl_entry *e, u8 len,
++		    struct nouveau_pm_memtiming *boot,
++		    struct nouveau_pm_memtiming *t)
++{
++	u8 cl = e->tCL - 4;
++
++	t->drive_strength = 0;
++	if (len < 15) {
++		t->odt = boot->odt;
++	} else {
++		t->odt = e->RAM_FT1 & 0x07;
++	}
++
++	if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
++		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
++		return -ERANGE;
++	}
++
++	if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
++		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
++		return -ERANGE;
++	}
++
++	if (e->tCWL < 5) {
++		NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
++		return -ERANGE;
++	}
++
++	t->mr[0] = (boot->mr[0] & 0x180b) |
++		   /* CAS */
++		   (cl & 0x7) << 4 |
++		   (cl & 0x8) >> 1 |
++		   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
++	t->mr[1] = (boot->mr[1] & 0x101dbb) |
++		   (t->odt & 0x1) << 2 |
++		   (t->odt & 0x2) << 5 |
++		   (t->odt & 0x4) << 7;
++	t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;
++
++	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
++	return 0;
++}
++
++uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
++	0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
++uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
++	0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};
++
++static int
++nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
++		     struct nouveau_pm_tbl_entry *e, u8 len,
++		     struct nouveau_pm_memtiming *boot,
++		     struct nouveau_pm_memtiming *t)
++{
++	if (len < 15) {
++		t->drive_strength = boot->drive_strength;
++		t->odt = boot->odt;
++	} else {
++		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
++		t->odt = e->RAM_FT1 & 0x07;
++	}
++
++	if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
++		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
++		return -ERANGE;
++	}
++
++	if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
++		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
++		return -ERANGE;
++	}
++
++	if (t->odt > 3) {
++		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
++			t->id, t->odt);
++		t->odt = 0;
++	}
++
++	t->mr[0] = (boot->mr[0] & 0xe0b) |
++		   /* CAS */
++		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
++		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
++	t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
++		   (t->odt << 2) |
++		   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
++	t->mr[2] = boot->mr[2];
++
++	NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id,
++		      t->mr[0], t->mr[1], t->mr[2]);
++	return 0;
++}
++
++static int
++nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
++		     struct nouveau_pm_tbl_entry *e, u8 len,
++		     struct nouveau_pm_memtiming *boot,
++		     struct nouveau_pm_memtiming *t)
++{
++	if (len < 15) {
++		t->drive_strength = boot->drive_strength;
++		t->odt = boot->odt;
++	} else {
++		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
++		t->odt = e->RAM_FT1 & 0x03;
++	}
++
++	if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
++		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
++		return -ERANGE;
++	}
++
++	if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
++		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
++		return -ERANGE;
++	}
++
++	if (t->odt > 3) {
++		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
++			t->id, t->odt);
++		t->odt = 0;
++	}
++
++	t->mr[0] = (boot->mr[0] & 0x007) |
++		   ((e->tCL - 5) << 3) |
++		   ((e->tWR - 4) << 8);
++	t->mr[1] = (boot->mr[1] & 0x1007f0) |
++		   t->drive_strength |
++		   (t->odt << 2);
++
++	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
++	return 0;
++}
++
++int
++nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
++			struct nouveau_pm_memtiming *t)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+-	struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
+-	struct nvbios *bios = &dev_priv->vbios;
+-	struct bit_entry P;
+-	struct nouveau_pm_tbl_header *hdr = NULL;
+-	uint8_t magic_number;
+-	u8 *entry;
+-	int i;
++	struct nouveau_pm_memtiming *boot = &pm->boot.timing;
++	struct nouveau_pm_tbl_entry *e;
++	u8 ver, len, *ptr, *ramcfg;
++	int ret;
++
++	ptr = nouveau_perf_timing(dev, freq, &ver, &len);
++	if (!ptr || ptr[0] == 0x00) {
++		*t = *boot;
++		return 0;
++	}
++	e = (struct nouveau_pm_tbl_entry *)ptr;
++
++	t->tCWL = boot->tCWL;
++
++	switch (dev_priv->card_type) {
++	case NV_40:
++		ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
++		break;
++	case NV_50:
++		ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
++		break;
++	case NV_C0:
++		ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
++		break;
++	default:
++		ret = -ENODEV;
++		break;
++	}
+ 
+-	if (bios->type == NVBIOS_BIT) {
+-		if (bit_table(dev, 'P', &P))
+-			return;
++	switch (dev_priv->vram_type * !ret) {
++	case NV_MEM_TYPE_GDDR3:
++		ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
++		break;
++	case NV_MEM_TYPE_GDDR5:
++		ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
++		break;
++	case NV_MEM_TYPE_DDR2:
++		ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
++		break;
++	case NV_MEM_TYPE_DDR3:
++		ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
++		break;
++	default:
++		ret = -EINVAL;
++		break;
++	}
+ 
+-		if (P.version == 1)
+-			hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[4]);
++	ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
++	if (ramcfg) {
++		int dll_off;
++
++		if (ver == 0x00)
++			dll_off = !!(ramcfg[3] & 0x04);
+ 		else
+-		if (P.version == 2)
+-			hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[8]);
+-		else {
+-			NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
++			dll_off = !!(ramcfg[2] & 0x40);
++
++		switch (dev_priv->vram_type) {
++		case NV_MEM_TYPE_GDDR3:
++			t->mr[1] &= ~0x00000040;
++			t->mr[1] |=  0x00000040 * dll_off;
++			break;
++		default:
++			t->mr[1] &= ~0x00000001;
++			t->mr[1] |=  0x00000001 * dll_off;
++			break;
+ 		}
++	}
++
++	return ret;
++}
++
++void
++nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	u32 timing_base, timing_regs, mr_base;
++	int i;
++
++	if (dev_priv->card_type >= 0xC0) {
++		timing_base = 0x10f290;
++		mr_base = 0x10f300;
+ 	} else {
+-		NV_DEBUG(dev, "BMP version too old for memory\n");
+-		return;
++		timing_base = 0x100220;
++		mr_base = 0x1002c0;
+ 	}
+ 
+-	if (!hdr) {
+-		NV_DEBUG(dev, "memory timing table pointer invalid\n");
++	t->id = -1;
++
++	switch (dev_priv->card_type) {
++	case NV_50:
++		timing_regs = 9;
++		break;
++	case NV_C0:
++	case NV_D0:
++		timing_regs = 5;
++		break;
++	case NV_30:
++	case NV_40:
++		timing_regs = 3;
++		break;
++	default:
++		timing_regs = 0;
+ 		return;
+ 	}
++	for(i = 0; i < timing_regs; i++)
++		t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));
++
++	t->tCWL = 0;
++	if (dev_priv->card_type < NV_C0) {
++		t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
++	} else if (dev_priv->card_type <= NV_D0) {
++		t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7);
++	}
+ 
+-	if (hdr->version != 0x10) {
+-		NV_WARN(dev, "memory timing table 0x%02x unknown\n", hdr->version);
+-		return;
++	t->mr[0] = nv_rd32(dev, mr_base);
++	t->mr[1] = nv_rd32(dev, mr_base + 0x04);
++	t->mr[2] = nv_rd32(dev, mr_base + 0x20);
++	t->mr[3] = nv_rd32(dev, mr_base + 0x24);
++
++	t->odt = 0;
++	t->drive_strength = 0;
++
++	switch (dev_priv->vram_type) {
++	case NV_MEM_TYPE_DDR3:
++		t->odt |= (t->mr[1] & 0x200) >> 7;
++	case NV_MEM_TYPE_DDR2:
++		t->odt |= (t->mr[1] & 0x04) >> 2 |
++			  (t->mr[1] & 0x40) >> 5;
++		break;
++	case NV_MEM_TYPE_GDDR3:
++	case NV_MEM_TYPE_GDDR5:
++		t->drive_strength = t->mr[1] & 0x03;
++		t->odt = (t->mr[1] & 0x0c) >> 2;
++		break;
++	default:
++		break;
+ 	}
++}
+ 
+-	/* validate record length */
+-	if (hdr->entry_len < 15) {
+-		NV_ERROR(dev, "mem timing table length unknown: %d\n", hdr->entry_len);
+-		return;
++int
++nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
++		 struct nouveau_pm_level *perflvl)
++{
++	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
++	struct nouveau_pm_memtiming *info = &perflvl->timing;
++	u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
++	u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
++	u32 mr1_dlloff;
++
++	switch (dev_priv->vram_type) {
++	case NV_MEM_TYPE_DDR2:
++		tDLLK = 2000;
++		mr1_dlloff = 0x00000001;
++		break;
++	case NV_MEM_TYPE_DDR3:
++		tDLLK = 12000;
++		mr1_dlloff = 0x00000001;
++		break;
++	case NV_MEM_TYPE_GDDR3:
++		tDLLK = 40000;
++		mr1_dlloff = 0x00000040;
++		break;
++	default:
++		NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n");
++		return -ENODEV;
+ 	}
+ 
+-	/* parse vbios entries into common format */
+-	memtimings->timing =
+-		kcalloc(hdr->entry_cnt, sizeof(*memtimings->timing), GFP_KERNEL);
+-	if (!memtimings->timing)
+-		return;
++	/* fetch current MRs */
++	switch (dev_priv->vram_type) {
++	case NV_MEM_TYPE_GDDR3:
++	case NV_MEM_TYPE_DDR3:
++		mr[2] = exec->mrg(exec, 2);
++	default:
++		mr[1] = exec->mrg(exec, 1);
++		mr[0] = exec->mrg(exec, 0);
++		break;
++	}
+ 
+-	/* Get "some number" from the timing reg for NV_40 and NV_50
+-	 * Used in calculations later... source unknown */
+-	magic_number = 0;
+-	if (P.version == 1) {
+-		magic_number = (nv_rd32(dev, 0x100228) & 0x0f000000) >> 24;
++	/* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh  */
++	if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
++		exec->precharge(exec);
++		exec->mrs (exec, 1, mr[1] | mr1_dlloff);
++		exec->wait(exec, tMRD);
+ 	}
+ 
+-	entry = (u8*) hdr + hdr->header_len;
+-	for (i = 0; i < hdr->entry_cnt; i++, entry += hdr->entry_len) {
+-		struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
+-		if (entry[0] == 0)
+-			continue;
++	/* enter self-refresh mode */
++	exec->precharge(exec);
++	exec->refresh(exec);
++	exec->refresh(exec);
++	exec->refresh_auto(exec, false);
++	exec->refresh_self(exec, true);
++	exec->wait(exec, tCKSRE);
++
++	/* modify input clock frequency */
++	exec->clock_set(exec);
++
++	/* exit self-refresh mode */
++	exec->wait(exec, tCKSRX);
++	exec->precharge(exec);
++	exec->refresh_self(exec, false);
++	exec->refresh_auto(exec, true);
++	exec->wait(exec, tXS);
++
++	/* update MRs */
++	if (mr[2] != info->mr[2]) {
++		exec->mrs (exec, 2, info->mr[2]);
++		exec->wait(exec, tMRD);
++	}
++
++	if (mr[1] != info->mr[1]) {
++		/* need to keep DLL off until later, at least on GDDR3 */
++		exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
++		exec->wait(exec, tMRD);
++	}
++
++	if (mr[0] != info->mr[0]) {
++		exec->mrs (exec, 0, info->mr[0]);
++		exec->wait(exec, tMRD);
++	}
+ 
+-		timing->id = i;
+-		timing->WR = entry[0];
+-		timing->CL = entry[2];
++	/* update PFB timing registers */
++	exec->timing_set(exec);
+ 
+-		if(dev_priv->card_type <= NV_40) {
+-			nv40_mem_timing_entry(dev,hdr,(struct nouveau_pm_tbl_entry*) entry,magic_number,&pm->memtimings.timing[i]);
+-		} else if(dev_priv->card_type == NV_50){
+-			nv50_mem_timing_entry(dev,&P,hdr,(struct nouveau_pm_tbl_entry*) entry,magic_number,&pm->memtimings.timing[i]);
+-		} else if(dev_priv->card_type == NV_C0) {
+-			nvc0_mem_timing_entry(dev,hdr,(struct nouveau_pm_tbl_entry*) entry,&pm->memtimings.timing[i]);
++	/* DLL (enable + ) reset */
++	if (!(info->mr[1] & mr1_dlloff)) {
++		if (mr[1] & mr1_dlloff) {
++			exec->mrs (exec, 1, info->mr[1]);
++			exec->wait(exec, tMRD);
+ 		}
++		exec->mrs (exec, 0, info->mr[0] | 0x00000100);
++		exec->wait(exec, tMRD);
++		exec->mrs (exec, 0, info->mr[0] | 0x00000000);
++		exec->wait(exec, tMRD);
++		exec->wait(exec, tDLLK);
++		if (dev_priv->vram_type == NV_MEM_TYPE_GDDR3)
++			exec->precharge(exec);
+ 	}
+ 
+-	memtimings->nr_timing = hdr->entry_cnt;
+-	memtimings->supported = P.version == 1;
++	return 0;
+ }
+ 
+-void
+-nouveau_mem_timing_fini(struct drm_device *dev)
++int
++nouveau_mem_vbios_type(struct drm_device *dev)
+ {
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
++	struct bit_entry M;
++	u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
++	if (!bit_table(dev, 'M', &M) || M.version != 2 || M.length < 5) {
++		u8 *table = ROMPTR(dev, M.data[3]);
++		if (table && table[0] == 0x10 && ramcfg < table[3]) {
++			u8 *entry = table + table[1] + (ramcfg * table[2]);
++			switch (entry[0] & 0x0f) {
++			case 0: return NV_MEM_TYPE_DDR2;
++			case 1: return NV_MEM_TYPE_DDR3;
++			case 2: return NV_MEM_TYPE_GDDR3;
++			case 3: return NV_MEM_TYPE_GDDR5;
++			default:
++				break;
++			}
+ 
+-	if(mem->timing) {
+-		kfree(mem->timing);
+-		mem->timing = NULL;
++		}
+ 	}
++	return NV_MEM_TYPE_UNKNOWN;
+ }
+ 
+ static int
+diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c
+new file mode 100644
+index 0000000..07d0d1e
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_mxm.c
+@@ -0,0 +1,723 @@
++/*
++ * Copyright 2011 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs
++ */
++
++#include <linux/acpi.h>
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++
++#define MXM_DBG(dev, fmt, args...) NV_DEBUG((dev), "MXM: " fmt, ##args)
++#define MXM_MSG(dev, fmt, args...) NV_INFO((dev), "MXM: " fmt, ##args)
++
++static u8 *
++mxms_data(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	return dev_priv->mxms;
++
++}
++
++static u16
++mxms_version(struct drm_device *dev)
++{
++	u8 *mxms = mxms_data(dev);
++	u16 version = (mxms[4] << 8) | mxms[5];
++	switch (version ) {
++	case 0x0200:
++	case 0x0201:
++	case 0x0300:
++		return version;
++	default:
++		break;
++	}
++
++	MXM_DBG(dev, "unknown version %d.%d\n", mxms[4], mxms[5]);
++	return 0x0000;
++}
++
++static u16
++mxms_headerlen(struct drm_device *dev)
++{
++	return 8;
++}
++
++static u16
++mxms_structlen(struct drm_device *dev)
++{
++	return *(u16 *)&mxms_data(dev)[6];
++}
++
++static bool
++mxms_checksum(struct drm_device *dev)
++{
++	u16 size = mxms_headerlen(dev) + mxms_structlen(dev);
++	u8 *mxms = mxms_data(dev), sum = 0;
++	while (size--)
++		sum += *mxms++;
++	if (sum) {
++		MXM_DBG(dev, "checksum invalid\n");
++		return false;
++	}
++	return true;
++}
++
++static bool
++mxms_valid(struct drm_device *dev)
++{
++	u8 *mxms = mxms_data(dev);
++	if (*(u32 *)mxms != 0x5f4d584d) {
++		MXM_DBG(dev, "signature invalid\n");
++		return false;
++	}
++
++	if (!mxms_version(dev) || !mxms_checksum(dev))
++		return false;
++
++	return true;
++}
++
++static bool
++mxms_foreach(struct drm_device *dev, u8 types,
++	     bool (*exec)(struct drm_device *, u8 *, void *), void *info)
++{
++	u8 *mxms = mxms_data(dev);
++	u8 *desc = mxms + mxms_headerlen(dev);
++	u8 *fini = desc + mxms_structlen(dev) - 1;
++	while (desc < fini) {
++		u8 type = desc[0] & 0x0f;
++		u8 headerlen = 0;
++		u8 recordlen = 0;
++		u8 entries = 0;
++
++		switch (type) {
++		case 0: /* Output Device Structure */
++			if (mxms_version(dev) >= 0x0300)
++				headerlen = 8;
++			else
++				headerlen = 6;
++			break;
++		case 1: /* System Cooling Capability Structure */
++		case 2: /* Thermal Structure */
++		case 3: /* Input Power Structure */
++			headerlen = 4;
++			break;
++		case 4: /* GPIO Device Structure */
++			headerlen = 4;
++			recordlen = 2;
++			entries   = (ROM32(desc[0]) & 0x01f00000) >> 20;
++			break;
++		case 5: /* Vendor Specific Structure */
++			headerlen = 8;
++			break;
++		case 6: /* Backlight Control Structure */
++			if (mxms_version(dev) >= 0x0300) {
++				headerlen = 4;
++				recordlen = 8;
++				entries   = (desc[1] & 0xf0) >> 4;
++			} else {
++				headerlen = 8;
++			}
++			break;
++		case 7: /* Fan Control Structure */
++			headerlen = 8;
++			recordlen = 4;
++			entries   = desc[1] & 0x07;
++			break;
++		default:
++			MXM_DBG(dev, "unknown descriptor type %d\n", type);
++			return false;
++		}
++
++		if ((drm_debug & DRM_UT_DRIVER) && (exec == NULL)) {
++			static const char * mxms_desc_name[] = {
++				"ODS", "SCCS", "TS", "IPS",
++				"GSD", "VSS", "BCS", "FCS",
++			};
++			u8 *dump = desc;
++			int i, j;
++
++			MXM_DBG(dev, "%4s: ", mxms_desc_name[type]);
++			for (j = headerlen - 1; j >= 0; j--)
++				printk("%02x", dump[j]);
++			printk("\n");
++			dump += headerlen;
++
++			for (i = 0; i < entries; i++, dump += recordlen) {
++				MXM_DBG(dev, "      ");
++				for (j = recordlen - 1; j >= 0; j--)
++					printk("%02x", dump[j]);
++				printk("\n");
++			}
++		}
++
++		if (types & (1 << type)) {
++			if (!exec(dev, desc, info))
++				return false;
++		}
++
++		desc += headerlen + (entries * recordlen);
++	}
++
++	return true;
++}
++
++static u8 *
++mxm_table(struct drm_device *dev, u8 *size)
++{
++	struct bit_entry x;
++
++	if (bit_table(dev, 'x', &x)) {
++		MXM_DBG(dev, "BIT 'x' table not present\n");
++		return NULL;
++	}
++
++	if (x.version != 1 || x.length < 3) {
++		MXM_MSG(dev, "BIT x table %d/%d unknown\n",
++			x.version, x.length);
++		return NULL;
++	}
++
++	*size = x.length;
++	return x.data;
++}
++
++/* These map MXM v2.x digital connection values to the appropriate SOR/link,
++ * hopefully they're correct for all boards within the same chipset...
++ *
++ * MXM v3.x VBIOS are nicer and provide pointers to these tables.
++ */
++static u8 nv84_sor_map[16] = {
++	0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
++	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
++};
++
++static u8 nv92_sor_map[16] = {
++	0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
++	0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
++};
++
++static u8 nv94_sor_map[16] = {
++	0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
++	0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
++};
++
++static u8 nv96_sor_map[16] = {
++	0x00, 0x14, 0x24, 0x00, 0x34, 0x00, 0x11, 0x31,
++	0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
++};
++
++static u8 nv98_sor_map[16] = {
++	0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
++	0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
++};
++
++static u8
++mxm_sor_map(struct drm_device *dev, u8 conn)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	u8 len, *mxm = mxm_table(dev, &len);
++	if (mxm && len >= 6) {
++		u8 *map = ROMPTR(dev, mxm[4]);
++		if (map) {
++			if (map[0] == 0x10) {
++				if (conn < map[3])
++					return map[map[1] + conn];
++				return 0x00;
++			}
++
++			MXM_MSG(dev, "unknown sor map 0x%02x\n", map[0]);
++		}
++	}
++
++	if (dev_priv->chipset == 0x84 || dev_priv->chipset == 0x86)
++		return nv84_sor_map[conn];
++	if (dev_priv->chipset == 0x92)
++		return nv92_sor_map[conn];
++	if (dev_priv->chipset == 0x94)
++		return nv94_sor_map[conn];
++	if (dev_priv->chipset == 0x96)
++		return nv96_sor_map[conn];
++	if (dev_priv->chipset == 0x98)
++		return nv98_sor_map[conn];
++
++	MXM_MSG(dev, "missing sor map\n");
++	return 0x00;
++}
++
++static u8
++mxm_ddc_map(struct drm_device *dev, u8 port)
++{
++	u8 len, *mxm = mxm_table(dev, &len);
++	if (mxm && len >= 8) {
++		u8 *map = ROMPTR(dev, mxm[6]);
++		if (map) {
++			if (map[0] == 0x10) {
++				if (port < map[3])
++					return map[map[1] + port];
++				return 0x00;
++			}
++
++			MXM_MSG(dev, "unknown ddc map 0x%02x\n", map[0]);
++		}
++	}
++
++	/* v2.x: directly write port as dcb i2cidx */
++	return (port << 4) | port;
++}
++
++struct mxms_odev {
++	u8 outp_type;
++	u8 conn_type;
++	u8 ddc_port;
++	u8 dig_conn;
++};
++
++static void
++mxms_output_device(struct drm_device *dev, u8 *pdata, struct mxms_odev *desc)
++{
++	u64 data = ROM32(pdata[0]);
++	if (mxms_version(dev) >= 0x0300)
++		data |= (u64)ROM16(pdata[4]) << 32;
++
++	desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
++	desc->ddc_port  = (data & 0x0000000000000f00ULL) >> 8;
++	desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
++	desc->dig_conn  = (data & 0x0000000000780000ULL) >> 19;
++}
++
++struct context {
++	u32 *outp;
++	struct mxms_odev desc;
++};
++
++static bool
++mxm_match_tmds_partner(struct drm_device *dev, u8 *data, void *info)
++{
++	struct context *ctx = info;
++	struct mxms_odev desc;
++
++	mxms_output_device(dev, data, &desc);
++	if (desc.outp_type == 2 &&
++	    desc.dig_conn == ctx->desc.dig_conn)
++		return false;
++	return true;
++}
++
++static bool
++mxm_match_dcb(struct drm_device *dev, u8 *data, void *info)
++{
++	struct context *ctx = info;
++	u64 desc = *(u64 *)data;
++
++	mxms_output_device(dev, data, &ctx->desc);
++
++	/* match dcb encoder type to mxm-ods device type */
++	if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
++		return true;
++
++	/* digital output, have some extra stuff to match here, there's a
++	 * table in the vbios that provides a mapping from the mxm digital
++	 * connection enum values to SOR/link
++	 */
++	if ((desc & 0x00000000000000f0) >= 0x20) {
++		/* check against sor index */
++		u8 link = mxm_sor_map(dev, ctx->desc.dig_conn);
++		if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
++			return true;
++
++		/* check dcb entry has a compatible link field */
++		link = (link & 0x30) >> 4;
++		if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
++			return true;
++	}
++
++	/* mark this descriptor accounted for by setting invalid device type,
++	 * except of course some manufactures don't follow specs properly and
++	 * we need to avoid killing off the TMDS function on DP connectors
++	 * if MXM-SIS is missing an entry for it.
++	 */
++	data[0] &= ~0xf0;
++	if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
++	    mxms_foreach(dev, 0x01, mxm_match_tmds_partner, ctx)) {
++		data[0] |= 0x20; /* modify descriptor to match TMDS now */
++	} else {
++		data[0] |= 0xf0;
++	}
++
++	return false;
++}
++
++static int
++mxm_dcb_sanitise_entry(struct drm_device *dev, void *data, int idx, u8 *dcbe)
++{
++	struct context ctx = { .outp = (u32 *)dcbe };
++	u8 type, i2cidx, link;
++	u8 *conn;
++
++	/* look for an output device structure that matches this dcb entry.
++	 * if one isn't found, disable it.
++	 */
++	if (mxms_foreach(dev, 0x01, mxm_match_dcb, &ctx)) {
++		MXM_DBG(dev, "disable %d: 0x%08x 0x%08x\n",
++			idx, ctx.outp[0], ctx.outp[1]);
++		ctx.outp[0] |= 0x0000000f;
++		return 0;
++	}
++
++	/* modify the output's ddc/aux port, there's a pointer to a table
++	 * with the mapping from mxm ddc/aux port to dcb i2c_index in the
++	 * vbios mxm table
++	 */
++	i2cidx = mxm_ddc_map(dev, ctx.desc.ddc_port);
++	if ((ctx.outp[0] & 0x0000000f) != OUTPUT_DP)
++		i2cidx = (i2cidx & 0x0f) << 4;
++	else
++		i2cidx = (i2cidx & 0xf0);
++
++	if (i2cidx != 0xf0) {
++		ctx.outp[0] &= ~0x000000f0;
++		ctx.outp[0] |= i2cidx;
++	}
++
++	/* override dcb sorconf.link, based on what mxm data says */
++	switch (ctx.desc.outp_type) {
++	case 0x00: /* Analog CRT */
++	case 0x01: /* Analog TV/HDTV */
++		break;
++	default:
++		link = mxm_sor_map(dev, ctx.desc.dig_conn) & 0x30;
++		ctx.outp[1] &= ~0x00000030;
++		ctx.outp[1] |= link;
++		break;
++	}
++
++	/* we may need to fixup various other vbios tables based on what
++	 * the descriptor says the connector type should be.
++	 *
++	 * in a lot of cases, the vbios tables will claim DVI-I is possible,
++	 * and the mxm data says the connector is really HDMI.  another
++	 * common example is DP->eDP.
++	 */
++	conn = dcb_conn(dev, (ctx.outp[0] & 0x0000f000) >> 12);
++	type = conn[0];
++	switch (ctx.desc.conn_type) {
++	case 0x01: /* LVDS */
++		ctx.outp[1] |= 0x00000004; /* use_power_scripts */
++		/* XXX: modify default link width in LVDS table */
++		break;
++	case 0x02: /* HDMI */
++		type = DCB_CONNECTOR_HDMI_1;
++		break;
++	case 0x03: /* DVI-D */
++		type = DCB_CONNECTOR_DVI_D;
++		break;
++	case 0x0e: /* eDP, falls through to DPint */
++		ctx.outp[1] |= 0x00010000;
++	case 0x07: /* DP internal, wtf is this?? HP8670w */
++		ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
++		type = DCB_CONNECTOR_eDP;
++		break;
++	default:
++		break;
++	}
++
++	if (mxms_version(dev) >= 0x0300)
++		conn[0] = type;
++
++	return 0;
++}
++
++static bool
++mxm_show_unmatched(struct drm_device *dev, u8 *data, void *info)
++{
++	u64 desc = *(u64 *)data;
++	if ((desc & 0xf0) != 0xf0)
++		MXM_MSG(dev, "unmatched output device 0x%016llx\n", desc);
++	return true;
++}
++
++static void
++mxm_dcb_sanitise(struct drm_device *dev)
++{
++	u8 *dcb = dcb_table(dev);
++	if (!dcb || dcb[0] != 0x40) {
++		MXM_DBG(dev, "unsupported DCB version\n");
++		return;
++	}
++
++	dcb_outp_foreach(dev, NULL, mxm_dcb_sanitise_entry);
++	mxms_foreach(dev, 0x01, mxm_show_unmatched, NULL);
++}
++
++static bool
++mxm_shadow_rom_fetch(struct nouveau_i2c_chan *i2c, u8 addr,
++		     u8 offset, u8 size, u8 *data)
++{
++	struct i2c_msg msgs[] = {
++		{ .addr = addr, .flags = 0, .len = 1, .buf = &offset },
++		{ .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
++	};
++
++	return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
++}
++
++static bool
++mxm_shadow_rom(struct drm_device *dev, u8 version)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_i2c_chan *i2c = NULL;
++	u8 i2cidx, mxms[6], addr, size;
++
++	i2cidx = mxm_ddc_map(dev, 1 /* LVDS_DDC */) & 0x0f;
++	if (i2cidx < 0x0f)
++		i2c = nouveau_i2c_find(dev, i2cidx);
++	if (!i2c)
++		return false;
++
++	addr = 0x54;
++	if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms)) {
++		addr = 0x56;
++		if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms))
++			return false;
++	}
++
++	dev_priv->mxms = mxms;
++	size = mxms_headerlen(dev) + mxms_structlen(dev);
++	dev_priv->mxms = kmalloc(size, GFP_KERNEL);
++
++	if (dev_priv->mxms &&
++	    mxm_shadow_rom_fetch(i2c, addr, 0, size, dev_priv->mxms))
++		return true;
++
++	kfree(dev_priv->mxms);
++	dev_priv->mxms = NULL;
++	return false;
++}
++
++#if defined(CONFIG_ACPI)
++static bool
++mxm_shadow_dsm(struct drm_device *dev, u8 version)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	static char muid[] = {
++		0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
++		0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
++	};
++	u32 mxms_args[] = { 0x00000000 };
++	union acpi_object args[4] = {
++		/* _DSM MUID */
++		{ .buffer.type = 3,
++		  .buffer.length = sizeof(muid),
++		  .buffer.pointer = muid,
++		},
++		/* spec says this can be zero to mean "highest revision", but
++		 * of course there's at least one bios out there which fails
++		 * unless you pass in exactly the version it supports..
++		 */
++		{ .integer.type = ACPI_TYPE_INTEGER,
++		  .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
++		},
++		/* MXMS function */
++		{ .integer.type = ACPI_TYPE_INTEGER,
++		  .integer.value = 0x00000010,
++		},
++		/* Pointer to MXMS arguments */
++		{ .buffer.type = ACPI_TYPE_BUFFER,
++		  .buffer.length = sizeof(mxms_args),
++		  .buffer.pointer = (char *)mxms_args,
++		},
++	};
++	struct acpi_object_list list = { ARRAY_SIZE(args), args };
++	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
++	union acpi_object *obj;
++	acpi_handle handle;
++	int ret;
++
++	handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
++	if (!handle)
++		return false;
++
++	ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
++	if (ret) {
++		MXM_DBG(dev, "DSM MXMS failed: %d\n", ret);
++		return false;
++	}
++
++	obj = retn.pointer;
++	if (obj->type == ACPI_TYPE_BUFFER) {
++		dev_priv->mxms = kmemdup(obj->buffer.pointer,
++					 obj->buffer.length, GFP_KERNEL);
++	} else
++	if (obj->type == ACPI_TYPE_INTEGER) {
++		MXM_DBG(dev, "DSM MXMS returned 0x%llx\n", obj->integer.value);
++	}
++
++	kfree(obj);
++	return dev_priv->mxms != NULL;
++}
++#endif
++
++#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
++
++#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
++
++static u8
++wmi_wmmx_mxmi(struct drm_device *dev, u8 version)
++{
++	u32 mxmi_args[] = { 0x494D584D /* MXMI */, version, 0 };
++	struct acpi_buffer args = { sizeof(mxmi_args), mxmi_args };
++	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
++	union acpi_object *obj;
++	acpi_status status;
++
++	status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
++	if (ACPI_FAILURE(status)) {
++		MXM_DBG(dev, "WMMX MXMI returned %d\n", status);
++		return 0x00;
++	}
++
++	obj = retn.pointer;
++	if (obj->type == ACPI_TYPE_INTEGER) {
++		version = obj->integer.value;
++		MXM_DBG(dev, "WMMX MXMI version %d.%d\n",
++			     (version >> 4), version & 0x0f);
++	} else {
++		version = 0;
++		MXM_DBG(dev, "WMMX MXMI returned non-integer\n");
++	}
++
++	kfree(obj);
++	return version;
++}
++
++static bool
++mxm_shadow_wmi(struct drm_device *dev, u8 version)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
++	struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
++	struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
++	union acpi_object *obj;
++	acpi_status status;
++
++	if (!wmi_has_guid(WMI_WMMX_GUID)) {
++		MXM_DBG(dev, "WMMX GUID not found\n");
++		return false;
++	}
++
++	mxms_args[1] = wmi_wmmx_mxmi(dev, 0x00);
++	if (!mxms_args[1])
++		mxms_args[1] = wmi_wmmx_mxmi(dev, version);
++	if (!mxms_args[1])
++		return false;
++
++	status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
++	if (ACPI_FAILURE(status)) {
++		MXM_DBG(dev, "WMMX MXMS returned %d\n", status);
++		return false;
++	}
++
++	obj = retn.pointer;
++	if (obj->type == ACPI_TYPE_BUFFER) {
++		dev_priv->mxms = kmemdup(obj->buffer.pointer,
++					 obj->buffer.length, GFP_KERNEL);
++	}
++
++	kfree(obj);
++	return dev_priv->mxms != NULL;
++}
++#endif
++
++struct mxm_shadow_h {
++	const char *name;
++	bool (*exec)(struct drm_device *, u8 version);
++} _mxm_shadow[] = {
++	{ "ROM", mxm_shadow_rom },
++#if defined(CONFIG_ACPI)
++	{ "DSM", mxm_shadow_dsm },
++#endif
++#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
++	{ "WMI", mxm_shadow_wmi },
++#endif
++	{}
++};
++
++static int
++mxm_shadow(struct drm_device *dev, u8 version)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct mxm_shadow_h *shadow = _mxm_shadow;
++	do {
++		MXM_DBG(dev, "checking %s\n", shadow->name);
++		if (shadow->exec(dev, version)) {
++			if (mxms_valid(dev))
++				return 0;
++			kfree(dev_priv->mxms);
++			dev_priv->mxms = NULL;
++		}
++	} while ((++shadow)->name);
++	return -ENOENT;
++}
++
++int
++nouveau_mxm_init(struct drm_device *dev)
++{
++	u8 mxm_size, *mxm = mxm_table(dev, &mxm_size);
++	if (!mxm || !mxm[0]) {
++		MXM_MSG(dev, "no VBIOS data, nothing to do\n");
++		return 0;
++	}
++
++	MXM_MSG(dev, "BIOS version %d.%d\n", mxm[0] >> 4, mxm[0] & 0x0f);
++
++	if (mxm_shadow(dev, mxm[0])) {
++		MXM_MSG(dev, "failed to locate valid SIS\n");
++#if 0
++		/* we should, perhaps, fall back to some kind of limited
++		 * mode here if the x86 vbios hasn't already done the
++		 * work for us (so we prevent loading with completely
++		 * whacked vbios tables).
++		 */
++		return -EINVAL;
++#else
++		return 0;
++#endif
++	}
++
++	MXM_MSG(dev, "MXMS Version %d.%d\n",
++		mxms_version(dev) >> 8, mxms_version(dev) & 0xff);
++	mxms_foreach(dev, 0, NULL, NULL);
++
++	if (nouveau_mxmdcb)
++		mxm_dcb_sanitise(dev);
++	return 0;
++}
++
++void
++nouveau_mxm_fini(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	kfree(dev_priv->mxms);
++	dev_priv->mxms = NULL;
++}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
+index 6abdbe6..2ef883c 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
++++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
+@@ -115,7 +115,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_gpuobj *nobj = NULL;
+ 	struct drm_mm_node *mem;
+-	uint32_t offset;
++	uint64_t offset;
+ 	int target, ret;
+ 
+ 	mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
+index 960c0ae..cc419fae 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_object.c
++++ b/drivers/gpu/drm/nouveau/nouveau_object.c
+@@ -723,14 +723,14 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
+ 	nv_wo32(chan->ramin, 0x020c, 0x000000ff);
+ 
+ 	/* map display semaphore buffers into channel's vm */
+-	if (dev_priv->card_type >= NV_D0)
+-		return 0;
+-
+-	for (i = 0; i < 2; i++) {
+-		struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];
+-
+-		ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm,
+-					 &chan->dispc_vma[i]);
++	for (i = 0; i < dev->mode_config.num_crtc; i++) {
++		struct nouveau_bo *bo;
++		if (dev_priv->card_type >= NV_D0)
++			bo = nvd0_display_crtc_sema(dev, i);
++		else
++			bo = nv50_display(dev)->crtc[i].sem.bo;
++
++		ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -879,9 +879,14 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
+ 
+ 	NV_DEBUG(dev, "ch%d\n", chan->id);
+ 
+-	if (dev_priv->card_type >= NV_50 && dev_priv->card_type <= NV_C0) {
++	if (dev_priv->card_type >= NV_D0) {
++		for (i = 0; i < dev->mode_config.num_crtc; i++) {
++			struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
++			nouveau_bo_vma_del(bo, &chan->dispc_vma[i]);
++		}
++	} else
++	if (dev_priv->card_type >= NV_50) {
+ 		struct nv50_display *disp = nv50_display(dev);
+-
+ 		for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ 			struct nv50_display_crtc *dispc = &disp->crtc[i];
+ 			nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
+index 33d03fb..69a528d 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
++++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
+@@ -27,6 +27,178 @@
+ #include "nouveau_drv.h"
+ #include "nouveau_pm.h"
+ 
++static u8 *
++nouveau_perf_table(struct drm_device *dev, u8 *ver)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->vbios;
++	struct bit_entry P;
++
++	if (!bit_table(dev, 'P', &P) && P.version && P.version <= 2) {
++		u8 *perf = ROMPTR(dev, P.data[0]);
++		if (perf) {
++			*ver = perf[0];
++			return perf;
++		}
++	}
++
++	if (bios->type == NVBIOS_BMP) {
++		if (bios->data[bios->offset + 6] >= 0x25) {
++			u8 *perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
++			if (perf) {
++				*ver = perf[1];
++				return perf;
++			}
++		}
++	}
++
++	return NULL;
++}
++
++static u8 *
++nouveau_perf_entry(struct drm_device *dev, int idx,
++		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
++{
++	u8 *perf = nouveau_perf_table(dev, ver);
++	if (perf) {
++		if (*ver >= 0x12 && *ver < 0x20 && idx < perf[2]) {
++			*hdr = perf[3];
++			*cnt = 0;
++			*len = 0;
++			return perf + perf[0] + idx * perf[3];
++		} else
++		if (*ver >= 0x20 && *ver < 0x40 && idx < perf[2]) {
++			*hdr = perf[3];
++			*cnt = perf[4];
++			*len = perf[5];
++			return perf + perf[1] + idx * (*hdr + (*cnt * *len));
++		} else
++		if (*ver >= 0x40 && *ver < 0x41 && idx < perf[5]) {
++			*hdr = perf[2];
++			*cnt = perf[4];
++			*len = perf[3];
++			return perf + perf[1] + idx * (*hdr + (*cnt * *len));
++		}
++	}
++	return NULL;
++}
++
++static u8 *
++nouveau_perf_rammap(struct drm_device *dev, u32 freq,
++		    u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct bit_entry P;
++	u8 *perf, i = 0;
++
++	if (!bit_table(dev, 'P', &P) && P.version == 2) {
++		u8 *rammap = ROMPTR(dev, P.data[4]);
++		if (rammap) {
++			u8 *ramcfg = rammap + rammap[1];
++
++			*ver = rammap[0];
++			*hdr = rammap[2];
++			*cnt = rammap[4];
++			*len = rammap[3];
++
++			freq /= 1000;
++			for (i = 0; i < rammap[5]; i++) {
++				if (freq >= ROM16(ramcfg[0]) &&
++				    freq <= ROM16(ramcfg[2]))
++					return ramcfg;
++
++				ramcfg += *hdr + (*cnt * *len);
++			}
++		}
++
++		return NULL;
++	}
++
++	if (dev_priv->chipset == 0x49 ||
++	    dev_priv->chipset == 0x4b)
++		freq /= 2;
++
++	while ((perf = nouveau_perf_entry(dev, i++, ver, hdr, cnt, len))) {
++		if (*ver >= 0x20 && *ver < 0x25) {
++			if (perf[0] != 0xff && freq <= ROM16(perf[11]) * 1000)
++				break;
++		} else
++		if (*ver >= 0x25 && *ver < 0x40) {
++			if (perf[0] != 0xff && freq <= ROM16(perf[12]) * 1000)
++				break;
++		}
++	}
++
++	if (perf) {
++		u8 *ramcfg = perf + *hdr;
++		*ver = 0x00;
++		*hdr = 0;
++		return ramcfg;
++	}
++
++	return NULL;
++}
++
++u8 *
++nouveau_perf_ramcfg(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->vbios;
++	u8 strap, hdr, cnt;
++	u8 *rammap;
++
++	strap = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
++	if (bios->ram_restrict_tbl_ptr)
++		strap = bios->data[bios->ram_restrict_tbl_ptr + strap];
++
++	rammap = nouveau_perf_rammap(dev, freq, ver, &hdr, &cnt, len);
++	if (rammap && strap < cnt)
++		return rammap + hdr + (strap * *len);
++
++	return NULL;
++}
++
++u8 *
++nouveau_perf_timing(struct drm_device *dev, u32 freq, u8 *ver, u8 *len)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->vbios;
++	struct bit_entry P;
++	u8 *perf, *timing = NULL;
++	u8 i = 0, hdr, cnt;
++
++	if (bios->type == NVBIOS_BMP) {
++		while ((perf = nouveau_perf_entry(dev, i++, ver, &hdr, &cnt,
++						  len)) && *ver == 0x15) {
++			if (freq <= ROM32(perf[5]) * 20) {
++				*ver = 0x00;
++				*len = 14;
++				return perf + 41;
++			}
++		}
++		return NULL;
++	}
++
++	if (!bit_table(dev, 'P', &P)) {
++		if (P.version == 1)
++			timing = ROMPTR(dev, P.data[4]);
++		else
++		if (P.version == 2)
++			timing = ROMPTR(dev, P.data[8]);
++	}
++
++	if (timing && timing[0] == 0x10) {
++		u8 *ramcfg = nouveau_perf_ramcfg(dev, freq, ver, len);
++		if (ramcfg && ramcfg[1] < timing[2]) {
++			*ver = timing[0];
++			*len = timing[3];
++			return timing + timing[1] + (ramcfg[1] * timing[3]);
++		}
++	}
++
++	return NULL;
++}
++
+ static void
+ legacy_perf_init(struct drm_device *dev)
+ {
+@@ -41,7 +213,7 @@ legacy_perf_init(struct drm_device *dev)
+ 		return;
+ 	}
+ 
+-	perf = ROMPTR(bios, bmp[0x73]);
++	perf = ROMPTR(dev, bmp[0x73]);
+ 	if (!perf) {
+ 		NV_DEBUG(dev, "No memclock table pointer found.\n");
+ 		return;
+@@ -72,75 +244,11 @@ legacy_perf_init(struct drm_device *dev)
+ 	pm->nr_perflvl = 1;
+ }
+ 
+-static struct nouveau_pm_memtiming *
+-nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
+-		    u16 memclk, u8 *entry, u8 recordlen, u8 entries)
+-{
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+-	struct nvbios *bios = &dev_priv->vbios;
+-	u8 ramcfg;
+-	int i;
+-
+-	/* perf v2 has a separate "timing map" table, we have to match
+-	 * the target memory clock to a specific entry, *then* use
+-	 * ramcfg to select the correct subentry
+-	 */
+-	if (P->version == 2) {
+-		u8 *tmap = ROMPTR(bios, P->data[4]);
+-		if (!tmap) {
+-			NV_DEBUG(dev, "no timing map pointer\n");
+-			return NULL;
+-		}
+-
+-		if (tmap[0] != 0x10) {
+-			NV_WARN(dev, "timing map 0x%02x unknown\n", tmap[0]);
+-			return NULL;
+-		}
+-
+-		entry = tmap + tmap[1];
+-		recordlen = tmap[2] + (tmap[4] * tmap[3]);
+-		for (i = 0; i < tmap[5]; i++, entry += recordlen) {
+-			if (memclk >= ROM16(entry[0]) &&
+-			    memclk <= ROM16(entry[2]))
+-				break;
+-		}
+-
+-		if (i == tmap[5]) {
+-			NV_WARN(dev, "no match in timing map table\n");
+-			return NULL;
+-		}
+-
+-		entry += tmap[2];
+-		recordlen = tmap[3];
+-		entries   = tmap[4];
+-	}
+-
+-	ramcfg = (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
+-	if (bios->ram_restrict_tbl_ptr)
+-		ramcfg = bios->data[bios->ram_restrict_tbl_ptr + ramcfg];
+-
+-	if (ramcfg >= entries) {
+-		NV_WARN(dev, "ramcfg strap out of bounds!\n");
+-		return NULL;
+-	}
+-
+-	entry += ramcfg * recordlen;
+-	if (entry[1] >= pm->memtimings.nr_timing) {
+-		if (entry[1] != 0xff)
+-			NV_WARN(dev, "timingset %d does not exist\n", entry[1]);
+-		return NULL;
+-	}
+-
+-	return &pm->memtimings.timing[entry[1]];
+-}
+-
+ static void
+-nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
+-		     struct nouveau_pm_level *perflvl)
++nouveau_perf_voltage(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nvbios *bios = &dev_priv->vbios;
++	struct bit_entry P;
+ 	u8 *vmap;
+ 	int id;
+ 
+@@ -159,13 +267,13 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
+ 	/* on newer ones, the perflvl stores an index into yet another
+ 	 * vbios table containing a min/max voltage value for the perflvl
+ 	 */
+-	if (P->version != 2 || P->length < 34) {
++	if (bit_table(dev, 'P', &P) || P.version != 2 || P.length < 34) {
+ 		NV_DEBUG(dev, "where's our volt map table ptr? %d %d\n",
+-			 P->version, P->length);
++			 P.version, P.length);
+ 		return;
+ 	}
+ 
+-	vmap = ROMPTR(bios, P->data[32]);
++	vmap = ROMPTR(dev, P.data[32]);
+ 	if (!vmap) {
+ 		NV_DEBUG(dev, "volt map table pointer invalid\n");
+ 		return;
+@@ -184,129 +292,70 @@ nouveau_perf_init(struct drm_device *dev)
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ 	struct nvbios *bios = &dev_priv->vbios;
+-	struct bit_entry P;
+-	struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
+-	struct nouveau_pm_tbl_header mt_hdr;
+-	u8 version, headerlen, recordlen, entries;
+-	u8 *perf, *entry;
+-	int vid, i;
+-
+-	if (bios->type == NVBIOS_BIT) {
+-		if (bit_table(dev, 'P', &P))
+-			return;
+-
+-		if (P.version != 1 && P.version != 2) {
+-			NV_WARN(dev, "unknown perf for BIT P %d\n", P.version);
+-			return;
+-		}
+-
+-		perf = ROMPTR(bios, P.data[0]);
+-		version   = perf[0];
+-		headerlen = perf[1];
+-		if (version < 0x40) {
+-			recordlen = perf[3] + (perf[4] * perf[5]);
+-			entries   = perf[2];
+-		} else {
+-			recordlen = perf[2] + (perf[3] * perf[4]);
+-			entries   = perf[5];
+-		}
+-	} else {
+-		if (bios->data[bios->offset + 6] < 0x25) {
+-			legacy_perf_init(dev);
+-			return;
+-		}
+-
+-		perf = ROMPTR(bios, bios->data[bios->offset + 0x94]);
+-		if (!perf) {
+-			NV_DEBUG(dev, "perf table pointer invalid\n");
+-			return;
+-		}
+-
+-		version   = perf[1];
+-		headerlen = perf[0];
+-		recordlen = perf[3];
+-		entries   = perf[2];
+-	}
++	u8 *perf, ver, hdr, cnt, len;
++	int ret, vid, i = -1;
+ 
+-	if (entries > NOUVEAU_PM_MAX_LEVEL) {
+-		NV_DEBUG(dev, "perf table has too many entries - buggy vbios?\n");
+-		entries = NOUVEAU_PM_MAX_LEVEL;
++	if (bios->type == NVBIOS_BMP && bios->data[bios->offset + 6] < 0x25) {
++		legacy_perf_init(dev);
++		return;
+ 	}
+ 
+-	entry = perf + headerlen;
+-
+-	/* For version 0x15, initialize memtiming table */
+-	if(version == 0x15) {
+-		memtimings->timing =
+-				kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
+-		if (!memtimings->timing) {
+-			NV_WARN(dev,"Could not allocate memtiming table\n");
+-			return;
+-		}
++	perf = nouveau_perf_table(dev, &ver);
++	if (ver >= 0x20 && ver < 0x40)
++		pm->fan.pwm_divisor = ROM16(perf[6]);
+ 
+-		mt_hdr.entry_cnt = entries;
+-		mt_hdr.entry_len = 14;
+-		mt_hdr.version = version;
+-		mt_hdr.header_len = 4;
+-	}
+-
+-	for (i = 0; i < entries; i++) {
++	while ((perf = nouveau_perf_entry(dev, ++i, &ver, &hdr, &cnt, &len))) {
+ 		struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
+ 
+-		perflvl->timing = NULL;
+-
+-		if (entry[0] == 0xff) {
+-			entry += recordlen;
++		if (perf[0] == 0xff)
+ 			continue;
+-		}
+ 
+-		switch (version) {
++		switch (ver) {
+ 		case 0x12:
+ 		case 0x13:
+ 		case 0x15:
+-			perflvl->fanspeed = entry[55];
+-			if (recordlen > 56)
+-				perflvl->volt_min = entry[56];
+-			perflvl->core = ROM32(entry[1]) * 10;
+-			perflvl->memory = ROM32(entry[5]) * 20;
++			perflvl->fanspeed = perf[55];
++			if (hdr > 56)
++				perflvl->volt_min = perf[56];
++			perflvl->core = ROM32(perf[1]) * 10;
++			perflvl->memory = ROM32(perf[5]) * 20;
+ 			break;
+ 		case 0x21:
+ 		case 0x23:
+ 		case 0x24:
+-			perflvl->fanspeed = entry[4];
+-			perflvl->volt_min = entry[5];
+-			perflvl->shader = ROM16(entry[6]) * 1000;
++			perflvl->fanspeed = perf[4];
++			perflvl->volt_min = perf[5];
++			perflvl->shader = ROM16(perf[6]) * 1000;
+ 			perflvl->core = perflvl->shader;
+-			perflvl->core += (signed char)entry[8] * 1000;
++			perflvl->core += (signed char)perf[8] * 1000;
+ 			if (dev_priv->chipset == 0x49 ||
+ 			    dev_priv->chipset == 0x4b)
+-				perflvl->memory = ROM16(entry[11]) * 1000;
++				perflvl->memory = ROM16(perf[11]) * 1000;
+ 			else
+-				perflvl->memory = ROM16(entry[11]) * 2000;
+-
++				perflvl->memory = ROM16(perf[11]) * 2000;
+ 			break;
+ 		case 0x25:
+-			perflvl->fanspeed = entry[4];
+-			perflvl->volt_min = entry[5];
+-			perflvl->core = ROM16(entry[6]) * 1000;
+-			perflvl->shader = ROM16(entry[10]) * 1000;
+-			perflvl->memory = ROM16(entry[12]) * 1000;
++			perflvl->fanspeed = perf[4];
++			perflvl->volt_min = perf[5];
++			perflvl->core = ROM16(perf[6]) * 1000;
++			perflvl->shader = ROM16(perf[10]) * 1000;
++			perflvl->memory = ROM16(perf[12]) * 1000;
+ 			break;
+ 		case 0x30:
+-			perflvl->memscript = ROM16(entry[2]);
++			perflvl->memscript = ROM16(perf[2]);
+ 		case 0x35:
+-			perflvl->fanspeed = entry[6];
+-			perflvl->volt_min = entry[7];
+-			perflvl->core = ROM16(entry[8]) * 1000;
+-			perflvl->shader = ROM16(entry[10]) * 1000;
+-			perflvl->memory = ROM16(entry[12]) * 1000;
+-			/*XXX: confirm on 0x35 */
+-			perflvl->unk05 = ROM16(entry[16]) * 1000;
++			perflvl->fanspeed = perf[6];
++			perflvl->volt_min = perf[7];
++			perflvl->core = ROM16(perf[8]) * 1000;
++			perflvl->shader = ROM16(perf[10]) * 1000;
++			perflvl->memory = ROM16(perf[12]) * 1000;
++			perflvl->vdec = ROM16(perf[16]) * 1000;
++			perflvl->dom6 = ROM16(perf[20]) * 1000;
+ 			break;
+ 		case 0x40:
+-#define subent(n) (ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000
++#define subent(n) ((ROM16(perf[hdr + (n) * len]) & 0xfff) * 1000)
+ 			perflvl->fanspeed = 0; /*XXX*/
+-			perflvl->volt_min = entry[2];
++			perflvl->volt_min = perf[2];
+ 			if (dev_priv->card_type == NV_50) {
+ 				perflvl->core   = subent(0);
+ 				perflvl->shader = subent(1);
+@@ -329,36 +378,34 @@ nouveau_perf_init(struct drm_device *dev)
+ 		}
+ 
+ 		/* make sure vid is valid */
+-		nouveau_perf_voltage(dev, &P, perflvl);
++		nouveau_perf_voltage(dev, perflvl);
+ 		if (pm->voltage.supported && perflvl->volt_min) {
+ 			vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
+ 			if (vid < 0) {
+-				NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i);
+-				entry += recordlen;
++				NV_DEBUG(dev, "perflvl %d, bad vid\n", i);
+ 				continue;
+ 			}
+ 		}
+ 
+ 		/* get the corresponding memory timings */
+-		if (version == 0x15) {
+-			memtimings->timing[i].id = i;
+-			nv30_mem_timing_entry(dev,&mt_hdr,(struct nouveau_pm_tbl_entry*) &entry[41],0,&memtimings->timing[i]);
+-			perflvl->timing = &memtimings->timing[i];
+-		} else if (version > 0x15) {
+-			/* last 3 args are for < 0x40, ignored for >= 0x40 */
+-			perflvl->timing =
+-				nouveau_perf_timing(dev, &P,
+-						    perflvl->memory / 1000,
+-						    entry + perf[3],
+-						    perf[5], perf[4]);
++		ret = nouveau_mem_timing_calc(dev, perflvl->memory,
++					          &perflvl->timing);
++		if (ret) {
++			NV_DEBUG(dev, "perflvl %d, bad timing: %d\n", i, ret);
++			continue;
+ 		}
+ 
+ 		snprintf(perflvl->name, sizeof(perflvl->name),
+ 			 "performance_level_%d", i);
+ 		perflvl->id = i;
+-		pm->nr_perflvl++;
+ 
+-		entry += recordlen;
++		snprintf(perflvl->profile.name, sizeof(perflvl->profile.name),
++			 "%d", perflvl->id);
++		perflvl->profile.func = &nouveau_pm_static_profile_func;
++		list_add_tail(&perflvl->profile.head, &pm->profiles);
++
++
++		pm->nr_perflvl++;
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
+index a539fd2..da3e7c3 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
+@@ -26,6 +26,7 @@
+ 
+ #include "nouveau_drv.h"
+ #include "nouveau_pm.h"
++#include "nouveau_gpio.h"
+ 
+ #ifdef CONFIG_ACPI
+ #include <linux/acpi.h>
+@@ -35,22 +36,98 @@
+ #include <linux/hwmon-sysfs.h>
+ 
+ static int
+-nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+-		     u8 id, u32 khz)
++nouveau_pwmfan_get(struct drm_device *dev)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+-	void *pre_state;
++	struct gpio_func gpio;
++	u32 divs, duty;
++	int ret;
+ 
+-	if (khz == 0)
+-		return 0;
++	if (!pm->pwm_get)
++		return -ENODEV;
++
++	ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
++	if (ret == 0) {
++		ret = pm->pwm_get(dev, gpio.line, &divs, &duty);
++		if (ret == 0 && divs) {
++			divs = max(divs, duty);
++			if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
++				duty = divs - duty;
++			return (duty * 100) / divs;
++		}
++
++		return nouveau_gpio_func_get(dev, gpio.func) * 100;
++	}
++
++	return -ENODEV;
++}
++
++static int
++nouveau_pwmfan_set(struct drm_device *dev, int percent)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
++	struct gpio_func gpio;
++	u32 divs, duty;
++	int ret;
++
++	if (!pm->pwm_set)
++		return -ENODEV;
++
++	ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
++	if (ret == 0) {
++		divs = pm->fan.pwm_divisor;
++		if (pm->fan.pwm_freq) {
++			/*XXX: PNVIO clock more than likely... */
++			divs = 135000 / pm->fan.pwm_freq;
++			if (dev_priv->chipset < 0xa3)
++				divs /= 4;
++		}
++
++		duty = ((divs * percent) + 99) / 100;
++		if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
++			duty = divs - duty;
++
++		ret = pm->pwm_set(dev, gpio.line, divs, duty);
++		if (!ret)
++			pm->fan.percent = percent;
++		return ret;
++	}
++
++	return -ENODEV;
++}
++
++static int
++nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
++		       struct nouveau_pm_level *a, struct nouveau_pm_level *b)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
++	int ret;
+ 
+-	pre_state = pm->clock_pre(dev, perflvl, id, khz);
+-	if (IS_ERR(pre_state))
+-		return PTR_ERR(pre_state);
++	/*XXX: not on all boards, we should control based on temperature
++	 *     on recent boards..  or maybe on some other factor we don't
++	 *     know about?
++	 */
++	if (a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
++		ret = nouveau_pwmfan_set(dev, perflvl->fanspeed);
++		if (ret && ret != -ENODEV) {
++			NV_ERROR(dev, "fanspeed set failed: %d\n", ret);
++			return ret;
++		}
++	}
++
++	if (pm->voltage.supported && pm->voltage_set) {
++		if (perflvl->volt_min && b->volt_min > a->volt_min) {
++			ret = pm->voltage_set(dev, perflvl->volt_min);
++			if (ret) {
++				NV_ERROR(dev, "voltage set failed: %d\n", ret);
++				return ret;
++			}
++		}
++	}
+ 
+-	if (pre_state)
+-		pm->clock_set(dev, pre_state);
+ 	return 0;
+ }
+ 
+@@ -59,34 +136,90 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
++	void *state;
+ 	int ret;
+ 
+ 	if (perflvl == pm->cur)
+ 		return 0;
+ 
+-	if (pm->voltage.supported && pm->voltage_set && perflvl->volt_min) {
+-		ret = pm->voltage_set(dev, perflvl->volt_min);
+-		if (ret) {
+-			NV_ERROR(dev, "voltage_set %d failed: %d\n",
+-				 perflvl->volt_min, ret);
+-		}
+-	}
++	ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl);
++	if (ret)
++		return ret;
+ 
+-	if (pm->clocks_pre) {
+-		void *state = pm->clocks_pre(dev, perflvl);
+-		if (IS_ERR(state))
+-			return PTR_ERR(state);
+-		pm->clocks_set(dev, state);
+-	} else
+-	if (pm->clock_set) {
+-		nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
+-		nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
+-		nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
+-		nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
++	state = pm->clocks_pre(dev, perflvl);
++	if (IS_ERR(state)) {
++		ret = PTR_ERR(state);
++		goto error;
+ 	}
++	ret = pm->clocks_set(dev, state);
++	if (ret)
++		goto error;
++
++	ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
++	if (ret)
++		return ret;
+ 
+ 	pm->cur = perflvl;
+ 	return 0;
++
++error:
++	/* restore the fan speed and voltage before leaving */
++	nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
++	return ret;
++}
++
++void
++nouveau_pm_trigger(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
++	struct nouveau_pm_profile *profile = NULL;
++	struct nouveau_pm_level *perflvl = NULL;
++	int ret;
++
++	/* select power profile based on current power source */
++	if (power_supply_is_system_supplied())
++		profile = pm->profile_ac;
++	else
++		profile = pm->profile_dc;
++
++	if (profile != pm->profile) {
++		pm->profile->func->fini(pm->profile);
++		pm->profile = profile;
++		pm->profile->func->init(pm->profile);
++	}
++
++	/* select performance level based on profile */
++	perflvl = profile->func->select(profile);
++
++	/* change perflvl, if necessary */
++	if (perflvl != pm->cur) {
++		struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
++		u64 time0 = ptimer->read(dev);
++
++		NV_INFO(dev, "setting performance level: %d", perflvl->id);
++		ret = nouveau_pm_perflvl_set(dev, perflvl);
++		if (ret)
++			NV_INFO(dev, "> reclocking failed: %d\n\n", ret);
++
++		NV_INFO(dev, "> reclocking took %lluns\n\n",
++			     ptimer->read(dev) - time0);
++	}
++}
++
++static struct nouveau_pm_profile *
++profile_find(struct drm_device *dev, const char *string)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
++	struct nouveau_pm_profile *profile;
++
++	list_for_each_entry(profile, &pm->profiles, head) {
++		if (!strncmp(profile->name, string, sizeof(profile->name)))
++			return profile;
++	}
++
++	return NULL;
+ }
+ 
+ static int
+@@ -94,33 +227,55 @@ nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+-	struct nouveau_pm_level *perflvl = NULL;
++	struct nouveau_pm_profile *ac = NULL, *dc = NULL;
++	char string[16], *cur = string, *ptr;
+ 
+ 	/* safety precaution, for now */
+ 	if (nouveau_perflvl_wr != 7777)
+ 		return -EPERM;
+ 
+-	if (!strncmp(profile, "boot", 4))
+-		perflvl = &pm->boot;
+-	else {
+-		int pl = simple_strtol(profile, NULL, 10);
+-		int i;
++	strncpy(string, profile, sizeof(string));
++	string[sizeof(string) - 1] = 0;
++	if ((ptr = strchr(string, '\n')))
++		*ptr = '\0';
+ 
+-		for (i = 0; i < pm->nr_perflvl; i++) {
+-			if (pm->perflvl[i].id == pl) {
+-				perflvl = &pm->perflvl[i];
+-				break;
+-			}
+-		}
++	ptr = strsep(&cur, ",");
++	if (ptr)
++		ac = profile_find(dev, ptr);
+ 
+-		if (!perflvl)
+-			return -EINVAL;
+-	}
++	ptr = strsep(&cur, ",");
++	if (ptr)
++		dc = profile_find(dev, ptr);
++	else
++		dc = ac;
++
++	if (ac == NULL || dc == NULL)
++		return -EINVAL;
+ 
+-	NV_INFO(dev, "setting performance level: %s\n", profile);
+-	return nouveau_pm_perflvl_set(dev, perflvl);
++	pm->profile_ac = ac;
++	pm->profile_dc = dc;
++	nouveau_pm_trigger(dev);
++	return 0;
+ }
+ 
++static void
++nouveau_pm_static_dummy(struct nouveau_pm_profile *profile)
++{
++}
++
++static struct nouveau_pm_level *
++nouveau_pm_static_select(struct nouveau_pm_profile *profile)
++{
++	return container_of(profile, struct nouveau_pm_level, profile);
++}
++
++const struct nouveau_pm_profile_func nouveau_pm_static_profile_func = {
++	.destroy = nouveau_pm_static_dummy,
++	.init = nouveau_pm_static_dummy,
++	.fini = nouveau_pm_static_dummy,
++	.select = nouveau_pm_static_select,
++};
++
+ static int
+ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+ {
+@@ -134,23 +289,6 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+ 		ret = pm->clocks_get(dev, perflvl);
+ 		if (ret)
+ 			return ret;
+-	} else
+-	if (pm->clock_get) {
+-		ret = pm->clock_get(dev, PLL_CORE);
+-		if (ret > 0)
+-			perflvl->core = ret;
+-
+-		ret = pm->clock_get(dev, PLL_MEMORY);
+-		if (ret > 0)
+-			perflvl->memory = ret;
+-
+-		ret = pm->clock_get(dev, PLL_SHADER);
+-		if (ret > 0)
+-			perflvl->shader = ret;
+-
+-		ret = pm->clock_get(dev, PLL_UNK05);
+-		if (ret > 0)
+-			perflvl->unk05 = ret;
+ 	}
+ 
+ 	if (pm->voltage.supported && pm->voltage_get) {
+@@ -161,13 +299,18 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+ 		}
+ 	}
+ 
++	ret = nouveau_pwmfan_get(dev);
++	if (ret > 0)
++		perflvl->fanspeed = ret;
++
++	nouveau_mem_timing_read(dev, &perflvl->timing);
+ 	return 0;
+ }
+ 
+ static void
+ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
+ {
+-	char c[16], s[16], v[32], f[16], t[16], m[16];
++	char c[16], s[16], v[32], f[16], m[16];
+ 
+ 	c[0] = '\0';
+ 	if (perflvl->core)
+@@ -195,18 +338,15 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
+ 	if (perflvl->fanspeed)
+ 		snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
+ 
+-	t[0] = '\0';
+-	if (perflvl->timing)
+-		snprintf(t, sizeof(t), " timing %d", perflvl->timing->id);
+-
+-	snprintf(ptr, len, "%s%s%s%s%s%s\n", c, s, m, t, v, f);
++	snprintf(ptr, len, "%s%s%s%s%s\n", c, s, m, v, f);
+ }
+ 
+ static ssize_t
+ nouveau_pm_get_perflvl_info(struct device *d,
+ 			    struct device_attribute *a, char *buf)
+ {
+-	struct nouveau_pm_level *perflvl = (struct nouveau_pm_level *)a;
++	struct nouveau_pm_level *perflvl =
++		container_of(a, struct nouveau_pm_level, dev_attr);
+ 	char *ptr = buf;
+ 	int len = PAGE_SIZE;
+ 
+@@ -228,12 +368,8 @@ nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
+ 	int len = PAGE_SIZE, ret;
+ 	char *ptr = buf;
+ 
+-	if (!pm->cur)
+-		snprintf(ptr, len, "setting: boot\n");
+-	else if (pm->cur == &pm->boot)
+-		snprintf(ptr, len, "setting: boot\nc:");
+-	else
+-		snprintf(ptr, len, "setting: static %d\nc:", pm->cur->id);
++	snprintf(ptr, len, "profile: %s, %s\nc:",
++		 pm->profile_ac->name, pm->profile_dc->name);
+ 	ptr += strlen(buf);
+ 	len -= strlen(buf);
+ 
+@@ -345,7 +481,7 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
+ 	struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+ 	long value;
+ 
+-	if (strict_strtol(buf, 10, &value) == -EINVAL)
++	if (kstrtol(buf, 10, &value) == -EINVAL)
+ 		return count;
+ 
+ 	temp->down_clock = value/1000;
+@@ -380,7 +516,7 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
+ 	struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+ 	long value;
+ 
+-	if (strict_strtol(buf, 10, &value) == -EINVAL)
++	if (kstrtol(buf, 10, &value) == -EINVAL)
+ 		return count;
+ 
+ 	temp->critical = value/1000;
+@@ -412,6 +548,172 @@ static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
+ 						nouveau_hwmon_show_update_rate,
+ 						NULL, 0);
+ 
++static ssize_t
++nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
++			      char *buf)
++{
++	struct drm_device *dev = dev_get_drvdata(d);
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
++	struct gpio_func gpio;
++	u32 cycles, cur, prev;
++	u64 start;
++	int ret;
++
++	ret = nouveau_gpio_find(dev, 0, DCB_GPIO_FAN_SENSE, 0xff, &gpio);
++	if (ret)
++		return ret;
++
++	/* Monitor the GPIO input 0x3b for 250ms.
++	 * When the fan spins, it changes the value of GPIO FAN_SENSE.
++	 * We get 4 changes (0 -> 1 -> 0 -> 1 -> [...]) per complete rotation.
++	 */
++	start = ptimer->read(dev);
++	prev = nouveau_gpio_sense(dev, 0, gpio.line);
++	cycles = 0;
++	do {
++		cur = nouveau_gpio_sense(dev, 0, gpio.line);
++		if (prev != cur) {
++			cycles++;
++			prev = cur;
++		}
++
++		usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
++	} while (ptimer->read(dev) - start < 250000000);
++
++	/* interpolate to get rpm */
++	return sprintf(buf, "%i\n", cycles / 4 * 4 * 60);
++}
++static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
++			  NULL, 0);
++
++static ssize_t
++nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf)
++{
++	struct drm_device *dev = dev_get_drvdata(d);
++	int ret;
++
++	ret = nouveau_pwmfan_get(dev);
++	if (ret < 0)
++		return ret;
++
++	return sprintf(buf, "%i\n", ret);
++}
++
++static ssize_t
++nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a,
++		       const char *buf, size_t count)
++{
++	struct drm_device *dev = dev_get_drvdata(d);
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
++	int ret = -ENODEV;
++	long value;
++
++	if (nouveau_perflvl_wr != 7777)
++		return -EPERM;
++
++	if (kstrtol(buf, 10, &value) == -EINVAL)
++		return -EINVAL;
++
++	if (value < pm->fan.min_duty)
++		value = pm->fan.min_duty;
++	if (value > pm->fan.max_duty)
++		value = pm->fan.max_duty;
++
++	ret = nouveau_pwmfan_set(dev, value);
++	if (ret)
++		return ret;
++
++	return count;
++}
++
++static SENSOR_DEVICE_ATTR(pwm0, S_IRUGO | S_IWUSR,
++			  nouveau_hwmon_get_pwm0,
++			  nouveau_hwmon_set_pwm0, 0);
++
++static ssize_t
++nouveau_hwmon_get_pwm0_min(struct device *d,
++			   struct device_attribute *a, char *buf)
++{
++	struct drm_device *dev = dev_get_drvdata(d);
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
++
++	return sprintf(buf, "%i\n", pm->fan.min_duty);
++}
++
++static ssize_t
++nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a,
++			   const char *buf, size_t count)
++{
++	struct drm_device *dev = dev_get_drvdata(d);
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
++	long value;
++
++	if (kstrtol(buf, 10, &value) == -EINVAL)
++		return -EINVAL;
++
++	if (value < 0)
++		value = 0;
++
++	if (pm->fan.max_duty - value < 10)
++		value = pm->fan.max_duty - 10;
++
++	if (value < 10)
++		pm->fan.min_duty = 10;
++	else
++		pm->fan.min_duty = value;
++
++	return count;
++}
++
++static SENSOR_DEVICE_ATTR(pwm0_min, S_IRUGO | S_IWUSR,
++			  nouveau_hwmon_get_pwm0_min,
++			  nouveau_hwmon_set_pwm0_min, 0);
++
++static ssize_t
++nouveau_hwmon_get_pwm0_max(struct device *d,
++			   struct device_attribute *a, char *buf)
++{
++	struct drm_device *dev = dev_get_drvdata(d);
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
++
++	return sprintf(buf, "%i\n", pm->fan.max_duty);
++}
++
++static ssize_t
++nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a,
++			   const char *buf, size_t count)
++{
++	struct drm_device *dev = dev_get_drvdata(d);
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
++	long value;
++
++	if (kstrtol(buf, 10, &value) == -EINVAL)
++		return -EINVAL;
++
++	if (value < 0)
++		value = 0;
++
++	if (value - pm->fan.min_duty < 10)
++		value = pm->fan.min_duty + 10;
++
++	if (value > 100)
++		pm->fan.max_duty = 100;
++	else
++		pm->fan.max_duty = value;
++
++	return count;
++}
++
++static SENSOR_DEVICE_ATTR(pwm0_max, S_IRUGO | S_IWUSR,
++			  nouveau_hwmon_get_pwm0_max,
++			  nouveau_hwmon_set_pwm0_max, 0);
++
+ static struct attribute *hwmon_attributes[] = {
+ 	&sensor_dev_attr_temp1_input.dev_attr.attr,
+ 	&sensor_dev_attr_temp1_max.dev_attr.attr,
+@@ -420,20 +722,36 @@ static struct attribute *hwmon_attributes[] = {
+ 	&sensor_dev_attr_update_rate.dev_attr.attr,
+ 	NULL
+ };
++static struct attribute *hwmon_fan_rpm_attributes[] = {
++	&sensor_dev_attr_fan0_input.dev_attr.attr,
++	NULL
++};
++static struct attribute *hwmon_pwm_fan_attributes[] = {
++	&sensor_dev_attr_pwm0.dev_attr.attr,
++	&sensor_dev_attr_pwm0_min.dev_attr.attr,
++	&sensor_dev_attr_pwm0_max.dev_attr.attr,
++	NULL
++};
+ 
+ static const struct attribute_group hwmon_attrgroup = {
+ 	.attrs = hwmon_attributes,
+ };
++static const struct attribute_group hwmon_fan_rpm_attrgroup = {
++	.attrs = hwmon_fan_rpm_attributes,
++};
++static const struct attribute_group hwmon_pwm_fan_attrgroup = {
++	.attrs = hwmon_pwm_fan_attributes,
++};
+ #endif
+ 
+ static int
+ nouveau_hwmon_init(struct drm_device *dev)
+ {
+-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
++#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
+ 	struct device *hwmon_dev;
+-	int ret;
++	int ret = 0;
+ 
+ 	if (!pm->temp_get)
+ 		return -ENODEV;
+@@ -446,17 +764,46 @@ nouveau_hwmon_init(struct drm_device *dev)
+ 		return ret;
+ 	}
+ 	dev_set_drvdata(hwmon_dev, dev);
++
++	/* default sysfs entries */
+ 	ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
+ 	if (ret) {
+-		NV_ERROR(dev,
+-			"Unable to create hwmon sysfs file: %d\n", ret);
+-		hwmon_device_unregister(hwmon_dev);
+-		return ret;
++		if (ret)
++			goto error;
++	}
++
++	/* if the card has a pwm fan */
++	/*XXX: incorrect, need better detection for this, some boards have
++	 *     the gpio entries for pwm fan control even when there's no
++	 *     actual fan connected to it... therm table? */
++	if (nouveau_pwmfan_get(dev) >= 0) {
++		ret = sysfs_create_group(&dev->pdev->dev.kobj,
++					 &hwmon_pwm_fan_attrgroup);
++		if (ret)
++			goto error;
++	}
++
++	/* if the card can read the fan rpm */
++	if (nouveau_gpio_func_valid(dev, DCB_GPIO_FAN_SENSE)) {
++		ret = sysfs_create_group(&dev->pdev->dev.kobj,
++					 &hwmon_fan_rpm_attrgroup);
++		if (ret)
++			goto error;
+ 	}
+ 
+ 	pm->hwmon = hwmon_dev;
+-#endif
++
++	return 0;
++
++error:
++	NV_ERROR(dev, "Unable to create some hwmon sysfs files: %d\n", ret);
++	hwmon_device_unregister(hwmon_dev);
++	pm->hwmon = NULL;
++	return ret;
++#else
++	pm->hwmon = NULL;
+ 	return 0;
++#endif
+ }
+ 
+ static void
+@@ -468,6 +815,11 @@ nouveau_hwmon_fini(struct drm_device *dev)
+ 
+ 	if (pm->hwmon) {
+ 		sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
++		sysfs_remove_group(&dev->pdev->dev.kobj,
++				   &hwmon_pwm_fan_attrgroup);
++		sysfs_remove_group(&dev->pdev->dev.kobj,
++				   &hwmon_fan_rpm_attrgroup);
++
+ 		hwmon_device_unregister(pm->hwmon);
+ 	}
+ #endif
+@@ -486,6 +838,7 @@ nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
+ 		bool ac = power_supply_is_system_supplied();
+ 
+ 		NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC");
++		nouveau_pm_trigger(dev);
+ 	}
+ 
+ 	return NOTIFY_OK;
+@@ -500,35 +853,48 @@ nouveau_pm_init(struct drm_device *dev)
+ 	char info[256];
+ 	int ret, i;
+ 
+-	nouveau_mem_timing_init(dev);
++	/* parse aux tables from vbios */
+ 	nouveau_volt_init(dev);
+-	nouveau_perf_init(dev);
+ 	nouveau_temp_init(dev);
+ 
++	/* determine current ("boot") performance level */
++	ret = nouveau_pm_perflvl_get(dev, &pm->boot);
++	if (ret) {
++		NV_ERROR(dev, "failed to determine boot perflvl\n");
++		return ret;
++	}
++
++	strncpy(pm->boot.name, "boot", 4);
++	strncpy(pm->boot.profile.name, "boot", 4);
++	pm->boot.profile.func = &nouveau_pm_static_profile_func;
++
++	INIT_LIST_HEAD(&pm->profiles);
++	list_add(&pm->boot.profile.head, &pm->profiles);
++
++	pm->profile_ac = &pm->boot.profile;
++	pm->profile_dc = &pm->boot.profile;
++	pm->profile = &pm->boot.profile;
++	pm->cur = &pm->boot;
++
++	/* add performance levels from vbios */
++	nouveau_perf_init(dev);
++
++	/* display available performance levels */
+ 	NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
+ 	for (i = 0; i < pm->nr_perflvl; i++) {
+ 		nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
+ 		NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info);
+ 	}
+ 
+-	/* determine current ("boot") performance level */
+-	ret = nouveau_pm_perflvl_get(dev, &pm->boot);
+-	if (ret == 0) {
+-		strncpy(pm->boot.name, "boot", 4);
+-		pm->cur = &pm->boot;
+-
+-		nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
+-		NV_INFO(dev, "c:%s", info);
+-	}
++	nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
++	NV_INFO(dev, "c:%s", info);
+ 
+ 	/* switch performance levels now if requested */
+-	if (nouveau_perflvl != NULL) {
+-		ret = nouveau_pm_profile_set(dev, nouveau_perflvl);
+-		if (ret) {
+-			NV_ERROR(dev, "error setting perflvl \"%s\": %d\n",
+-				 nouveau_perflvl, ret);
+-		}
+-	}
++	if (nouveau_perflvl != NULL)
++		nouveau_pm_profile_set(dev, nouveau_perflvl);
++
++	/* determine the current fan speed */
++	pm->fan.percent = nouveau_pwmfan_get(dev);
+ 
+ 	nouveau_sysfs_init(dev);
+ 	nouveau_hwmon_init(dev);
+@@ -545,6 +911,12 @@ nouveau_pm_fini(struct drm_device *dev)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
++	struct nouveau_pm_profile *profile, *tmp;
++
++	list_for_each_entry_safe(profile, tmp, &pm->profiles, head) {
++		list_del(&profile->head);
++		profile->func->destroy(profile);
++	}
+ 
+ 	if (pm->cur != &pm->boot)
+ 		nouveau_pm_perflvl_set(dev, &pm->boot);
+@@ -552,7 +924,6 @@ nouveau_pm_fini(struct drm_device *dev)
+ 	nouveau_temp_fini(dev);
+ 	nouveau_perf_fini(dev);
+ 	nouveau_volt_fini(dev);
+-	nouveau_mem_timing_fini(dev);
+ 
+ #if defined(CONFIG_ACPI) && defined(CONFIG_POWER_SUPPLY)
+ 	unregister_acpi_notifier(&pm->acpi_nb);
+@@ -574,4 +945,5 @@ nouveau_pm_resume(struct drm_device *dev)
+ 	perflvl = pm->cur;
+ 	pm->cur = &pm->boot;
+ 	nouveau_pm_perflvl_set(dev, perflvl);
++	nouveau_pwmfan_set(dev, pm->fan.percent);
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
+index 8ac02cd..3f82dfe 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
++++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
+@@ -25,10 +25,30 @@
+ #ifndef __NOUVEAU_PM_H__
+ #define __NOUVEAU_PM_H__
+ 
++struct nouveau_mem_exec_func {
++	struct drm_device *dev;
++	void (*precharge)(struct nouveau_mem_exec_func *);
++	void (*refresh)(struct nouveau_mem_exec_func *);
++	void (*refresh_auto)(struct nouveau_mem_exec_func *, bool);
++	void (*refresh_self)(struct nouveau_mem_exec_func *, bool);
++	void (*wait)(struct nouveau_mem_exec_func *, u32 nsec);
++	u32  (*mrg)(struct nouveau_mem_exec_func *, int mr);
++	void (*mrs)(struct nouveau_mem_exec_func *, int mr, u32 data);
++	void (*clock_set)(struct nouveau_mem_exec_func *);
++	void (*timing_set)(struct nouveau_mem_exec_func *);
++	void *priv;
++};
++
++/* nouveau_mem.c */
++int  nouveau_mem_exec(struct nouveau_mem_exec_func *,
++		      struct nouveau_pm_level *);
++
+ /* nouveau_pm.c */
+ int  nouveau_pm_init(struct drm_device *dev);
+ void nouveau_pm_fini(struct drm_device *dev);
+ void nouveau_pm_resume(struct drm_device *dev);
++extern const struct nouveau_pm_profile_func nouveau_pm_static_profile_func;
++void nouveau_pm_trigger(struct drm_device *dev);
+ 
+ /* nouveau_volt.c */
+ void nouveau_volt_init(struct drm_device *);
+@@ -41,35 +61,41 @@ int  nouveau_voltage_gpio_set(struct drm_device *, int voltage);
+ /* nouveau_perf.c */
+ void nouveau_perf_init(struct drm_device *);
+ void nouveau_perf_fini(struct drm_device *);
++u8 *nouveau_perf_timing(struct drm_device *, u32 freq, u8 *ver, u8 *len);
++u8 *nouveau_perf_ramcfg(struct drm_device *, u32 freq, u8 *ver, u8 *len);
+ 
+ /* nouveau_mem.c */
+ void nouveau_mem_timing_init(struct drm_device *);
+ void nouveau_mem_timing_fini(struct drm_device *);
+ 
+ /* nv04_pm.c */
+-int nv04_pm_clock_get(struct drm_device *, u32 id);
+-void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+-			u32 id, int khz);
+-void nv04_pm_clock_set(struct drm_device *, void *);
++int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
++void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
++int nv04_pm_clocks_set(struct drm_device *, void *);
+ 
+ /* nv40_pm.c */
+ int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+ void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+-void nv40_pm_clocks_set(struct drm_device *, void *);
++int nv40_pm_clocks_set(struct drm_device *, void *);
++int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
++int nv40_pm_pwm_set(struct drm_device *, int, u32, u32);
+ 
+ /* nv50_pm.c */
+-int nv50_pm_clock_get(struct drm_device *, u32 id);
+-void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+-			u32 id, int khz);
+-void nv50_pm_clock_set(struct drm_device *, void *);
++int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
++void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
++int nv50_pm_clocks_set(struct drm_device *, void *);
++int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
++int nv50_pm_pwm_set(struct drm_device *, int, u32, u32);
+ 
+ /* nva3_pm.c */
+ int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+ void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+-void nva3_pm_clocks_set(struct drm_device *, void *);
++int nva3_pm_clocks_set(struct drm_device *, void *);
+ 
+ /* nvc0_pm.c */
+ int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
++void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
++int nvc0_pm_clocks_set(struct drm_device *, void *);
+ 
+ /* nouveau_temp.c */
+ void nouveau_temp_init(struct drm_device *dev);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+index c8a463b..47f245e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
++++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+@@ -8,91 +8,30 @@
+ #define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
+ 
+ struct nouveau_sgdma_be {
+-	struct ttm_backend backend;
++	/* this has to be the first field so populate/unpopulated in
++	 * nouve_bo.c works properly, otherwise have to move them here
++	 */
++	struct ttm_dma_tt ttm;
+ 	struct drm_device *dev;
+-
+-	dma_addr_t *pages;
+-	unsigned nr_pages;
+-	bool unmap_pages;
+-
+ 	u64 offset;
+-	bool bound;
+ };
+ 
+-static int
+-nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
+-		       struct page **pages, struct page *dummy_read_page,
+-		       dma_addr_t *dma_addrs)
+-{
+-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+-	struct drm_device *dev = nvbe->dev;
+-	int i;
+-
+-	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
+-
+-	nvbe->pages = dma_addrs;
+-	nvbe->nr_pages = num_pages;
+-	nvbe->unmap_pages = true;
+-
+-	/* this code path isn't called and is incorrect anyways */
+-	if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
+-		nvbe->unmap_pages = false;
+-		return 0;
+-	}
+-
+-	for (i = 0; i < num_pages; i++) {
+-		nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
+-					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+-		if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
+-			nvbe->nr_pages = --i;
+-			be->func->clear(be);
+-			return -EFAULT;
+-		}
+-	}
+-
+-	return 0;
+-}
+-
+ static void
+-nouveau_sgdma_clear(struct ttm_backend *be)
++nouveau_sgdma_destroy(struct ttm_tt *ttm)
+ {
+-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+-	struct drm_device *dev = nvbe->dev;
+-
+-	if (nvbe->bound)
+-		be->func->unbind(be);
++	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ 
+-	if (nvbe->unmap_pages) {
+-		while (nvbe->nr_pages--) {
+-			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+-				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+-		}
+-		nvbe->unmap_pages = false;
+-	}
+-
+-	nvbe->pages = NULL;
+-}
+-
+-static void
+-nouveau_sgdma_destroy(struct ttm_backend *be)
+-{
+-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+-
+-	if (be) {
++	if (ttm) {
+ 		NV_DEBUG(nvbe->dev, "\n");
+-
+-		if (nvbe) {
+-			if (nvbe->pages)
+-				be->func->clear(be);
+-			kfree(nvbe);
+-		}
++		ttm_dma_tt_fini(&nvbe->ttm);
++		kfree(nvbe);
+ 	}
+ }
+ 
+ static int
+-nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
++nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+ {
+-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ 	struct drm_device *dev = nvbe->dev;
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+@@ -102,8 +41,8 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+ 
+ 	nvbe->offset = mem->start << PAGE_SHIFT;
+ 	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
+-	for (i = 0; i < nvbe->nr_pages; i++) {
+-		dma_addr_t dma_offset = nvbe->pages[i];
++	for (i = 0; i < ttm->num_pages; i++) {
++		dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
+ 		uint32_t offset_l = lower_32_bits(dma_offset);
+ 
+ 		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
+@@ -112,14 +51,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+ 		}
+ 	}
+ 
+-	nvbe->bound = true;
+ 	return 0;
+ }
+ 
+ static int
+-nv04_sgdma_unbind(struct ttm_backend *be)
++nv04_sgdma_unbind(struct ttm_tt *ttm)
+ {
+-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ 	struct drm_device *dev = nvbe->dev;
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+@@ -127,22 +65,19 @@ nv04_sgdma_unbind(struct ttm_backend *be)
+ 
+ 	NV_DEBUG(dev, "\n");
+ 
+-	if (!nvbe->bound)
++	if (ttm->state != tt_bound)
+ 		return 0;
+ 
+ 	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
+-	for (i = 0; i < nvbe->nr_pages; i++) {
++	for (i = 0; i < ttm->num_pages; i++) {
+ 		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
+ 			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
+ 	}
+ 
+-	nvbe->bound = false;
+ 	return 0;
+ }
+ 
+ static struct ttm_backend_func nv04_sgdma_backend = {
+-	.populate		= nouveau_sgdma_populate,
+-	.clear			= nouveau_sgdma_clear,
+ 	.bind			= nv04_sgdma_bind,
+ 	.unbind			= nv04_sgdma_unbind,
+ 	.destroy		= nouveau_sgdma_destroy
+@@ -161,14 +96,14 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+ }
+ 
+ static int
+-nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
++nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+ {
+-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+-	dma_addr_t *list = nvbe->pages;
++	dma_addr_t *list = nvbe->ttm.dma_address;
+ 	u32 pte = mem->start << 2;
+-	u32 cnt = nvbe->nr_pages;
++	u32 cnt = ttm->num_pages;
+ 
+ 	nvbe->offset = mem->start << PAGE_SHIFT;
+ 
+@@ -178,18 +113,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+ 	}
+ 
+ 	nv41_sgdma_flush(nvbe);
+-	nvbe->bound = true;
+ 	return 0;
+ }
+ 
+ static int
+-nv41_sgdma_unbind(struct ttm_backend *be)
++nv41_sgdma_unbind(struct ttm_tt *ttm)
+ {
+-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ 	u32 pte = (nvbe->offset >> 12) << 2;
+-	u32 cnt = nvbe->nr_pages;
++	u32 cnt = ttm->num_pages;
+ 
+ 	while (cnt--) {
+ 		nv_wo32(pgt, pte, 0x00000000);
+@@ -197,24 +131,22 @@ nv41_sgdma_unbind(struct ttm_backend *be)
+ 	}
+ 
+ 	nv41_sgdma_flush(nvbe);
+-	nvbe->bound = false;
+ 	return 0;
+ }
+ 
+ static struct ttm_backend_func nv41_sgdma_backend = {
+-	.populate		= nouveau_sgdma_populate,
+-	.clear			= nouveau_sgdma_clear,
+ 	.bind			= nv41_sgdma_bind,
+ 	.unbind			= nv41_sgdma_unbind,
+ 	.destroy		= nouveau_sgdma_destroy
+ };
+ 
+ static void
+-nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
++nv44_sgdma_flush(struct ttm_tt *ttm)
+ {
++	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ 	struct drm_device *dev = nvbe->dev;
+ 
+-	nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
++	nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
+ 	nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
+ 	if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
+ 		NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
+@@ -273,14 +205,14 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
+ }
+ 
+ static int
+-nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
++nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+ {
+-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+-	dma_addr_t *list = nvbe->pages;
++	dma_addr_t *list = nvbe->ttm.dma_address;
+ 	u32 pte = mem->start << 2, tmp[4];
+-	u32 cnt = nvbe->nr_pages;
++	u32 cnt = ttm->num_pages;
+ 	int i;
+ 
+ 	nvbe->offset = mem->start << PAGE_SHIFT;
+@@ -308,19 +240,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+ 	if (cnt)
+ 		nv44_sgdma_fill(pgt, list, pte, cnt);
+ 
+-	nv44_sgdma_flush(nvbe);
+-	nvbe->bound = true;
++	nv44_sgdma_flush(ttm);
+ 	return 0;
+ }
+ 
+ static int
+-nv44_sgdma_unbind(struct ttm_backend *be)
++nv44_sgdma_unbind(struct ttm_tt *ttm)
+ {
+-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ 	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ 	u32 pte = (nvbe->offset >> 12) << 2;
+-	u32 cnt = nvbe->nr_pages;
++	u32 cnt = ttm->num_pages;
+ 
+ 	if (pte & 0x0000000c) {
+ 		u32  max = 4 - ((pte >> 2) & 0x3);
+@@ -342,55 +273,47 @@ nv44_sgdma_unbind(struct ttm_backend *be)
+ 	if (cnt)
+ 		nv44_sgdma_fill(pgt, NULL, pte, cnt);
+ 
+-	nv44_sgdma_flush(nvbe);
+-	nvbe->bound = false;
++	nv44_sgdma_flush(ttm);
+ 	return 0;
+ }
+ 
+ static struct ttm_backend_func nv44_sgdma_backend = {
+-	.populate		= nouveau_sgdma_populate,
+-	.clear			= nouveau_sgdma_clear,
+ 	.bind			= nv44_sgdma_bind,
+ 	.unbind			= nv44_sgdma_unbind,
+ 	.destroy		= nouveau_sgdma_destroy
+ };
+ 
+ static int
+-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
++nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+ {
+-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
+ 	struct nouveau_mem *node = mem->mm_node;
++
+ 	/* noop: bound in move_notify() */
+-	node->pages = nvbe->pages;
+-	nvbe->pages = (dma_addr_t *)node;
+-	nvbe->bound = true;
++	node->pages = nvbe->ttm.dma_address;
+ 	return 0;
+ }
+ 
+ static int
+-nv50_sgdma_unbind(struct ttm_backend *be)
++nv50_sgdma_unbind(struct ttm_tt *ttm)
+ {
+-	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+-	struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
+ 	/* noop: unbound in move_notify() */
+-	nvbe->pages = node->pages;
+-	node->pages = NULL;
+-	nvbe->bound = false;
+ 	return 0;
+ }
+ 
+ static struct ttm_backend_func nv50_sgdma_backend = {
+-	.populate		= nouveau_sgdma_populate,
+-	.clear			= nouveau_sgdma_clear,
+ 	.bind			= nv50_sgdma_bind,
+ 	.unbind			= nv50_sgdma_unbind,
+ 	.destroy		= nouveau_sgdma_destroy
+ };
+ 
+-struct ttm_backend *
+-nouveau_sgdma_init_ttm(struct drm_device *dev)
++struct ttm_tt *
++nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
++			 unsigned long size, uint32_t page_flags,
++			 struct page *dummy_read_page)
+ {
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
++	struct drm_device *dev = dev_priv->dev;
+ 	struct nouveau_sgdma_be *nvbe;
+ 
+ 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
+@@ -398,9 +321,13 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
+ 		return NULL;
+ 
+ 	nvbe->dev = dev;
++	nvbe->ttm.ttm.func = dev_priv->gart_info.func;
+ 
+-	nvbe->backend.func = dev_priv->gart_info.func;
+-	return &nvbe->backend;
++	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
++		kfree(nvbe);
++		return NULL;
++	}
++	return &nvbe->ttm.ttm;
+ }
+ 
+ int
+diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
+index 01adcfb..b096cf2 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_state.c
++++ b/drivers/gpu/drm/nouveau/nouveau_state.c
+@@ -36,6 +36,7 @@
+ #include "nouveau_drm.h"
+ #include "nouveau_fbcon.h"
+ #include "nouveau_ramht.h"
++#include "nouveau_gpio.h"
+ #include "nouveau_pm.h"
+ #include "nv50_display.h"
+ 
+@@ -81,17 +82,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ 		engine->display.early_init	= nv04_display_early_init;
+ 		engine->display.late_takedown	= nv04_display_late_takedown;
+ 		engine->display.create		= nv04_display_create;
+-		engine->display.init		= nv04_display_init;
+ 		engine->display.destroy		= nv04_display_destroy;
+-		engine->gpio.init		= nouveau_stub_init;
+-		engine->gpio.takedown		= nouveau_stub_takedown;
+-		engine->gpio.get		= NULL;
+-		engine->gpio.set		= NULL;
+-		engine->gpio.irq_enable		= NULL;
+-		engine->pm.clock_get		= nv04_pm_clock_get;
+-		engine->pm.clock_pre		= nv04_pm_clock_pre;
+-		engine->pm.clock_set		= nv04_pm_clock_set;
+-		engine->vram.init		= nouveau_mem_detect;
++		engine->display.init		= nv04_display_init;
++		engine->display.fini		= nv04_display_fini;
++		engine->pm.clocks_get		= nv04_pm_clocks_get;
++		engine->pm.clocks_pre		= nv04_pm_clocks_pre;
++		engine->pm.clocks_set		= nv04_pm_clocks_set;
++		engine->vram.init		= nv04_fb_vram_init;
+ 		engine->vram.takedown		= nouveau_stub_takedown;
+ 		engine->vram.flags_valid	= nouveau_mem_flags_valid;
+ 		break;
+@@ -130,17 +127,19 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ 		engine->display.early_init	= nv04_display_early_init;
+ 		engine->display.late_takedown	= nv04_display_late_takedown;
+ 		engine->display.create		= nv04_display_create;
+-		engine->display.init		= nv04_display_init;
+ 		engine->display.destroy		= nv04_display_destroy;
+-		engine->gpio.init		= nouveau_stub_init;
+-		engine->gpio.takedown		= nouveau_stub_takedown;
+-		engine->gpio.get		= nv10_gpio_get;
+-		engine->gpio.set		= nv10_gpio_set;
+-		engine->gpio.irq_enable		= NULL;
+-		engine->pm.clock_get		= nv04_pm_clock_get;
+-		engine->pm.clock_pre		= nv04_pm_clock_pre;
+-		engine->pm.clock_set		= nv04_pm_clock_set;
+-		engine->vram.init		= nouveau_mem_detect;
++		engine->display.init		= nv04_display_init;
++		engine->display.fini		= nv04_display_fini;
++		engine->gpio.drive		= nv10_gpio_drive;
++		engine->gpio.sense		= nv10_gpio_sense;
++		engine->pm.clocks_get		= nv04_pm_clocks_get;
++		engine->pm.clocks_pre		= nv04_pm_clocks_pre;
++		engine->pm.clocks_set		= nv04_pm_clocks_set;
++		if (dev_priv->chipset == 0x1a ||
++		    dev_priv->chipset == 0x1f)
++			engine->vram.init	= nv1a_fb_vram_init;
++		else
++			engine->vram.init	= nv10_fb_vram_init;
+ 		engine->vram.takedown		= nouveau_stub_takedown;
+ 		engine->vram.flags_valid	= nouveau_mem_flags_valid;
+ 		break;
+@@ -159,11 +158,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ 		engine->timer.init		= nv04_timer_init;
+ 		engine->timer.read		= nv04_timer_read;
+ 		engine->timer.takedown		= nv04_timer_takedown;
+-		engine->fb.init			= nv10_fb_init;
+-		engine->fb.takedown		= nv10_fb_takedown;
+-		engine->fb.init_tile_region	= nv10_fb_init_tile_region;
+-		engine->fb.set_tile_region	= nv10_fb_set_tile_region;
+-		engine->fb.free_tile_region	= nv10_fb_free_tile_region;
++		engine->fb.init			= nv20_fb_init;
++		engine->fb.takedown		= nv20_fb_takedown;
++		engine->fb.init_tile_region	= nv20_fb_init_tile_region;
++		engine->fb.set_tile_region	= nv20_fb_set_tile_region;
++		engine->fb.free_tile_region	= nv20_fb_free_tile_region;
+ 		engine->fifo.channels		= 32;
+ 		engine->fifo.init		= nv10_fifo_init;
+ 		engine->fifo.takedown		= nv04_fifo_fini;
+@@ -179,17 +178,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ 		engine->display.early_init	= nv04_display_early_init;
+ 		engine->display.late_takedown	= nv04_display_late_takedown;
+ 		engine->display.create		= nv04_display_create;
+-		engine->display.init		= nv04_display_init;
+ 		engine->display.destroy		= nv04_display_destroy;
+-		engine->gpio.init		= nouveau_stub_init;
+-		engine->gpio.takedown		= nouveau_stub_takedown;
+-		engine->gpio.get		= nv10_gpio_get;
+-		engine->gpio.set		= nv10_gpio_set;
+-		engine->gpio.irq_enable		= NULL;
+-		engine->pm.clock_get		= nv04_pm_clock_get;
+-		engine->pm.clock_pre		= nv04_pm_clock_pre;
+-		engine->pm.clock_set		= nv04_pm_clock_set;
+-		engine->vram.init		= nouveau_mem_detect;
++		engine->display.init		= nv04_display_init;
++		engine->display.fini		= nv04_display_fini;
++		engine->gpio.drive		= nv10_gpio_drive;
++		engine->gpio.sense		= nv10_gpio_sense;
++		engine->pm.clocks_get		= nv04_pm_clocks_get;
++		engine->pm.clocks_pre		= nv04_pm_clocks_pre;
++		engine->pm.clocks_set		= nv04_pm_clocks_set;
++		engine->vram.init		= nv20_fb_vram_init;
+ 		engine->vram.takedown		= nouveau_stub_takedown;
+ 		engine->vram.flags_valid	= nouveau_mem_flags_valid;
+ 		break;
+@@ -228,19 +225,17 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ 		engine->display.early_init	= nv04_display_early_init;
+ 		engine->display.late_takedown	= nv04_display_late_takedown;
+ 		engine->display.create		= nv04_display_create;
+-		engine->display.init		= nv04_display_init;
+ 		engine->display.destroy		= nv04_display_destroy;
+-		engine->gpio.init		= nouveau_stub_init;
+-		engine->gpio.takedown		= nouveau_stub_takedown;
+-		engine->gpio.get		= nv10_gpio_get;
+-		engine->gpio.set		= nv10_gpio_set;
+-		engine->gpio.irq_enable		= NULL;
+-		engine->pm.clock_get		= nv04_pm_clock_get;
+-		engine->pm.clock_pre		= nv04_pm_clock_pre;
+-		engine->pm.clock_set		= nv04_pm_clock_set;
++		engine->display.init		= nv04_display_init;
++		engine->display.fini		= nv04_display_fini;
++		engine->gpio.drive		= nv10_gpio_drive;
++		engine->gpio.sense		= nv10_gpio_sense;
++		engine->pm.clocks_get		= nv04_pm_clocks_get;
++		engine->pm.clocks_pre		= nv04_pm_clocks_pre;
++		engine->pm.clocks_set		= nv04_pm_clocks_set;
+ 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
+ 		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
+-		engine->vram.init		= nouveau_mem_detect;
++		engine->vram.init		= nv20_fb_vram_init;
+ 		engine->vram.takedown		= nouveau_stub_takedown;
+ 		engine->vram.flags_valid	= nouveau_mem_flags_valid;
+ 		break;
+@@ -280,20 +275,23 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ 		engine->display.early_init	= nv04_display_early_init;
+ 		engine->display.late_takedown	= nv04_display_late_takedown;
+ 		engine->display.create		= nv04_display_create;
+-		engine->display.init		= nv04_display_init;
+ 		engine->display.destroy		= nv04_display_destroy;
+-		engine->gpio.init		= nouveau_stub_init;
+-		engine->gpio.takedown		= nouveau_stub_takedown;
+-		engine->gpio.get		= nv10_gpio_get;
+-		engine->gpio.set		= nv10_gpio_set;
+-		engine->gpio.irq_enable		= NULL;
++		engine->display.init		= nv04_display_init;
++		engine->display.fini		= nv04_display_fini;
++		engine->gpio.init		= nv10_gpio_init;
++		engine->gpio.fini		= nv10_gpio_fini;
++		engine->gpio.drive		= nv10_gpio_drive;
++		engine->gpio.sense		= nv10_gpio_sense;
++		engine->gpio.irq_enable		= nv10_gpio_irq_enable;
+ 		engine->pm.clocks_get		= nv40_pm_clocks_get;
+ 		engine->pm.clocks_pre		= nv40_pm_clocks_pre;
+ 		engine->pm.clocks_set		= nv40_pm_clocks_set;
+ 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
+ 		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
+ 		engine->pm.temp_get		= nv40_temp_get;
+-		engine->vram.init		= nouveau_mem_detect;
++		engine->pm.pwm_get		= nv40_pm_pwm_get;
++		engine->pm.pwm_set		= nv40_pm_pwm_set;
++		engine->vram.init		= nv40_fb_vram_init;
+ 		engine->vram.takedown		= nouveau_stub_takedown;
+ 		engine->vram.flags_valid	= nouveau_mem_flags_valid;
+ 		break;
+@@ -335,14 +333,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ 		engine->display.early_init	= nv50_display_early_init;
+ 		engine->display.late_takedown	= nv50_display_late_takedown;
+ 		engine->display.create		= nv50_display_create;
+-		engine->display.init		= nv50_display_init;
+ 		engine->display.destroy		= nv50_display_destroy;
++		engine->display.init		= nv50_display_init;
++		engine->display.fini		= nv50_display_fini;
+ 		engine->gpio.init		= nv50_gpio_init;
+-		engine->gpio.takedown		= nv50_gpio_fini;
+-		engine->gpio.get		= nv50_gpio_get;
+-		engine->gpio.set		= nv50_gpio_set;
+-		engine->gpio.irq_register	= nv50_gpio_irq_register;
+-		engine->gpio.irq_unregister	= nv50_gpio_irq_unregister;
++		engine->gpio.fini		= nv50_gpio_fini;
++		engine->gpio.drive		= nv50_gpio_drive;
++		engine->gpio.sense		= nv50_gpio_sense;
+ 		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
+ 		switch (dev_priv->chipset) {
+ 		case 0x84:
+@@ -355,9 +352,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ 		case 0xaa:
+ 		case 0xac:
+ 		case 0x50:
+-			engine->pm.clock_get	= nv50_pm_clock_get;
+-			engine->pm.clock_pre	= nv50_pm_clock_pre;
+-			engine->pm.clock_set	= nv50_pm_clock_set;
++			engine->pm.clocks_get	= nv50_pm_clocks_get;
++			engine->pm.clocks_pre	= nv50_pm_clocks_pre;
++			engine->pm.clocks_set	= nv50_pm_clocks_set;
+ 			break;
+ 		default:
+ 			engine->pm.clocks_get	= nva3_pm_clocks_get;
+@@ -371,6 +368,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ 			engine->pm.temp_get	= nv84_temp_get;
+ 		else
+ 			engine->pm.temp_get	= nv40_temp_get;
++		engine->pm.pwm_get		= nv50_pm_pwm_get;
++		engine->pm.pwm_set		= nv50_pm_pwm_set;
+ 		engine->vram.init		= nv50_vram_init;
+ 		engine->vram.takedown		= nv50_vram_fini;
+ 		engine->vram.get		= nv50_vram_new;
+@@ -408,14 +407,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ 		engine->display.early_init	= nv50_display_early_init;
+ 		engine->display.late_takedown	= nv50_display_late_takedown;
+ 		engine->display.create		= nv50_display_create;
+-		engine->display.init		= nv50_display_init;
+ 		engine->display.destroy		= nv50_display_destroy;
++		engine->display.init		= nv50_display_init;
++		engine->display.fini		= nv50_display_fini;
+ 		engine->gpio.init		= nv50_gpio_init;
+-		engine->gpio.takedown		= nouveau_stub_takedown;
+-		engine->gpio.get		= nv50_gpio_get;
+-		engine->gpio.set		= nv50_gpio_set;
+-		engine->gpio.irq_register	= nv50_gpio_irq_register;
+-		engine->gpio.irq_unregister	= nv50_gpio_irq_unregister;
++		engine->gpio.fini		= nv50_gpio_fini;
++		engine->gpio.drive		= nv50_gpio_drive;
++		engine->gpio.sense		= nv50_gpio_sense;
+ 		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
+ 		engine->vram.init		= nvc0_vram_init;
+ 		engine->vram.takedown		= nv50_vram_fini;
+@@ -424,8 +422,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ 		engine->vram.flags_valid	= nvc0_vram_flags_valid;
+ 		engine->pm.temp_get		= nv84_temp_get;
+ 		engine->pm.clocks_get		= nvc0_pm_clocks_get;
++		engine->pm.clocks_pre		= nvc0_pm_clocks_pre;
++		engine->pm.clocks_set		= nvc0_pm_clocks_set;
+ 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
+ 		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
++		engine->pm.pwm_get		= nv50_pm_pwm_get;
++		engine->pm.pwm_set		= nv50_pm_pwm_set;
+ 		break;
+ 	case 0xd0:
+ 		engine->instmem.init		= nvc0_instmem_init;
+@@ -458,24 +460,67 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
+ 		engine->display.early_init	= nouveau_stub_init;
+ 		engine->display.late_takedown	= nouveau_stub_takedown;
+ 		engine->display.create		= nvd0_display_create;
+-		engine->display.init		= nvd0_display_init;
+ 		engine->display.destroy		= nvd0_display_destroy;
++		engine->display.init		= nvd0_display_init;
++		engine->display.fini		= nvd0_display_fini;
+ 		engine->gpio.init		= nv50_gpio_init;
+-		engine->gpio.takedown		= nouveau_stub_takedown;
+-		engine->gpio.get		= nvd0_gpio_get;
+-		engine->gpio.set		= nvd0_gpio_set;
+-		engine->gpio.irq_register	= nv50_gpio_irq_register;
+-		engine->gpio.irq_unregister	= nv50_gpio_irq_unregister;
++		engine->gpio.fini		= nv50_gpio_fini;
++		engine->gpio.drive		= nvd0_gpio_drive;
++		engine->gpio.sense		= nvd0_gpio_sense;
+ 		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
+ 		engine->vram.init		= nvc0_vram_init;
+ 		engine->vram.takedown		= nv50_vram_fini;
+ 		engine->vram.get		= nvc0_vram_new;
+ 		engine->vram.put		= nv50_vram_del;
+ 		engine->vram.flags_valid	= nvc0_vram_flags_valid;
++		engine->pm.temp_get		= nv84_temp_get;
+ 		engine->pm.clocks_get		= nvc0_pm_clocks_get;
++		engine->pm.clocks_pre		= nvc0_pm_clocks_pre;
++		engine->pm.clocks_set		= nvc0_pm_clocks_set;
+ 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
+ 		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
+ 		break;
++	case 0xe0:
++		engine->instmem.init		= nvc0_instmem_init;
++		engine->instmem.takedown	= nvc0_instmem_takedown;
++		engine->instmem.suspend		= nvc0_instmem_suspend;
++		engine->instmem.resume		= nvc0_instmem_resume;
++		engine->instmem.get		= nv50_instmem_get;
++		engine->instmem.put		= nv50_instmem_put;
++		engine->instmem.map		= nv50_instmem_map;
++		engine->instmem.unmap		= nv50_instmem_unmap;
++		engine->instmem.flush		= nv84_instmem_flush;
++		engine->mc.init			= nv50_mc_init;
++		engine->mc.takedown		= nv50_mc_takedown;
++		engine->timer.init		= nv04_timer_init;
++		engine->timer.read		= nv04_timer_read;
++		engine->timer.takedown		= nv04_timer_takedown;
++		engine->fb.init			= nvc0_fb_init;
++		engine->fb.takedown		= nvc0_fb_takedown;
++		engine->fifo.channels		= 0;
++		engine->fifo.init		= nouveau_stub_init;
++		engine->fifo.takedown		= nouveau_stub_takedown;
++		engine->fifo.disable		= nvc0_fifo_disable;
++		engine->fifo.enable		= nvc0_fifo_enable;
++		engine->fifo.reassign		= nvc0_fifo_reassign;
++		engine->fifo.unload_context	= nouveau_stub_init;
++		engine->display.early_init	= nouveau_stub_init;
++		engine->display.late_takedown	= nouveau_stub_takedown;
++		engine->display.create		= nvd0_display_create;
++		engine->display.destroy		= nvd0_display_destroy;
++		engine->display.init		= nvd0_display_init;
++		engine->display.fini		= nvd0_display_fini;
++		engine->gpio.init		= nv50_gpio_init;
++		engine->gpio.fini		= nv50_gpio_fini;
++		engine->gpio.drive		= nvd0_gpio_drive;
++		engine->gpio.sense		= nvd0_gpio_sense;
++		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
++		engine->vram.init		= nvc0_vram_init;
++		engine->vram.takedown		= nv50_vram_fini;
++		engine->vram.get		= nvc0_vram_new;
++		engine->vram.put		= nv50_vram_del;
++		engine->vram.flags_valid	= nvc0_vram_flags_valid;
++		break;
+ 	default:
+ 		NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
+ 		return 1;
+@@ -527,6 +572,7 @@ static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
+ 		printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+ 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ 		drm_kms_helper_poll_disable(dev);
++		nouveau_switcheroo_optimus_dsm();
+ 		nouveau_pci_suspend(pdev, pmm);
+ 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
+ 	}
+@@ -549,6 +595,75 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
+ 	return can_switch;
+ }
+ 
++static void
++nouveau_card_channel_fini(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (dev_priv->channel)
++		nouveau_channel_put_unlocked(&dev_priv->channel);
++}
++
++static int
++nouveau_card_channel_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan;
++	int ret, oclass;
++
++	ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
++	dev_priv->channel = chan;
++	if (ret)
++		return ret;
++
++	mutex_unlock(&dev_priv->channel->mutex);
++
++	if (dev_priv->card_type <= NV_50) {
++		if (dev_priv->card_type < NV_50)
++			oclass = 0x0039;
++		else
++			oclass = 0x5039;
++
++		ret = nouveau_gpuobj_gr_new(chan, NvM2MF, oclass);
++		if (ret)
++			goto error;
++
++		ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
++					     &chan->m2mf_ntfy);
++		if (ret)
++			goto error;
++
++		ret = RING_SPACE(chan, 6);
++		if (ret)
++			goto error;
++
++		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
++		OUT_RING  (chan, NvM2MF);
++		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
++		OUT_RING  (chan, NvNotify0);
++		OUT_RING  (chan, chan->vram_handle);
++		OUT_RING  (chan, chan->gart_handle);
++	} else
++	if (dev_priv->card_type <= NV_D0) {
++		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
++		if (ret)
++			goto error;
++
++		ret = RING_SPACE(chan, 2);
++		if (ret)
++			goto error;
++
++		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
++		OUT_RING  (chan, 0x00009039);
++	}
++
++	FIRE_RING (chan);
++error:
++	if (ret)
++		nouveau_card_channel_fini(dev);
++	return ret;
++}
++
+ int
+ nouveau_card_init(struct drm_device *dev)
+ {
+@@ -589,47 +704,45 @@ nouveau_card_init(struct drm_device *dev)
+ 		nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
+ 	}
+ 
+-	nouveau_pm_init(dev);
+-
+-	ret = engine->vram.init(dev);
++	/* PMC */
++	ret = engine->mc.init(dev);
+ 	if (ret)
+ 		goto out_bios;
+ 
+-	ret = nouveau_gpuobj_init(dev);
++	/* PTIMER */
++	ret = engine->timer.init(dev);
+ 	if (ret)
+-		goto out_vram;
++		goto out_mc;
+ 
+-	ret = engine->instmem.init(dev);
++	/* PFB */
++	ret = engine->fb.init(dev);
+ 	if (ret)
+-		goto out_gpuobj;
++		goto out_timer;
+ 
+-	ret = nouveau_mem_vram_init(dev);
++	ret = engine->vram.init(dev);
+ 	if (ret)
+-		goto out_instmem;
++		goto out_fb;
+ 
+-	ret = nouveau_mem_gart_init(dev);
++	/* PGPIO */
++	ret = nouveau_gpio_create(dev);
+ 	if (ret)
+-		goto out_ttmvram;
++		goto out_vram;
+ 
+-	/* PMC */
+-	ret = engine->mc.init(dev);
++	ret = nouveau_gpuobj_init(dev);
+ 	if (ret)
+-		goto out_gart;
++		goto out_gpio;
+ 
+-	/* PGPIO */
+-	ret = engine->gpio.init(dev);
++	ret = engine->instmem.init(dev);
+ 	if (ret)
+-		goto out_mc;
++		goto out_gpuobj;
+ 
+-	/* PTIMER */
+-	ret = engine->timer.init(dev);
++	ret = nouveau_mem_vram_init(dev);
+ 	if (ret)
+-		goto out_gpio;
++		goto out_instmem;
+ 
+-	/* PFB */
+-	ret = engine->fb.init(dev);
++	ret = nouveau_mem_gart_init(dev);
+ 	if (ret)
+-		goto out_timer;
++		goto out_ttmvram;
+ 
+ 	if (!dev_priv->noaccel) {
+ 		switch (dev_priv->card_type) {
+@@ -650,6 +763,7 @@ nouveau_card_init(struct drm_device *dev)
+ 			nv50_graph_create(dev);
+ 			break;
+ 		case NV_C0:
++		case NV_D0:
+ 			nvc0_graph_create(dev);
+ 			break;
+ 		default:
+@@ -665,6 +779,11 @@ nouveau_card_init(struct drm_device *dev)
+ 		case 0xa0:
+ 			nv84_crypt_create(dev);
+ 			break;
++		case 0x98:
++		case 0xaa:
++		case 0xac:
++			nv98_crypt_create(dev);
++			break;
+ 		}
+ 
+ 		switch (dev_priv->card_type) {
+@@ -686,15 +805,25 @@ nouveau_card_init(struct drm_device *dev)
+ 			break;
+ 		}
+ 
++		if (dev_priv->chipset >= 0xa3 || dev_priv->chipset == 0x98) {
++			nv84_bsp_create(dev);
++			nv84_vp_create(dev);
++			nv98_ppp_create(dev);
++		} else
++		if (dev_priv->chipset >= 0x84) {
++			nv50_mpeg_create(dev);
++			nv84_bsp_create(dev);
++			nv84_vp_create(dev);
++		} else
++		if (dev_priv->chipset >= 0x50) {
++			nv50_mpeg_create(dev);
++		} else
+ 		if (dev_priv->card_type == NV_40 ||
+ 		    dev_priv->chipset == 0x31 ||
+ 		    dev_priv->chipset == 0x34 ||
+-		    dev_priv->chipset == 0x36)
++		    dev_priv->chipset == 0x36) {
+ 			nv31_mpeg_create(dev);
+-		else
+-		if (dev_priv->card_type == NV_50 &&
+-		    (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
+-			nv50_mpeg_create(dev);
++		}
+ 
+ 		for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
+ 			if (dev_priv->eng[e]) {
+@@ -714,63 +843,41 @@ nouveau_card_init(struct drm_device *dev)
+ 	if (ret)
+ 		goto out_fifo;
+ 
+-	/* initialise general modesetting */
+-	drm_mode_config_init(dev);
+-	drm_mode_create_scaling_mode_property(dev);
+-	drm_mode_create_dithering_property(dev);
+-	dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+-	dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
+-	dev->mode_config.min_width = 0;
+-	dev->mode_config.min_height = 0;
+-	if (dev_priv->card_type < NV_10) {
+-		dev->mode_config.max_width = 2048;
+-		dev->mode_config.max_height = 2048;
+-	} else
+-	if (dev_priv->card_type < NV_50) {
+-		dev->mode_config.max_width = 4096;
+-		dev->mode_config.max_height = 4096;
+-	} else {
+-		dev->mode_config.max_width = 8192;
+-		dev->mode_config.max_height = 8192;
+-	}
+-
+-	ret = engine->display.create(dev);
++	ret = nouveau_display_create(dev);
+ 	if (ret)
+ 		goto out_irq;
+ 
+ 	nouveau_backlight_init(dev);
++	nouveau_pm_init(dev);
+ 
+-	if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
+-		ret = nouveau_fence_init(dev);
+-		if (ret)
+-			goto out_disp;
++	ret = nouveau_fence_init(dev);
++	if (ret)
++		goto out_pm;
+ 
+-		ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
+-					    NvDmaFB, NvDmaTT);
++	if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
++		ret = nouveau_card_channel_init(dev);
+ 		if (ret)
+ 			goto out_fence;
+-
+-		mutex_unlock(&dev_priv->channel->mutex);
+ 	}
+ 
+ 	if (dev->mode_config.num_crtc) {
+-		ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
++		ret = nouveau_display_init(dev);
+ 		if (ret)
+ 			goto out_chan;
+ 
+ 		nouveau_fbcon_init(dev);
+-		drm_kms_helper_poll_init(dev);
+ 	}
+ 
+ 	return 0;
+ 
+ out_chan:
+-	nouveau_channel_put_unlocked(&dev_priv->channel);
++	nouveau_card_channel_fini(dev);
+ out_fence:
+ 	nouveau_fence_fini(dev);
+-out_disp:
++out_pm:
++	nouveau_pm_fini(dev);
+ 	nouveau_backlight_exit(dev);
+-	engine->display.destroy(dev);
++	nouveau_display_destroy(dev);
+ out_irq:
+ 	nouveau_irq_fini(dev);
+ out_fifo:
+@@ -785,15 +892,6 @@ out_engine:
+ 			dev_priv->eng[e]->destroy(dev,e );
+ 		}
+ 	}
+-
+-	engine->fb.takedown(dev);
+-out_timer:
+-	engine->timer.takedown(dev);
+-out_gpio:
+-	engine->gpio.takedown(dev);
+-out_mc:
+-	engine->mc.takedown(dev);
+-out_gart:
+ 	nouveau_mem_gart_fini(dev);
+ out_ttmvram:
+ 	nouveau_mem_vram_fini(dev);
+@@ -801,10 +899,17 @@ out_instmem:
+ 	engine->instmem.takedown(dev);
+ out_gpuobj:
+ 	nouveau_gpuobj_takedown(dev);
++out_gpio:
++	nouveau_gpio_destroy(dev);
+ out_vram:
+ 	engine->vram.takedown(dev);
++out_fb:
++	engine->fb.takedown(dev);
++out_timer:
++	engine->timer.takedown(dev);
++out_mc:
++	engine->mc.takedown(dev);
+ out_bios:
+-	nouveau_pm_fini(dev);
+ 	nouveau_bios_takedown(dev);
+ out_display_early:
+ 	engine->display.late_takedown(dev);
+@@ -820,19 +925,15 @@ static void nouveau_card_takedown(struct drm_device *dev)
+ 	int e;
+ 
+ 	if (dev->mode_config.num_crtc) {
+-		drm_kms_helper_poll_fini(dev);
+ 		nouveau_fbcon_fini(dev);
+-		drm_vblank_cleanup(dev);
+-	}
+-
+-	if (dev_priv->channel) {
+-		nouveau_channel_put_unlocked(&dev_priv->channel);
+-		nouveau_fence_fini(dev);
++		nouveau_display_fini(dev);
+ 	}
+ 
++	nouveau_card_channel_fini(dev);
++	nouveau_fence_fini(dev);
++	nouveau_pm_fini(dev);
+ 	nouveau_backlight_exit(dev);
+-	engine->display.destroy(dev);
+-	drm_mode_config_cleanup(dev);
++	nouveau_display_destroy(dev);
+ 
+ 	if (!dev_priv->noaccel) {
+ 		engine->fifo.takedown(dev);
+@@ -843,11 +944,6 @@ static void nouveau_card_takedown(struct drm_device *dev)
+ 			}
+ 		}
+ 	}
+-	engine->fb.takedown(dev);
+-	engine->timer.takedown(dev);
+-	engine->gpio.takedown(dev);
+-	engine->mc.takedown(dev);
+-	engine->display.late_takedown(dev);
+ 
+ 	if (dev_priv->vga_ram) {
+ 		nouveau_bo_unpin(dev_priv->vga_ram);
+@@ -863,12 +959,17 @@ static void nouveau_card_takedown(struct drm_device *dev)
+ 
+ 	engine->instmem.takedown(dev);
+ 	nouveau_gpuobj_takedown(dev);
+-	engine->vram.takedown(dev);
+ 
+-	nouveau_irq_fini(dev);
++	nouveau_gpio_destroy(dev);
++	engine->vram.takedown(dev);
++	engine->fb.takedown(dev);
++	engine->timer.takedown(dev);
++	engine->mc.takedown(dev);
+ 
+-	nouveau_pm_fini(dev);
+ 	nouveau_bios_takedown(dev);
++	engine->display.late_takedown(dev);
++
++	nouveau_irq_fini(dev);
+ 
+ 	vga_client_register(dev->pdev, NULL, NULL, NULL);
+ }
+@@ -998,8 +1099,8 @@ static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
+ int nouveau_load(struct drm_device *dev, unsigned long flags)
+ {
+ 	struct drm_nouveau_private *dev_priv;
+-	uint32_t reg0, strap;
+-	resource_size_t mmio_start_offs;
++	unsigned long long offset, length;
++	uint32_t reg0 = ~0, strap;
+ 	int ret;
+ 
+ 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+@@ -1010,83 +1111,90 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
+ 	dev->dev_private = dev_priv;
+ 	dev_priv->dev = dev;
+ 
++	pci_set_master(dev->pdev);
++
+ 	dev_priv->flags = flags & NOUVEAU_FLAGS;
+ 
+ 	NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
+ 		 dev->pci_vendor, dev->pci_device, dev->pdev->class);
+ 
+-	/* resource 0 is mmio regs */
+-	/* resource 1 is linear FB */
+-	/* resource 2 is RAMIN (mmio regs + 0x1000000) */
+-	/* resource 6 is bios */
++	/* first up, map the start of mmio and determine the chipset */
++	dev_priv->mmio = ioremap(pci_resource_start(dev->pdev, 0), PAGE_SIZE);
++	if (dev_priv->mmio) {
++#ifdef __BIG_ENDIAN
++		/* put the card into big-endian mode if it's not */
++		if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
++			nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
++		DRM_MEMORYBARRIER();
++#endif
+ 
+-	/* map the mmio regs */
+-	mmio_start_offs = pci_resource_start(dev->pdev, 0);
+-	dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000);
+-	if (!dev_priv->mmio) {
+-		NV_ERROR(dev, "Unable to initialize the mmio mapping. "
+-			 "Please report your setup to " DRIVER_EMAIL "\n");
++		/* determine chipset and derive architecture from it */
++		reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
++		if ((reg0 & 0x0f000000) > 0) {
++			dev_priv->chipset = (reg0 & 0xff00000) >> 20;
++			switch (dev_priv->chipset & 0xf0) {
++			case 0x10:
++			case 0x20:
++			case 0x30:
++				dev_priv->card_type = dev_priv->chipset & 0xf0;
++				break;
++			case 0x40:
++			case 0x60:
++				dev_priv->card_type = NV_40;
++				break;
++			case 0x50:
++			case 0x80:
++			case 0x90:
++			case 0xa0:
++				dev_priv->card_type = NV_50;
++				break;
++			case 0xc0:
++				dev_priv->card_type = NV_C0;
++				break;
++			case 0xd0:
++				dev_priv->card_type = NV_D0;
++				break;
++			case 0xe0:
++				dev_priv->card_type = NV_E0;
++				break;
++			default:
++				break;
++			}
++		} else
++		if ((reg0 & 0xff00fff0) == 0x20004000) {
++			if (reg0 & 0x00f00000)
++				dev_priv->chipset = 0x05;
++			else
++				dev_priv->chipset = 0x04;
++			dev_priv->card_type = NV_04;
++		}
++
++		iounmap(dev_priv->mmio);
++	}
++
++	if (!dev_priv->card_type) {
++		NV_ERROR(dev, "unsupported chipset 0x%08x\n", reg0);
+ 		ret = -EINVAL;
+ 		goto err_priv;
+ 	}
+-	NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
+-					(unsigned long long)mmio_start_offs);
+ 
+-#ifdef __BIG_ENDIAN
+-	/* Put the card in BE mode if it's not */
+-	if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
+-		nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
++	NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
++		     dev_priv->card_type, reg0);
+ 
+-	DRM_MEMORYBARRIER();
+-#endif
++	/* map the mmio regs, limiting the amount to preserve vmap space */
++	offset = pci_resource_start(dev->pdev, 0);
++	length = pci_resource_len(dev->pdev, 0);
++	if (dev_priv->card_type < NV_E0)
++		length = min(length, (unsigned long long)0x00800000);
+ 
+-	/* Time to determine the card architecture */
+-	reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
+-
+-	/* We're dealing with >=NV10 */
+-	if ((reg0 & 0x0f000000) > 0) {
+-		/* Bit 27-20 contain the architecture in hex */
+-		dev_priv->chipset = (reg0 & 0xff00000) >> 20;
+-	/* NV04 or NV05 */
+-	} else if ((reg0 & 0xff00fff0) == 0x20004000) {
+-		if (reg0 & 0x00f00000)
+-			dev_priv->chipset = 0x05;
+-		else
+-			dev_priv->chipset = 0x04;
+-	} else
+-		dev_priv->chipset = 0xff;
+-
+-	switch (dev_priv->chipset & 0xf0) {
+-	case 0x00:
+-	case 0x10:
+-	case 0x20:
+-	case 0x30:
+-		dev_priv->card_type = dev_priv->chipset & 0xf0;
+-		break;
+-	case 0x40:
+-	case 0x60:
+-		dev_priv->card_type = NV_40;
+-		break;
+-	case 0x50:
+-	case 0x80:
+-	case 0x90:
+-	case 0xa0:
+-		dev_priv->card_type = NV_50;
+-		break;
+-	case 0xc0:
+-		dev_priv->card_type = NV_C0;
+-		break;
+-	case 0xd0:
+-		dev_priv->card_type = NV_D0;
+-		break;
+-	default:
+-		NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
++	dev_priv->mmio = ioremap(offset, length);
++	if (!dev_priv->mmio) {
++		NV_ERROR(dev, "Unable to initialize the mmio mapping. "
++			 "Please report your setup to " DRIVER_EMAIL "\n");
+ 		ret = -EINVAL;
+-		goto err_mmio;
++		goto err_priv;
+ 	}
+-
+-	NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
+-		dev_priv->card_type, reg0);
++	NV_DEBUG(dev, "regs mapped ok at 0x%llx\n", offset);
+ 
+ 	/* determine frequency of timing crystal */
+ 	strap = nv_rd32(dev, 0x101000);
+@@ -1112,13 +1220,11 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
+ 	dev_priv->noaccel = !!nouveau_noaccel;
+ 	if (nouveau_noaccel == -1) {
+ 		switch (dev_priv->chipset) {
+-#if 0
+-		case 0xXX: /* known broken */
++		case 0xd9: /* known broken */
+ 			NV_INFO(dev, "acceleration disabled by default, pass "
+ 				     "noaccel=0 to force enable\n");
+ 			dev_priv->noaccel = true;
+ 			break;
+-#endif
+ 		default:
+ 			dev_priv->noaccel = false;
+ 			break;
+@@ -1146,7 +1252,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
+ 		}
+ 	} else {
+ 		dev_priv->ramin_size = 1 * 1024 * 1024;
+-		dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
++		dev_priv->ramin = ioremap(offset + NV_RAMIN,
+ 					  dev_priv->ramin_size);
+ 		if (!dev_priv->ramin) {
+ 			NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
+@@ -1240,7 +1346,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
+ 		getparam->value = 1;
+ 		break;
+ 	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
+-		getparam->value = dev_priv->card_type < NV_D0;
++		getparam->value = 1;
+ 		break;
+ 	case NOUVEAU_GETPARAM_GRAPH_UNITS:
+ 		/* NV40 and NV50 versions are quite different, but register
+diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
+index 5a46446..0f5a301 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
++++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
+@@ -55,6 +55,10 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
+ 	temps->down_clock = 100;
+ 	temps->fan_boost = 90;
+ 
++	/* Set the default range for the pwm fan */
++	pm->fan.min_duty = 30;
++	pm->fan.max_duty = 100;
++
+ 	/* Set the known default values to setup the temperature sensor */
+ 	if (dev_priv->card_type >= NV_40) {
+ 		switch (dev_priv->chipset) {
+@@ -156,11 +160,26 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
+ 		case 0x13:
+ 			sensor->slope_div = value;
+ 			break;
++		case 0x22:
++			pm->fan.min_duty = value & 0xff;
++			pm->fan.max_duty = (value & 0xff00) >> 8;
++			break;
++		case 0x26:
++			pm->fan.pwm_freq = value;
++			break;
+ 		}
+ 		temp += recordlen;
+ 	}
+ 
+ 	nouveau_temp_safety_checks(dev);
++
++	/* check the fan min/max settings */
++	if (pm->fan.min_duty < 10)
++		pm->fan.min_duty = 10;
++	if (pm->fan.max_duty > 100)
++		pm->fan.max_duty = 100;
++	if (pm->fan.max_duty < pm->fan.min_duty)
++		pm->fan.max_duty = pm->fan.min_duty;
+ }
+ 
+ static int
+@@ -267,8 +286,6 @@ probe_monitoring_device(struct nouveau_i2c_chan *i2c,
+ static void
+ nouveau_temp_probe_i2c(struct drm_device *dev)
+ {
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct dcb_table *dcb = &dev_priv->vbios.dcb;
+ 	struct i2c_board_info info[] = {
+ 		{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
+ 		{ I2C_BOARD_INFO("w83781d", 0x2d) },
+@@ -277,11 +294,9 @@ nouveau_temp_probe_i2c(struct drm_device *dev)
+ 		{ I2C_BOARD_INFO("lm99", 0x4c) },
+ 		{ }
+ 	};
+-	int idx = (dcb->version >= 0x40 ?
+-		   dcb->i2c_default_indices & 0xf : 2);
+ 
+ 	nouveau_i2c_identify(dev, "monitoring device", info,
+-			     probe_monitoring_device, idx);
++			     probe_monitoring_device, NV_I2C_DEFAULT(0));
+ }
+ 
+ void
+@@ -297,9 +312,9 @@ nouveau_temp_init(struct drm_device *dev)
+ 			return;
+ 
+ 		if (P.version == 1)
+-			temp = ROMPTR(bios, P.data[12]);
++			temp = ROMPTR(dev, P.data[12]);
+ 		else if (P.version == 2)
+-			temp = ROMPTR(bios, P.data[16]);
++			temp = ROMPTR(dev, P.data[16]);
+ 		else
+ 			NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
+index ef0832b..2bf6c03 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
+@@ -78,9 +78,10 @@ nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
+ 
+ void
+ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
+-		  struct nouveau_mem *mem, dma_addr_t *list)
++		  struct nouveau_mem *mem)
+ {
+ 	struct nouveau_vm *vm = vma->vm;
++	dma_addr_t *list = mem->pages;
+ 	int big = vma->node->type != vm->spg_shift;
+ 	u32 offset = vma->node->offset + (delta >> 12);
+ 	u32 bits = vma->node->type - 12;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
+index 6ce995f..4fb6e72 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
++++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
+@@ -89,7 +89,7 @@ void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
+ void nouveau_vm_unmap(struct nouveau_vma *);
+ void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
+ void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
+-		       struct nouveau_mem *, dma_addr_t *);
++		       struct nouveau_mem *);
+ 
+ /* nv50_vm.c */
+ void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
+index 86d03e1..b010cb9 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
++++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
+@@ -26,6 +26,7 @@
+ 
+ #include "nouveau_drv.h"
+ #include "nouveau_pm.h"
++#include "nouveau_gpio.h"
+ 
+ static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
+ static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
+@@ -34,7 +35,6 @@ int
+ nouveau_voltage_gpio_get(struct drm_device *dev)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
+ 	struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+ 	u8 vid = 0;
+ 	int i;
+@@ -43,7 +43,7 @@ nouveau_voltage_gpio_get(struct drm_device *dev)
+ 		if (!(volt->vid_mask & (1 << i)))
+ 			continue;
+ 
+-		vid |= gpio->get(dev, vidtag[i]) << i;
++		vid |= nouveau_gpio_func_get(dev, vidtag[i]) << i;
+ 	}
+ 
+ 	return nouveau_volt_lvl_lookup(dev, vid);
+@@ -53,7 +53,6 @@ int
+ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
+ 	struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
+ 	int vid, i;
+ 
+@@ -65,7 +64,7 @@ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
+ 		if (!(volt->vid_mask & (1 << i)))
+ 			continue;
+ 
+-		gpio->set(dev, vidtag[i], !!(vid & (1 << i)));
++		nouveau_gpio_func_set(dev, vidtag[i], !!(vid & (1 << i)));
+ 	}
+ 
+ 	return 0;
+@@ -117,10 +116,10 @@ nouveau_volt_init(struct drm_device *dev)
+ 			return;
+ 
+ 		if (P.version == 1)
+-			volt = ROMPTR(bios, P.data[16]);
++			volt = ROMPTR(dev, P.data[16]);
+ 		else
+ 		if (P.version == 2)
+-			volt = ROMPTR(bios, P.data[12]);
++			volt = ROMPTR(dev, P.data[12]);
+ 		else {
+ 			NV_WARN(dev, "unknown volt for BIT P %d\n", P.version);
+ 		}
+@@ -130,7 +129,7 @@ nouveau_volt_init(struct drm_device *dev)
+ 			return;
+ 		}
+ 
+-		volt = ROMPTR(bios, bios->data[bios->offset + 0x98]);
++		volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
+ 	}
+ 
+ 	if (!volt) {
+@@ -194,7 +193,7 @@ nouveau_volt_init(struct drm_device *dev)
+ 			return;
+ 		}
+ 
+-		if (!nouveau_bios_gpio_entry(dev, vidtag[i])) {
++		if (!nouveau_gpio_func_valid(dev, vidtag[i])) {
+ 			NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i);
+ 			return;
+ 		}
+diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
+index 5e45398..728d075 100644
+--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
++++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
+@@ -364,7 +364,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
+ 	regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
+ 	regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
+ 	/* framebuffer can be larger than crtc scanout area. */
+-	regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8;
++	regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitches[0] / 8;
+ 	regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
+ 	regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
+ 	regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
+@@ -377,9 +377,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
+ 
+ 	/* framebuffer can be larger than crtc scanout area. */
+ 	regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
+-		XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
++		XLATE(fb->pitches[0] / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ 	regp->CRTC[NV_CIO_CRE_42] =
+-		XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
++		XLATE(fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+ 	regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
+ 					    MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
+ 	regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
+@@ -835,18 +835,18 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
+ 	NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
+ 		      regp->ramdac_gen_ctrl);
+ 
+-	regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
++	regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitches[0] >> 3;
+ 	regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
+-		XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
++		XLATE(drm_fb->pitches[0] >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ 	regp->CRTC[NV_CIO_CRE_42] =
+-		XLATE(drm_fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
++		XLATE(drm_fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+ 	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
+ 	crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
+ 	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
+ 
+ 	/* Update the framebuffer location. */
+ 	regp->fb_start = nv_crtc->fb.offset & ~3;
+-	regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
++	regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->bits_per_pixel / 8);
+ 	nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
+ 
+ 	/* Update the arbitration parameters. */
+diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
+index 2d6bfd0..f180dcf 100644
+--- a/drivers/gpu/drm/nouveau/nv04_dac.c
++++ b/drivers/gpu/drm/nouveau/nv04_dac.c
+@@ -32,6 +32,7 @@
+ #include "nouveau_connector.h"
+ #include "nouveau_crtc.h"
+ #include "nouveau_hw.h"
++#include "nouveau_gpio.h"
+ #include "nvreg.h"
+ 
+ int nv04_dac_output_offset(struct drm_encoder *encoder)
+@@ -220,7 +221,6 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
+ {
+ 	struct drm_device *dev = encoder->dev;
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
+ 	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+ 	uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
+ 	uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
+@@ -252,11 +252,11 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
+ 		nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
+ 	}
+ 
+-	saved_gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
+-	saved_gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
++	saved_gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
++	saved_gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
+ 
+-	gpio->set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
+-	gpio->set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
++	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
++	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
+ 
+ 	msleep(4);
+ 
+@@ -306,8 +306,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
+ 		nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
+ 	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
+ 
+-	gpio->set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
+-	gpio->set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
++	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
++	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
+ 
+ 	return sample;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
+index 752440c..71b6235 100644
+--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
++++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
+@@ -289,6 +289,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
+ 	struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ 	struct drm_display_mode *output_mode = &nv_encoder->mode;
++	struct drm_connector *connector = &nv_connector->base;
+ 	uint32_t mode_ratio, panel_ratio;
+ 
+ 	NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
+@@ -340,10 +341,15 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
+ 	    output_mode->clock > 165000)
+ 		regp->fp_control |= (2 << 24);
+ 	if (nv_encoder->dcb->type == OUTPUT_LVDS) {
+-		bool duallink, dummy;
++		bool duallink = false, dummy;
++		if (nv_connector->edid &&
++		    nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
++			duallink = (((u8 *)nv_connector->edid)[121] == 2);
++		} else {
++			nouveau_bios_parse_lvds_table(dev, output_mode->clock,
++						      &duallink, &dummy);
++		}
+ 
+-		nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+-					      &duallink, &dummy);
+ 		if (duallink)
+ 			regp->fp_control |= (8 << 28);
+ 	} else
+@@ -407,7 +413,9 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
+ 	}
+ 
+ 	/* Output property. */
+-	if (nv_connector->use_dithering) {
++	if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
++	    (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
++	     encoder->crtc->fb->depth > connector->display_info.bpc * 3)) {
+ 		if (dev_priv->chipset == 0x11)
+ 			regp->dither = savep->dither | 0x00010000;
+ 		else {
+@@ -496,7 +504,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
+ 
+ static inline bool is_powersaving_dpms(int mode)
+ {
+-	return (mode != DRM_MODE_DPMS_ON);
++	return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
+ }
+ 
+ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
+diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
+index 6bd8518..7047d37 100644
+--- a/drivers/gpu/drm/nouveau/nv04_display.c
++++ b/drivers/gpu/drm/nouveau/nv04_display.c
+@@ -243,6 +243,11 @@ nv04_display_init(struct drm_device *dev)
+ 	return 0;
+ }
+ 
++void
++nv04_display_fini(struct drm_device *dev)
++{
++}
++
+ static void
+ nv04_vblank_crtc0_isr(struct drm_device *dev)
+ {
+diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c
+index 638cf60..d5eedd6 100644
+--- a/drivers/gpu/drm/nouveau/nv04_fb.c
++++ b/drivers/gpu/drm/nouveau/nv04_fb.c
+@@ -4,6 +4,40 @@
+ #include "nouveau_drm.h"
+ 
+ int
++nv04_fb_vram_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	u32 boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
++
++	if (boot0 & 0x00000100) {
++		dev_priv->vram_size  = ((boot0 >> 12) & 0xf) * 2 + 2;
++		dev_priv->vram_size *= 1024 * 1024;
++	} else {
++		switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
++		case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
++			dev_priv->vram_size = 32 * 1024 * 1024;
++			break;
++		case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
++			dev_priv->vram_size = 16 * 1024 * 1024;
++			break;
++		case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
++			dev_priv->vram_size = 8 * 1024 * 1024;
++			break;
++		case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
++			dev_priv->vram_size = 4 * 1024 * 1024;
++			break;
++		}
++	}
++
++	if ((boot0 & 0x00000038) <= 0x10)
++		dev_priv->vram_type = NV_MEM_TYPE_SGRAM;
++	else
++		dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
++
++	return 0;
++}
++
++int
+ nv04_fb_init(struct drm_device *dev)
+ {
+ 	/* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
+diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
+index 9ae92a8..6e75899 100644
+--- a/drivers/gpu/drm/nouveau/nv04_pm.c
++++ b/drivers/gpu/drm/nouveau/nv04_pm.c
+@@ -27,68 +27,111 @@
+ #include "nouveau_hw.h"
+ #include "nouveau_pm.h"
+ 
+-struct nv04_pm_state {
++int
++nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
++{
++	int ret;
++
++	ret = nouveau_hw_get_clock(dev, PLL_CORE);
++	if (ret < 0)
++		return ret;
++	perflvl->core = ret;
++
++	ret = nouveau_hw_get_clock(dev, PLL_MEMORY);
++	if (ret < 0)
++		return ret;
++	perflvl->memory = ret;
++
++	return 0;
++}
++
++struct nv04_pm_clock {
+ 	struct pll_lims pll;
+ 	struct nouveau_pll_vals calc;
+ };
+ 
+-int
+-nv04_pm_clock_get(struct drm_device *dev, u32 id)
++struct nv04_pm_state {
++	struct nv04_pm_clock core;
++	struct nv04_pm_clock memory;
++};
++
++static int
++calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
+ {
+-	return nouveau_hw_get_clock(dev, id);
++	int ret;
++
++	ret = get_pll_limits(dev, id, &clk->pll);
++	if (ret)
++		return ret;
++
++	ret = nouveau_calc_pll_mnp(dev, &clk->pll, khz, &clk->calc);
++	if (!ret)
++		return -EINVAL;
++
++	return 0;
+ }
+ 
+ void *
+-nv04_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+-		  u32 id, int khz)
++nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+ {
+-	struct nv04_pm_state *state;
++	struct nv04_pm_state *info;
+ 	int ret;
+ 
+-	state = kzalloc(sizeof(*state), GFP_KERNEL);
+-	if (!state)
++	info = kzalloc(sizeof(*info), GFP_KERNEL);
++	if (!info)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	ret = get_pll_limits(dev, id, &state->pll);
+-	if (ret) {
+-		kfree(state);
+-		return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+-	}
++	ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core);
++	if (ret)
++		goto error;
+ 
+-	ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc);
+-	if (!ret) {
+-		kfree(state);
+-		return ERR_PTR(-EINVAL);
++	if (perflvl->memory) {
++		ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory);
++		if (ret)
++			goto error;
+ 	}
+ 
+-	return state;
++	return info;
++error:
++	kfree(info);
++	return ERR_PTR(ret);
+ }
+ 
+-void
+-nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
++static void
++prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+-	struct nv04_pm_state *state = pre_state;
+-	u32 reg = state->pll.reg;
++	u32 reg = clk->pll.reg;
+ 
+ 	/* thank the insane nouveau_hw_setpll() interface for this */
+ 	if (dev_priv->card_type >= NV_40)
+ 		reg += 4;
+ 
+-	nouveau_hw_setpll(dev, reg, &state->calc);
++	nouveau_hw_setpll(dev, reg, &clk->calc);
++}
++
++int
++nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
++	struct nv04_pm_state *state = pre_state;
++
++	prog_pll(dev, &state->core);
+ 
+-	if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) {
+-		if (dev_priv->card_type == NV_20)
+-			nv_mask(dev, 0x1002c4, 0, 1 << 20);
++	if (state->memory.pll.reg) {
++		prog_pll(dev, &state->memory);
++		if (dev_priv->card_type < NV_30) {
++			if (dev_priv->card_type == NV_20)
++				nv_mask(dev, 0x1002c4, 0, 1 << 20);
+ 
+-		/* Reset the DLLs */
+-		nv_mask(dev, 0x1002c0, 0, 1 << 8);
++			/* Reset the DLLs */
++			nv_mask(dev, 0x1002c0, 0, 1 << 8);
++		}
+ 	}
+ 
+-	if (reg == NV_PRAMDAC_NVPLL_COEFF)
+-		ptimer->init(dev);
++	ptimer->init(dev);
+ 
+ 	kfree(state);
++	return 0;
+ }
+-
+diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
+index 263301b..55c9452 100644
+--- a/drivers/gpu/drm/nouveau/nv04_timer.c
++++ b/drivers/gpu/drm/nouveau/nv04_timer.c
+@@ -2,6 +2,7 @@
+ #include "drm.h"
+ #include "nouveau_drv.h"
+ #include "nouveau_drm.h"
++#include "nouveau_hw.h"
+ 
+ int
+ nv04_timer_init(struct drm_device *dev)
+@@ -17,7 +18,7 @@ nv04_timer_init(struct drm_device *dev)
+ 
+ 	/* determine base clock for timer source */
+ 	if (dev_priv->chipset < 0x40) {
+-		n = dev_priv->engine.pm.clock_get(dev, PLL_CORE);
++		n = nouveau_hw_get_clock(dev, PLL_CORE);
+ 	} else
+ 	if (dev_priv->chipset == 0x40) {
+ 		/*XXX: figure this out */
+diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
+index f78181a..420b1608 100644
+--- a/drivers/gpu/drm/nouveau/nv10_fb.c
++++ b/drivers/gpu/drm/nouveau/nv10_fb.c
+@@ -3,81 +3,16 @@
+ #include "nouveau_drv.h"
+ #include "nouveau_drm.h"
+ 
+-static struct drm_mm_node *
+-nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
+-{
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+-	struct drm_mm_node *mem;
+-	int ret;
+-
+-	ret = drm_mm_pre_get(&pfb->tag_heap);
+-	if (ret)
+-		return NULL;
+-
+-	spin_lock(&dev_priv->tile.lock);
+-	mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
+-	if (mem)
+-		mem = drm_mm_get_block_atomic(mem, size, 0);
+-	spin_unlock(&dev_priv->tile.lock);
+-
+-	return mem;
+-}
+-
+-static void
+-nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
+-{
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-
+-	spin_lock(&dev_priv->tile.lock);
+-	drm_mm_put_block(mem);
+-	spin_unlock(&dev_priv->tile.lock);
+-}
+-
+ void
+ nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+ 			 uint32_t size, uint32_t pitch, uint32_t flags)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+-	int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
+ 
+-	tile->addr = addr;
++	tile->addr  = 0x80000000 | addr;
+ 	tile->limit = max(1u, addr + size) - 1;
+ 	tile->pitch = pitch;
+-
+-	if (dev_priv->card_type == NV_20) {
+-		if (flags & NOUVEAU_GEM_TILE_ZETA) {
+-			/*
+-			 * Allocate some of the on-die tag memory,
+-			 * used to store Z compression meta-data (most
+-			 * likely just a bitmap determining if a given
+-			 * tile is compressed or not).
+-			 */
+-			tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
+-
+-			if (tile->tag_mem) {
+-				/* Enable Z compression */
+-				if (dev_priv->chipset >= 0x25)
+-					tile->zcomp = tile->tag_mem->start |
+-						(bpp == 16 ?
+-						 NV25_PFB_ZCOMP_MODE_16 :
+-						 NV25_PFB_ZCOMP_MODE_32);
+-				else
+-					tile->zcomp = tile->tag_mem->start |
+-						NV20_PFB_ZCOMP_EN |
+-						(bpp == 16 ? 0 :
+-						 NV20_PFB_ZCOMP_MODE_32);
+-			}
+-
+-			tile->addr |= 3;
+-		} else {
+-			tile->addr |= 1;
+-		}
+-
+-	} else {
+-		tile->addr |= 1 << 31;
+-	}
+ }
+ 
+ void
+@@ -86,11 +21,6 @@ nv10_fb_free_tile_region(struct drm_device *dev, int i)
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+ 
+-	if (tile->tag_mem) {
+-		nv20_fb_free_tag(dev, tile->tag_mem);
+-		tile->tag_mem = NULL;
+-	}
+-
+ 	tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
+ }
+ 
+@@ -103,9 +33,48 @@ nv10_fb_set_tile_region(struct drm_device *dev, int i)
+ 	nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+ 	nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+ 	nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
++}
++
++int
++nv1a_fb_vram_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct pci_dev *bridge;
++	uint32_t mem, mib;
++
++	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
++	if (!bridge) {
++		NV_ERROR(dev, "no bridge device\n");
++		return 0;
++	}
++
++	if (dev_priv->chipset == 0x1a) {
++		pci_read_config_dword(bridge, 0x7c, &mem);
++		mib = ((mem >> 6) & 31) + 1;
++	} else {
++		pci_read_config_dword(bridge, 0x84, &mem);
++		mib = ((mem >> 4) & 127) + 1;
++	}
++
++	dev_priv->vram_size = mib * 1024 * 1024;
++	return 0;
++}
++
++int
++nv10_fb_vram_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	u32 fifo_data = nv_rd32(dev, NV04_PFB_FIFO_DATA);
++	u32 cfg0 = nv_rd32(dev, 0x100200);
+ 
+-	if (dev_priv->card_type == NV_20)
+-		nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
++	dev_priv->vram_size = fifo_data & NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
++
++	if (cfg0 & 0x00000001)
++		dev_priv->vram_type = NV_MEM_TYPE_DDR1;
++	else
++		dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
++
++	return 0;
+ }
+ 
+ int
+@@ -115,14 +84,8 @@ nv10_fb_init(struct drm_device *dev)
+ 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ 	int i;
+ 
+-	pfb->num_tiles = NV10_PFB_TILE__SIZE;
+-
+-	if (dev_priv->card_type == NV_20)
+-		drm_mm_init(&pfb->tag_heap, 0,
+-			    (dev_priv->chipset >= 0x25 ?
+-			     64 * 1024 : 32 * 1024));
+-
+ 	/* Turn all the tiling regions off. */
++	pfb->num_tiles = NV10_PFB_TILE__SIZE;
+ 	for (i = 0; i < pfb->num_tiles; i++)
+ 		pfb->set_tile_region(dev, i);
+ 
+@@ -138,7 +101,4 @@ nv10_fb_takedown(struct drm_device *dev)
+ 
+ 	for (i = 0; i < pfb->num_tiles; i++)
+ 		pfb->free_tile_region(dev, i);
+-
+-	if (dev_priv->card_type == NV_20)
+-		drm_mm_takedown(&pfb->tag_heap);
+ }
+diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
+index 007fc29..9d79180 100644
+--- a/drivers/gpu/drm/nouveau/nv10_gpio.c
++++ b/drivers/gpu/drm/nouveau/nv10_gpio.c
+@@ -27,66 +27,97 @@
+ #include "drmP.h"
+ #include "nouveau_drv.h"
+ #include "nouveau_hw.h"
++#include "nouveau_gpio.h"
+ 
+-static bool
+-get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
+-		  uint32_t *mask)
++int
++nv10_gpio_sense(struct drm_device *dev, int line)
+ {
+-	if (ent->line < 2) {
+-		*reg = NV_PCRTC_GPIO;
+-		*shift = ent->line * 16;
+-		*mask = 0x11;
+-
+-	} else if (ent->line < 10) {
+-		*reg = NV_PCRTC_GPIO_EXT;
+-		*shift = (ent->line - 2) * 4;
+-		*mask = 0x3;
++	if (line < 2) {
++		line = line * 16;
++		line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO) >> line;
++		return !!(line & 0x0100);
++	} else
++	if (line < 10) {
++		line = (line - 2) * 4;
++		line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT) >> line;
++		return !!(line & 0x04);
++	} else
++	if (line < 14) {
++		line = (line - 10) * 4;
++		line = NVReadCRTC(dev, 0, NV_PCRTC_850) >> line;
++		return !!(line & 0x04);
++	}
+ 
+-	} else if (ent->line < 14) {
+-		*reg = NV_PCRTC_850;
+-		*shift = (ent->line - 10) * 4;
+-		*mask = 0x3;
++	return -EINVAL;
++}
+ 
++int
++nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
++{
++	u32 reg, mask, data;
++
++	if (line < 2) {
++		line = line * 16;
++		reg  = NV_PCRTC_GPIO;
++		mask = 0x00000011;
++		data = (dir << 4) | out;
++	} else
++	if (line < 10) {
++		line = (line - 2) * 4;
++		reg  = NV_PCRTC_GPIO_EXT;
++		mask = 0x00000003;
++		data = (dir << 1) | out;
++	} else
++	if (line < 14) {
++		line = (line - 10) * 4;
++		reg  = NV_PCRTC_850;
++		mask = 0x00000003;
++		data = (dir << 1) | out;
+ 	} else {
+-		return false;
++		return -EINVAL;
+ 	}
+ 
+-	return true;
++	mask = NVReadCRTC(dev, 0, reg) & ~(mask << line);
++	NVWriteCRTC(dev, 0, reg, mask | (data << line));
++	return 0;
+ }
+ 
+-int
+-nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
++void
++nv10_gpio_irq_enable(struct drm_device *dev, int line, bool on)
+ {
+-	struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
+-	uint32_t reg, shift, mask, value;
++	u32 mask = 0x00010001 << line;
+ 
+-	if (!ent)
+-		return -ENODEV;
++	nv_wr32(dev, 0x001104, mask);
++	nv_mask(dev, 0x001144, mask, on ? mask : 0);
++}
+ 
+-	if (!get_gpio_location(ent, &reg, &shift, &mask))
+-		return -ENODEV;
++static void
++nv10_gpio_isr(struct drm_device *dev)
++{
++	u32 intr = nv_rd32(dev, 0x1104);
++	u32 hi = (intr & 0x0000ffff) >> 0;
++	u32 lo = (intr & 0xffff0000) >> 16;
+ 
+-	value = NVReadCRTC(dev, 0, reg) >> shift;
++	nouveau_gpio_isr(dev, 0, hi | lo);
+ 
+-	return (ent->invert ? 1 : 0) ^ (value & 1);
++	nv_wr32(dev, 0x001104, intr);
+ }
+ 
+ int
+-nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
++nv10_gpio_init(struct drm_device *dev)
+ {
+-	struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
+-	uint32_t reg, shift, mask, value;
+-
+-	if (!ent)
+-		return -ENODEV;
+-
+-	if (!get_gpio_location(ent, &reg, &shift, &mask))
+-		return -ENODEV;
+-
+-	value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
+-	mask = ~(mask << shift);
+-
+-	NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
+-
++	nv_wr32(dev, 0x001140, 0x00000000);
++	nv_wr32(dev, 0x001100, 0xffffffff);
++	nv_wr32(dev, 0x001144, 0x00000000);
++	nv_wr32(dev, 0x001104, 0xffffffff);
++	nouveau_irq_register(dev, 28, nv10_gpio_isr); /* PBUS */
+ 	return 0;
+ }
++
++void
++nv10_gpio_fini(struct drm_device *dev)
++{
++	nv_wr32(dev, 0x001140, 0x00000000);
++	nv_wr32(dev, 0x001144, 0x00000000);
++	nouveau_irq_unregister(dev, 28);
++}
+diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
+index 3900ceb..696d7e7 100644
+--- a/drivers/gpu/drm/nouveau/nv17_tv.c
++++ b/drivers/gpu/drm/nouveau/nv17_tv.c
+@@ -30,6 +30,7 @@
+ #include "nouveau_encoder.h"
+ #include "nouveau_connector.h"
+ #include "nouveau_crtc.h"
++#include "nouveau_gpio.h"
+ #include "nouveau_hw.h"
+ #include "nv17_tv.h"
+ 
+@@ -37,7 +38,6 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
+ {
+ 	struct drm_device *dev = encoder->dev;
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
+ 	uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
+ 	uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
+ 		fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
+@@ -53,8 +53,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
+ 	head = (dacclk & 0x100) >> 8;
+ 
+ 	/* Save the previous state. */
+-	gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
+-	gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
++	gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
++	gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
+ 	fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
+ 	fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
+ 	fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
+@@ -65,8 +65,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
+ 	ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
+ 
+ 	/* Prepare the DAC for load detection.  */
+-	gpio->set(dev, DCB_GPIO_TVDAC1, true);
+-	gpio->set(dev, DCB_GPIO_TVDAC0, true);
++	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, true);
++	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, true);
+ 
+ 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
+ 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
+@@ -111,8 +111,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
+ 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
+ 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
+ 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
+-	gpio->set(dev, DCB_GPIO_TVDAC1, gpio1);
+-	gpio->set(dev, DCB_GPIO_TVDAC0, gpio0);
++	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, gpio1);
++	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, gpio0);
+ 
+ 	return sample;
+ }
+@@ -357,8 +357,6 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
+ static void  nv17_tv_dpms(struct drm_encoder *encoder, int mode)
+ {
+ 	struct drm_device *dev = encoder->dev;
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
+ 	struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
+ 	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ 
+@@ -383,8 +381,8 @@ static void  nv17_tv_dpms(struct drm_encoder *encoder, int mode)
+ 
+ 	nv_load_ptv(dev, regs, 200);
+ 
+-	gpio->set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
+-	gpio->set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
++	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
++	nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
+ 
+ 	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
+ }
+diff --git a/drivers/gpu/drm/nouveau/nv20_fb.c b/drivers/gpu/drm/nouveau/nv20_fb.c
+new file mode 100644
+index 0000000..19bd640
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv20_fb.c
+@@ -0,0 +1,148 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++static struct drm_mm_node *
++nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
++	struct drm_mm_node *mem;
++	int ret;
++
++	ret = drm_mm_pre_get(&pfb->tag_heap);
++	if (ret)
++		return NULL;
++
++	spin_lock(&dev_priv->tile.lock);
++	mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
++	if (mem)
++		mem = drm_mm_get_block_atomic(mem, size, 0);
++	spin_unlock(&dev_priv->tile.lock);
++
++	return mem;
++}
++
++static void
++nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node **pmem)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_mm_node *mem = *pmem;
++	if (mem) {
++		spin_lock(&dev_priv->tile.lock);
++		drm_mm_put_block(mem);
++		spin_unlock(&dev_priv->tile.lock);
++		*pmem = NULL;
++	}
++}
++
++void
++nv20_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
++			 uint32_t size, uint32_t pitch, uint32_t flags)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
++	int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
++
++	tile->addr  = 0x00000001 | addr;
++	tile->limit = max(1u, addr + size) - 1;
++	tile->pitch = pitch;
++
++	/* Allocate some of the on-die tag memory, used to store Z
++	 * compression meta-data (most likely just a bitmap determining
++	 * if a given tile is compressed or not).
++	 */
++	if (flags & NOUVEAU_GEM_TILE_ZETA) {
++		tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
++		if (tile->tag_mem) {
++			/* Enable Z compression */
++			tile->zcomp = tile->tag_mem->start;
++			if (dev_priv->chipset >= 0x25) {
++				if (bpp == 16)
++					tile->zcomp |= NV25_PFB_ZCOMP_MODE_16;
++				else
++					tile->zcomp |= NV25_PFB_ZCOMP_MODE_32;
++			} else {
++				tile->zcomp |= NV20_PFB_ZCOMP_EN;
++				if (bpp != 16)
++					tile->zcomp |= NV20_PFB_ZCOMP_MODE_32;
++			}
++		}
++
++		tile->addr |= 2;
++	}
++}
++
++void
++nv20_fb_free_tile_region(struct drm_device *dev, int i)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
++
++	tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
++	nv20_fb_free_tag(dev, &tile->tag_mem);
++}
++
++void
++nv20_fb_set_tile_region(struct drm_device *dev, int i)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
++
++	nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
++	nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
++	nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
++	nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
++}
++
++int
++nv20_fb_vram_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	u32 mem_size = nv_rd32(dev, 0x10020c);
++	u32 pbus1218 = nv_rd32(dev, 0x001218);
++
++	dev_priv->vram_size = mem_size & 0xff000000;
++	switch (pbus1218 & 0x00000300) {
++	case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
++	case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
++	case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
++	case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_GDDR2; break;
++	}
++
++	return 0;
++}
++
++int
++nv20_fb_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
++	int i;
++
++	if (dev_priv->chipset >= 0x25)
++		drm_mm_init(&pfb->tag_heap, 0, 64 * 1024);
++	else
++		drm_mm_init(&pfb->tag_heap, 0, 32 * 1024);
++
++	/* Turn all the tiling regions off. */
++	pfb->num_tiles = NV10_PFB_TILE__SIZE;
++	for (i = 0; i < pfb->num_tiles; i++)
++		pfb->set_tile_region(dev, i);
++
++	return 0;
++}
++
++void
++nv20_fb_takedown(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
++	int i;
++
++	for (i = 0; i < pfb->num_tiles; i++)
++		pfb->free_tile_region(dev, i);
++
++	drm_mm_takedown(&pfb->tag_heap);
++}
+diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
+index f0ac2a7..7fbcb33 100644
+--- a/drivers/gpu/drm/nouveau/nv40_fb.c
++++ b/drivers/gpu/drm/nouveau/nv40_fb.c
+@@ -72,6 +72,51 @@ nv44_fb_init_gart(struct drm_device *dev)
+ }
+ 
+ int
++nv40_fb_vram_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	/* 0x001218 is actually present on a few other NV4X I looked at,
++	 * and even contains sane values matching 0x100474.  From looking
++	 * at various vbios images however, this isn't the case everywhere.
++	 * So, I chose to use the same regs I've seen NVIDIA reading around
++	 * the memory detection, hopefully that'll get us the right numbers
++	 */
++	if (dev_priv->chipset == 0x40) {
++		u32 pbus1218 = nv_rd32(dev, 0x001218);
++		switch (pbus1218 & 0x00000300) {
++		case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
++		case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
++		case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
++		case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
++		}
++	} else
++	if (dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
++		u32 pfb914 = nv_rd32(dev, 0x100914);
++		switch (pfb914 & 0x00000003) {
++		case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
++		case 0x00000001: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
++		case 0x00000002: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
++		case 0x00000003: break;
++		}
++	} else
++	if (dev_priv->chipset != 0x4e) {
++		u32 pfb474 = nv_rd32(dev, 0x100474);
++		if (pfb474 & 0x00000004)
++			dev_priv->vram_type = NV_MEM_TYPE_GDDR3;
++		if (pfb474 & 0x00000002)
++			dev_priv->vram_type = NV_MEM_TYPE_DDR2;
++		if (pfb474 & 0x00000001)
++			dev_priv->vram_type = NV_MEM_TYPE_DDR1;
++	} else {
++		dev_priv->vram_type = NV_MEM_TYPE_STOLEN;
++	}
++
++	dev_priv->vram_size = nv_rd32(dev, 0x10020c) & 0xff000000;
++	return 0;
++}
++
++int
+ nv40_fb_init(struct drm_device *dev)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
+index e676b0d..c761538 100644
+--- a/drivers/gpu/drm/nouveau/nv40_pm.c
++++ b/drivers/gpu/drm/nouveau/nv40_pm.c
+@@ -222,7 +222,7 @@ nv40_pm_gr_idle(void *data)
+ 	return true;
+ }
+ 
+-void
++int
+ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+@@ -231,7 +231,7 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
+ 	struct bit_entry M;
+ 	u32 crtc_mask = 0;
+ 	u8 sr1[2];
+-	int i;
++	int i, ret = -EAGAIN;
+ 
+ 	/* determine which CRTCs are active, fetch VGA_SR1 for each */
+ 	for (i = 0; i < 2; i++) {
+@@ -263,6 +263,8 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
+ 	if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
+ 		goto resume;
+ 
++	ret = 0;
++
+ 	/* set engine clocks */
+ 	nv_mask(dev, 0x00c040, 0x00000333, 0x00000000);
+ 	nv_wr32(dev, 0x004004, info->npll_coef);
+@@ -345,4 +347,48 @@ resume:
+ 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ 
+ 	kfree(info);
++	return ret;
++}
++
++int
++nv40_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
++{
++	if (line == 2) {
++		u32 reg = nv_rd32(dev, 0x0010f0);
++		if (reg & 0x80000000) {
++			*duty = (reg & 0x7fff0000) >> 16;
++			*divs = (reg & 0x00007fff);
++			return 0;
++		}
++	} else
++	if (line == 9) {
++		u32 reg = nv_rd32(dev, 0x0015f4);
++		if (reg & 0x80000000) {
++			*divs = nv_rd32(dev, 0x0015f8);
++			*duty = (reg & 0x7fffffff);
++			return 0;
++		}
++	} else {
++		NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
++		return -ENODEV;
++	}
++
++	return -EINVAL;
++}
++
++int
++nv40_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
++{
++	if (line == 2) {
++		nv_wr32(dev, 0x0010f0, 0x80000000 | (duty << 16) | divs);
++	} else
++	if (line == 9) {
++		nv_wr32(dev, 0x0015f8, divs);
++		nv_wr32(dev, 0x0015f4, duty | 0x80000000);
++	} else {
++		NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
++		return -ENODEV;
++	}
++
++	return 0;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
+index 882080e..701b927 100644
+--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
++++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
+@@ -132,30 +132,74 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
+ }
+ 
+ static int
+-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
++nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
++{
++	struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
++	struct nouveau_connector *nv_connector;
++	struct drm_connector *connector;
++	int head = nv_crtc->index, ret;
++	u32 mode = 0x00;
++
++	nv_connector = nouveau_crtc_connector_get(nv_crtc);
++	connector = &nv_connector->base;
++	if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
++		if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
++			mode = DITHERING_MODE_DYNAMIC2X2;
++	} else {
++		mode = nv_connector->dithering_mode;
++	}
++
++	if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
++		if (connector->display_info.bpc >= 8)
++			mode |= DITHERING_DEPTH_8BPC;
++	} else {
++		mode |= nv_connector->dithering_depth;
++	}
++
++	ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
++	if (ret == 0) {
++		BEGIN_RING(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
++		OUT_RING  (evo, mode);
++		if (update) {
++			BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
++			OUT_RING  (evo, 0);
++			FIRE_RING (evo);
++		}
++	}
++
++	return ret;
++}
++
++static int
++nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
+ {
+ 	struct drm_device *dev = nv_crtc->base.dev;
+ 	struct nouveau_channel *evo = nv50_display(dev)->master;
+ 	int ret;
++	int adj;
++	u32 hue, vib;
+ 
+-	NV_DEBUG_KMS(dev, "\n");
++	NV_DEBUG_KMS(dev, "vibrance = %i, hue = %i\n",
++		     nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
+ 
+ 	ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
+ 	if (ret) {
+-		NV_ERROR(dev, "no space while setting dither\n");
++		NV_ERROR(dev, "no space while setting color vibrance\n");
+ 		return ret;
+ 	}
+ 
+-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1);
+-	if (on)
+-		OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
+-	else
+-		OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF);
++	adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
++	vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
++
++	hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
++
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
++	OUT_RING  (evo, (hue << 20) | (vib << 8));
+ 
+ 	if (update) {
+ 		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+-		OUT_RING(evo, 0);
+-		FIRE_RING(evo);
++		OUT_RING  (evo, 0);
++		FIRE_RING (evo);
+ 	}
+ 
+ 	return 0;
+@@ -180,80 +224,103 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
+ }
+ 
+ static int
+-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
++nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
+ {
+-	struct nouveau_connector *nv_connector =
+-		nouveau_crtc_connector_get(nv_crtc);
+-	struct drm_device *dev = nv_crtc->base.dev;
++	struct nouveau_connector *nv_connector;
++	struct drm_crtc *crtc = &nv_crtc->base;
++	struct drm_device *dev = crtc->dev;
+ 	struct nouveau_channel *evo = nv50_display(dev)->master;
+-	struct drm_display_mode *native_mode = NULL;
+-	struct drm_display_mode *mode = &nv_crtc->base.mode;
+-	uint32_t outX, outY, horiz, vert;
+-	int ret;
++	struct drm_display_mode *umode = &crtc->mode;
++	struct drm_display_mode *omode;
++	int scaling_mode, ret;
++	u32 ctrl = 0, oX, oY;
+ 
+ 	NV_DEBUG_KMS(dev, "\n");
+ 
+-	switch (scaling_mode) {
+-	case DRM_MODE_SCALE_NONE:
+-		break;
+-	default:
+-		if (!nv_connector || !nv_connector->native_mode) {
+-			NV_ERROR(dev, "No native mode, forcing panel scaling\n");
+-			scaling_mode = DRM_MODE_SCALE_NONE;
++	nv_connector = nouveau_crtc_connector_get(nv_crtc);
++	if (!nv_connector || !nv_connector->native_mode) {
++		NV_ERROR(dev, "no native mode, forcing panel scaling\n");
++		scaling_mode = DRM_MODE_SCALE_NONE;
++	} else {
++		scaling_mode = nv_connector->scaling_mode;
++	}
++
++	/* start off at the resolution we programmed the crtc for, this
++	 * effectively handles NONE/FULL scaling
++	 */
++	if (scaling_mode != DRM_MODE_SCALE_NONE)
++		omode = nv_connector->native_mode;
++	else
++		omode = umode;
++
++	oX = omode->hdisplay;
++	oY = omode->vdisplay;
++	if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
++		oY *= 2;
++
++	/* add overscan compensation if necessary, will keep the aspect
++	 * ratio the same as the backend mode unless overridden by the
++	 * user setting both hborder and vborder properties.
++	 */
++	if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
++			     (nv_connector->underscan == UNDERSCAN_AUTO &&
++			      nv_connector->edid &&
++			      drm_detect_hdmi_monitor(nv_connector->edid)))) {
++		u32 bX = nv_connector->underscan_hborder;
++		u32 bY = nv_connector->underscan_vborder;
++		u32 aspect = (oY << 19) / oX;
++
++		if (bX) {
++			oX -= (bX * 2);
++			if (bY) oY -= (bY * 2);
++			else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
+ 		} else {
+-			native_mode = nv_connector->native_mode;
++			oX -= (oX >> 4) + 32;
++			if (bY) oY -= (bY * 2);
++			else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
+ 		}
+-		break;
+ 	}
+ 
++	/* handle CENTER/ASPECT scaling, taking into account the areas
++	 * removed already for overscan compensation
++	 */
+ 	switch (scaling_mode) {
++	case DRM_MODE_SCALE_CENTER:
++		oX = min((u32)umode->hdisplay, oX);
++		oY = min((u32)umode->vdisplay, oY);
++		/* fall-through */
+ 	case DRM_MODE_SCALE_ASPECT:
+-		horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
+-		vert = (native_mode->vdisplay << 19) / mode->vdisplay;
+-
+-		if (vert > horiz) {
+-			outX = (mode->hdisplay * horiz) >> 19;
+-			outY = (mode->vdisplay * horiz) >> 19;
++		if (oY < oX) {
++			u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
++			oX = ((oY * aspect) + (aspect / 2)) >> 19;
+ 		} else {
+-			outX = (mode->hdisplay * vert) >> 19;
+-			outY = (mode->vdisplay * vert) >> 19;
++			u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
++			oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ 		}
+ 		break;
+-	case DRM_MODE_SCALE_FULLSCREEN:
+-		outX = native_mode->hdisplay;
+-		outY = native_mode->vdisplay;
+-		break;
+-	case DRM_MODE_SCALE_CENTER:
+-	case DRM_MODE_SCALE_NONE:
+ 	default:
+-		outX = mode->hdisplay;
+-		outY = mode->vdisplay;
+ 		break;
+ 	}
+ 
+-	ret = RING_SPACE(evo, update ? 7 : 5);
++	if (umode->hdisplay != oX || umode->vdisplay != oY ||
++	    umode->flags & DRM_MODE_FLAG_INTERLACE ||
++	    umode->flags & DRM_MODE_FLAG_DBLSCAN)
++		ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
++
++	ret = RING_SPACE(evo, 5);
+ 	if (ret)
+ 		return ret;
+ 
+-	/* Got a better name for SCALER_ACTIVE? */
+-	/* One day i've got to really figure out why this is needed. */
+ 	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
+-	if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
+-	    (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+-	    mode->hdisplay != outX || mode->vdisplay != outY) {
+-		OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
+-	} else {
+-		OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
+-	}
+-
++	OUT_RING  (evo, ctrl);
+ 	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
+-	OUT_RING(evo, outY << 16 | outX);
+-	OUT_RING(evo, outY << 16 | outX);
++	OUT_RING  (evo, oY << 16 | oX);
++	OUT_RING  (evo, oY << 16 | oX);
+ 
+ 	if (update) {
+-		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+-		OUT_RING(evo, 0);
+-		FIRE_RING(evo);
++		nv50_display_flip_stop(crtc);
++		nv50_display_sync(dev);
++		nv50_display_flip_next(crtc, crtc->fb, NULL);
+ 	}
+ 
+ 	return 0;
+@@ -333,7 +400,6 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
+ 	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ 	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ 	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+-	kfree(nv_crtc->mode);
+ 	kfree(nv_crtc);
+ }
+ 
+@@ -441,39 +507,6 @@ nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+ {
+ }
+ 
+-static int
+-nv50_crtc_wait_complete(struct drm_crtc *crtc)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+-	struct nv50_display *disp = nv50_display(dev);
+-	struct nouveau_channel *evo = disp->master;
+-	u64 start;
+-	int ret;
+-
+-	ret = RING_SPACE(evo, 6);
+-	if (ret)
+-		return ret;
+-	BEGIN_RING(evo, 0, 0x0084, 1);
+-	OUT_RING  (evo, 0x80000000);
+-	BEGIN_RING(evo, 0, 0x0080, 1);
+-	OUT_RING  (evo, 0);
+-	BEGIN_RING(evo, 0, 0x0084, 1);
+-	OUT_RING  (evo, 0x00000000);
+-
+-	nv_wo32(disp->ntfy, 0x000, 0x00000000);
+-	FIRE_RING (evo);
+-
+-	start = ptimer->read(dev);
+-	do {
+-		if (nv_ro32(disp->ntfy, 0x000))
+-			return 0;
+-	} while (ptimer->read(dev) - start < 2000000000ULL);
+-
+-	return -EBUSY;
+-}
+-
+ static void
+ nv50_crtc_prepare(struct drm_crtc *crtc)
+ {
+@@ -497,7 +530,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
+ 
+ 	nv50_crtc_blank(nv_crtc, false);
+ 	drm_vblank_post_modeset(dev, nv_crtc->index);
+-	nv50_crtc_wait_complete(crtc);
++	nv50_display_sync(dev);
+ 	nv50_display_flip_next(crtc, crtc->fb, NULL);
+ }
+ 
+@@ -579,8 +612,6 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
+ 	OUT_RING  (evo, fb->base.depth == 8 ?
+ 		   NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
+ 
+-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
+-	OUT_RING  (evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
+ 	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
+ 	OUT_RING  (evo, (y << 16) | x);
+ 
+@@ -593,90 +624,77 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
+ }
+ 
+ static int
+-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+-		   struct drm_display_mode *adjusted_mode, int x, int y,
++nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
++		   struct drm_display_mode *mode, int x, int y,
+ 		   struct drm_framebuffer *old_fb)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct nouveau_channel *evo = nv50_display(dev)->master;
+ 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+-	struct nouveau_connector *nv_connector = NULL;
+-	uint32_t hsync_dur,  vsync_dur, hsync_start_to_end, vsync_start_to_end;
+-	uint32_t hunk1, vunk1, vunk2a, vunk2b;
++	u32 head = nv_crtc->index * 0x400;
++	u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
++	u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
++	u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
++	u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
++	u32 vblan2e = 0, vblan2s = 1;
+ 	int ret;
+ 
+-	/* Find the connector attached to this CRTC */
+-	nv_connector = nouveau_crtc_connector_get(nv_crtc);
+-
+-	*nv_crtc->mode = *adjusted_mode;
+-
+-	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
++	/* hw timing description looks like this:
++	 *
++	 * <sync> <back porch> <---------display---------> <front porch>
++	 * ______
++	 *       |____________|---------------------------|____________|
++	 *
++	 *       ^ synce      ^ blanke                    ^ blanks     ^ active
++	 *
++	 * interlaced modes also have 2 additional values pointing at the end
++	 * and start of the next field's blanking period.
++	 */
+ 
+-	hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
+-	vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
+-	hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
+-	vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
+-	/* I can't give this a proper name, anyone else can? */
+-	hunk1 = adjusted_mode->htotal -
+-		adjusted_mode->hsync_start + adjusted_mode->hdisplay;
+-	vunk1 = adjusted_mode->vtotal -
+-		adjusted_mode->vsync_start + adjusted_mode->vdisplay;
+-	/* Another strange value, this time only for interlaced adjusted_modes. */
+-	vunk2a = 2 * adjusted_mode->vtotal -
+-		 adjusted_mode->vsync_start + adjusted_mode->vdisplay;
+-	vunk2b = adjusted_mode->vtotal -
+-		 adjusted_mode->vsync_start + adjusted_mode->vtotal;
+-
+-	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+-		vsync_dur /= 2;
+-		vsync_start_to_end  /= 2;
+-		vunk1 /= 2;
+-		vunk2a /= 2;
+-		vunk2b /= 2;
+-		/* magic */
+-		if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+-			vsync_start_to_end -= 1;
+-			vunk1 -= 1;
+-			vunk2a -= 1;
+-			vunk2b -= 1;
+-		}
++	hactive = mode->htotal;
++	hsynce  = mode->hsync_end - mode->hsync_start - 1;
++	hbackp  = mode->htotal - mode->hsync_end;
++	hblanke = hsynce + hbackp;
++	hfrontp = mode->hsync_start - mode->hdisplay;
++	hblanks = mode->htotal - hfrontp - 1;
++
++	vactive = mode->vtotal * vscan / ilace;
++	vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
++	vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
++	vblanke = vsynce + vbackp;
++	vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
++	vblanks = vactive - vfrontp - 1;
++	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
++		vblan2e = vactive + vsynce + vbackp;
++		vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
++		vactive = (vactive * 2) + 1;
+ 	}
+ 
+-	ret = RING_SPACE(evo, 17);
+-	if (ret)
+-		return ret;
+-
+-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
+-	OUT_RING(evo, adjusted_mode->clock | 0x800000);
+-	OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);
+-
+-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
+-	OUT_RING(evo, 0);
+-	OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
+-	OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
+-	OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
+-			(hsync_start_to_end - 1));
+-	OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));
+-
+-	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+-		BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
+-		OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
+-	} else {
+-		OUT_RING(evo, 0);
+-		OUT_RING(evo, 0);
++	ret = RING_SPACE(evo, 18);
++	if (ret == 0) {
++		BEGIN_RING(evo, 0, 0x0804 + head, 2);
++		OUT_RING  (evo, 0x00800000 | mode->clock);
++		OUT_RING  (evo, (ilace == 2) ? 2 : 0);
++		BEGIN_RING(evo, 0, 0x0810 + head, 6);
++		OUT_RING  (evo, 0x00000000); /* border colour */
++		OUT_RING  (evo, (vactive << 16) | hactive);
++		OUT_RING  (evo, ( vsynce << 16) | hsynce);
++		OUT_RING  (evo, (vblanke << 16) | hblanke);
++		OUT_RING  (evo, (vblanks << 16) | hblanks);
++		OUT_RING  (evo, (vblan2e << 16) | vblan2s);
++		BEGIN_RING(evo, 0, 0x082c + head, 1);
++		OUT_RING  (evo, 0x00000000);
++		BEGIN_RING(evo, 0, 0x0900 + head, 1);
++		OUT_RING  (evo, 0x00000311); /* makes sync channel work */
++		BEGIN_RING(evo, 0, 0x08c8 + head, 1);
++		OUT_RING  (evo, (umode->vdisplay << 16) | umode->hdisplay);
++		BEGIN_RING(evo, 0, 0x08d4 + head, 1);
++		OUT_RING  (evo, 0x00000000); /* screen position */
+ 	}
+ 
+-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
+-	OUT_RING(evo, 0);
+-
+-	/* This is the actual resolution of the mode. */
+-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
+-	OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
+-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
+-	OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));
+-
+-	nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
+-	nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
++	nv_crtc->set_dither(nv_crtc, false);
++	nv_crtc->set_scale(nv_crtc, false);
++	nv_crtc->set_color_vibrance(nv_crtc, false);
+ 
+ 	return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
+ }
+@@ -692,7 +710,7 @@ nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = nv50_crtc_wait_complete(crtc);
++	ret = nv50_display_sync(crtc->dev);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -711,7 +729,7 @@ nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+ 	if (ret)
+ 		return ret;
+ 
+-	return nv50_crtc_wait_complete(crtc);
++	return nv50_display_sync(crtc->dev);
+ }
+ 
+ static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
+@@ -737,11 +755,8 @@ nv50_crtc_create(struct drm_device *dev, int index)
+ 	if (!nv_crtc)
+ 		return -ENOMEM;
+ 
+-	nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
+-	if (!nv_crtc->mode) {
+-		kfree(nv_crtc);
+-		return -ENOMEM;
+-	}
++	nv_crtc->color_vibrance = 50;
++	nv_crtc->vibrant_hue = 0;
+ 
+ 	/* Default CLUT parameters, will be activated on the hw upon
+ 	 * first mode set.
+@@ -764,7 +779,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
+ 	}
+ 
+ 	if (ret) {
+-		kfree(nv_crtc->mode);
+ 		kfree(nv_crtc);
+ 		return ret;
+ 	}
+@@ -774,6 +788,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
+ 	/* set function pointers */
+ 	nv_crtc->set_dither = nv50_crtc_set_dither;
+ 	nv_crtc->set_scale = nv50_crtc_set_scale;
++	nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
+ 
+ 	drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
+ 	drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
+diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
+index 808f3ec..55c5633 100644
+--- a/drivers/gpu/drm/nouveau/nv50_dac.c
++++ b/drivers/gpu/drm/nouveau/nv50_dac.c
+@@ -190,21 +190,13 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 	}
+ 
+ 	if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
+-	     connector->native_mode) {
+-		int id = adjusted_mode->base.id;
+-		*adjusted_mode = *connector->native_mode;
+-		adjusted_mode->base.id = id;
+-	}
++	     connector->native_mode)
++		drm_mode_copy(adjusted_mode, connector->native_mode);
+ 
+ 	return true;
+ }
+ 
+ static void
+-nv50_dac_prepare(struct drm_encoder *encoder)
+-{
+-}
+-
+-static void
+ nv50_dac_commit(struct drm_encoder *encoder)
+ {
+ }
+@@ -266,7 +258,7 @@ static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
+ 	.save = nv50_dac_save,
+ 	.restore = nv50_dac_restore,
+ 	.mode_fixup = nv50_dac_mode_fixup,
+-	.prepare = nv50_dac_prepare,
++	.prepare = nv50_dac_disconnect,
+ 	.commit = nv50_dac_commit,
+ 	.mode_set = nv50_dac_mode_set,
+ 	.get_crtc = nv50_dac_crtc_get,
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+index 06de250..8b78b9c 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -50,9 +50,76 @@ nv50_sor_nr(struct drm_device *dev)
+ 	return 4;
+ }
+ 
++u32
++nv50_display_active_crtcs(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	u32 mask = 0;
++	int i;
++
++	if (dev_priv->chipset  < 0x90 ||
++	    dev_priv->chipset == 0x92 ||
++	    dev_priv->chipset == 0xa0) {
++		for (i = 0; i < 2; i++)
++			mask |= nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
++	} else {
++		for (i = 0; i < 4; i++)
++			mask |= nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
++	}
++
++	for (i = 0; i < 3; i++)
++		mask |= nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
++
++	return mask & 3;
++}
++
++static int
++evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data)
++{
++	int ret = 0;
++	nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000001);
++	nv_wr32(dev, 0x610304 + (ch * 0x08), data);
++	nv_wr32(dev, 0x610300 + (ch * 0x08), 0x80000001 | mthd);
++	if (!nv_wait(dev, 0x610300 + (ch * 0x08), 0x80000000, 0x00000000))
++		ret = -EBUSY;
++	if (ret || (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO))
++		NV_INFO(dev, "EvoPIO: %d 0x%04x 0x%08x\n", ch, mthd, data);
++	nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000000);
++	return ret;
++}
++
+ int
+ nv50_display_early_init(struct drm_device *dev)
+ {
++	u32 ctrl = nv_rd32(dev, 0x610200);
++	int i;
++
++	/* check if master evo channel is already active, a good a sign as any
++	 * that the display engine is in a weird state (hibernate/kexec), if
++	 * it is, do our best to reset the display engine...
++	 */
++	if ((ctrl & 0x00000003) == 0x00000003) {
++		NV_INFO(dev, "PDISP: EVO(0) 0x%08x, resetting...\n", ctrl);
++
++		/* deactivate both heads first, PDISP will disappear forever
++		 * (well, until you power cycle) on some boards as soon as
++		 * PMC_ENABLE is hit unless they are..
++		 */
++		for (i = 0; i < 2; i++) {
++			evo_icmd(dev, 0, 0x0880 + (i * 0x400), 0x05000000);
++			evo_icmd(dev, 0, 0x089c + (i * 0x400), 0);
++			evo_icmd(dev, 0, 0x0840 + (i * 0x400), 0);
++			evo_icmd(dev, 0, 0x0844 + (i * 0x400), 0);
++			evo_icmd(dev, 0, 0x085c + (i * 0x400), 0);
++			evo_icmd(dev, 0, 0x0874 + (i * 0x400), 0);
++		}
++		evo_icmd(dev, 0, 0x0080, 0);
++
++		/* reset PDISP */
++		nv_mask(dev, 0x000200, 0x40000000, 0x00000000);
++		nv_mask(dev, 0x000200, 0x40000000, 0x40000000);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -62,11 +129,40 @@ nv50_display_late_takedown(struct drm_device *dev)
+ }
+ 
+ int
+-nv50_display_init(struct drm_device *dev)
++nv50_display_sync(struct drm_device *dev)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+-	struct drm_connector *connector;
++	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
++	struct nv50_display *disp = nv50_display(dev);
++	struct nouveau_channel *evo = disp->master;
++	u64 start;
++	int ret;
++
++	ret = RING_SPACE(evo, 6);
++	if (ret == 0) {
++		BEGIN_RING(evo, 0, 0x0084, 1);
++		OUT_RING  (evo, 0x80000000);
++		BEGIN_RING(evo, 0, 0x0080, 1);
++		OUT_RING  (evo, 0);
++		BEGIN_RING(evo, 0, 0x0084, 1);
++		OUT_RING  (evo, 0x00000000);
++
++		nv_wo32(disp->ntfy, 0x000, 0x00000000);
++		FIRE_RING (evo);
++
++		start = ptimer->read(dev);
++		do {
++			if (nv_ro32(disp->ntfy, 0x000))
++				return 0;
++		} while (ptimer->read(dev) - start < 2000000000ULL);
++	}
++
++	return -EBUSY;
++}
++
++int
++nv50_display_init(struct drm_device *dev)
++{
+ 	struct nouveau_channel *evo;
+ 	int ret, i;
+ 	u32 val;
+@@ -161,16 +257,6 @@ nv50_display_init(struct drm_device *dev)
+ 		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
+ 		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
+ 
+-	/* enable hotplug interrupts */
+-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+-		struct nouveau_connector *conn = nouveau_connector(connector);
+-
+-		if (conn->dcb->gpio_tag == 0xff)
+-			continue;
+-
+-		pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
+-	}
+-
+ 	ret = nv50_evo_init(dev);
+ 	if (ret)
+ 		return ret;
+@@ -178,36 +264,19 @@ nv50_display_init(struct drm_device *dev)
+ 
+ 	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
+ 
+-	ret = RING_SPACE(evo, 15);
++	ret = RING_SPACE(evo, 3);
+ 	if (ret)
+ 		return ret;
+ 	BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
+-	OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
+-	OUT_RING(evo, NvEvoSync);
+-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
+-	OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
+-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
+-	OUT_RING(evo, 0);
+-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
+-	OUT_RING(evo, 0);
+-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
+-	OUT_RING(evo, 0);
+-	/* required to make display sync channels not hate life */
+-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK900), 1);
+-	OUT_RING  (evo, 0x00000311);
+-	BEGIN_RING(evo, 0, NV50_EVO_CRTC(1, UNK900), 1);
+-	OUT_RING  (evo, 0x00000311);
+-	FIRE_RING(evo);
+-	if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
+-		NV_ERROR(dev, "evo pushbuf stalled\n");
+-
++	OUT_RING  (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
++	OUT_RING  (evo, NvEvoSync);
+ 
+-	return 0;
++	return nv50_display_sync(dev);
+ }
+ 
+-static int nv50_display_disable(struct drm_device *dev)
++void
++nv50_display_fini(struct drm_device *dev)
+ {
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nv50_display *disp = nv50_display(dev);
+ 	struct nouveau_channel *evo = disp->master;
+ 	struct drm_crtc *drm_crtc;
+@@ -270,18 +339,10 @@ static int nv50_display_disable(struct drm_device *dev)
+ 
+ 	/* disable interrupts. */
+ 	nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
+-
+-	/* disable hotplug interrupts */
+-	nv_wr32(dev, 0xe054, 0xffffffff);
+-	nv_wr32(dev, 0xe050, 0x00000000);
+-	if (dev_priv->chipset >= 0x90) {
+-		nv_wr32(dev, 0xe074, 0xffffffff);
+-		nv_wr32(dev, 0xe070, 0x00000000);
+-	}
+-	return 0;
+ }
+ 
+-int nv50_display_create(struct drm_device *dev)
++int
++nv50_display_create(struct drm_device *dev)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct dcb_table *dcb = &dev_priv->vbios.dcb;
+@@ -341,7 +402,7 @@ int nv50_display_create(struct drm_device *dev)
+ 	tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
+ 	nouveau_irq_register(dev, 26, nv50_display_isr);
+ 
+-	ret = nv50_display_init(dev);
++	ret = nv50_evo_create(dev);
+ 	if (ret) {
+ 		nv50_display_destroy(dev);
+ 		return ret;
+@@ -357,7 +418,7 @@ nv50_display_destroy(struct drm_device *dev)
+ 
+ 	NV_DEBUG_KMS(dev, "\n");
+ 
+-	nv50_display_disable(dev);
++	nv50_evo_destroy(dev);
+ 	nouveau_irq_unregister(dev, 26);
+ 	kfree(disp);
+ }
+@@ -413,15 +474,15 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ 		}
+ 
+ 		if (dev_priv->chipset < 0xc0) {
+-			BEGIN_RING(chan, NvSubSw, 0x0060, 2);
++			BEGIN_RING(chan, 0, 0x0060, 2);
+ 			OUT_RING  (chan, NvEvoSema0 + nv_crtc->index);
+ 			OUT_RING  (chan, dispc->sem.offset);
+-			BEGIN_RING(chan, NvSubSw, 0x006c, 1);
++			BEGIN_RING(chan, 0, 0x006c, 1);
+ 			OUT_RING  (chan, 0xf00d0000 | dispc->sem.value);
+-			BEGIN_RING(chan, NvSubSw, 0x0064, 2);
++			BEGIN_RING(chan, 0, 0x0064, 2);
+ 			OUT_RING  (chan, dispc->sem.offset ^ 0x10);
+ 			OUT_RING  (chan, 0x74b1e000);
+-			BEGIN_RING(chan, NvSubSw, 0x0060, 1);
++			BEGIN_RING(chan, 0, 0x0060, 1);
+ 			if (dev_priv->chipset < 0x84)
+ 				OUT_RING  (chan, NvSema);
+ 			else
+@@ -429,12 +490,12 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ 		} else {
+ 			u64 offset = chan->dispc_vma[nv_crtc->index].offset;
+ 			offset += dispc->sem.offset;
+-			BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
++			BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
+ 			OUT_RING  (chan, upper_32_bits(offset));
+ 			OUT_RING  (chan, lower_32_bits(offset));
+ 			OUT_RING  (chan, 0xf00d0000 | dispc->sem.value);
+ 			OUT_RING  (chan, 0x1002);
+-			BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
++			BEGIN_NVC0(chan, 2, 0, 0x0010, 4);
+ 			OUT_RING  (chan, upper_32_bits(offset));
+ 			OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
+ 			OUT_RING  (chan, 0x74b1e000);
+@@ -521,7 +582,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
+ 		} else {
+ 			/* determine number of lvds links */
+ 			if (nv_connector && nv_connector->edid &&
+-			    nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
++			    nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+ 				/* http://www.spwg.org */
+ 				if (((u8 *)nv_connector->edid)[121] == 2)
+ 					script |= 0x0100;
+@@ -722,8 +783,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
+ 	if (crtc >= 0) {
+ 		pclk  = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
+ 		pclk &= 0x003fffff;
+-
+-		nv50_crtc_set_clock(dev, crtc, pclk);
++		if (pclk)
++			nv50_crtc_set_clock(dev, crtc, pclk);
+ 
+ 		tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
+ 		tmp &= ~0x000000f;
+@@ -802,9 +863,9 @@ nv50_display_unk20_handler(struct drm_device *dev)
+ 	if (type == OUTPUT_DP) {
+ 		int link = !(dcb->dpconf.sor.link & 1);
+ 		if ((mc & 0x000f0000) == 0x00020000)
+-			nouveau_dp_tu_update(dev, or, link, pclk, 18);
++			nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
+ 		else
+-			nouveau_dp_tu_update(dev, or, link, pclk, 24);
++			nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
+ 	}
+ 
+ 	if (dcb->type != OUTPUT_ANALOG) {
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
+index c2da503..5d3dd14 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.h
++++ b/drivers/gpu/drm/nouveau/nv50_display.h
+@@ -69,14 +69,20 @@ int nv50_display_early_init(struct drm_device *dev);
+ void nv50_display_late_takedown(struct drm_device *dev);
+ int nv50_display_create(struct drm_device *dev);
+ int nv50_display_init(struct drm_device *dev);
++void nv50_display_fini(struct drm_device *dev);
+ void nv50_display_destroy(struct drm_device *dev);
+ int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
+ int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
+ 
++u32  nv50_display_active_crtcs(struct drm_device *);
++
++int  nv50_display_sync(struct drm_device *);
+ int  nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+ 			    struct nouveau_channel *chan);
+ void nv50_display_flip_stop(struct drm_crtc *);
+ 
++int  nv50_evo_create(struct drm_device *dev);
++void nv50_evo_destroy(struct drm_device *dev);
+ int  nv50_evo_init(struct drm_device *dev);
+ void nv50_evo_fini(struct drm_device *dev);
+ void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
+diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
+index c99d975..9b962e9 100644
+--- a/drivers/gpu/drm/nouveau/nv50_evo.c
++++ b/drivers/gpu/drm/nouveau/nv50_evo.c
+@@ -218,7 +218,7 @@ nv50_evo_channel_fini(struct nouveau_channel *evo)
+ 	}
+ }
+ 
+-static void
++void
+ nv50_evo_destroy(struct drm_device *dev)
+ {
+ 	struct nv50_display *disp = nv50_display(dev);
+@@ -235,7 +235,7 @@ nv50_evo_destroy(struct drm_device *dev)
+ 	nv50_evo_channel_del(&disp->master);
+ }
+ 
+-static int
++int
+ nv50_evo_create(struct drm_device *dev)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+@@ -388,12 +388,6 @@ nv50_evo_init(struct drm_device *dev)
+ 	struct nv50_display *disp = nv50_display(dev);
+ 	int ret, i;
+ 
+-	if (!disp->master) {
+-		ret = nv50_evo_create(dev);
+-		if (ret)
+-			return ret;
+-	}
+-
+ 	ret = nv50_evo_channel_init(disp->master);
+ 	if (ret)
+ 		return ret;
+@@ -420,6 +414,4 @@ nv50_evo_fini(struct drm_device *dev)
+ 
+ 	if (disp->master)
+ 		nv50_evo_channel_fini(disp->master);
+-
+-	nv50_evo_destroy(dev);
+ }
+diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
+index 3860ca6..771d879 100644
+--- a/drivers/gpu/drm/nouveau/nv50_evo.h
++++ b/drivers/gpu/drm/nouveau/nv50_evo.h
+@@ -104,7 +104,8 @@
+ #define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE                            0x00000000
+ #define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE                              0x00000009
+ #define NV50_EVO_CRTC_COLOR_CTRL                                     0x000008a8
+-#define NV50_EVO_CRTC_COLOR_CTRL_COLOR                               0x00040000
++#define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE                            0x000fff00
++#define NV50_EVO_CRTC_COLOR_CTRL_HUE                                 0xfff00000
+ #define NV50_EVO_CRTC_FB_POS                                         0x000008c0
+ #define NV50_EVO_CRTC_REAL_RES                                       0x000008c8
+ #define NV50_EVO_CRTC_SCALE_CENTER_OFFSET                            0x000008d4
+diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
+index c34a074..3bc2a56 100644
+--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
++++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
+@@ -230,6 +230,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
+ 	struct drm_device *dev = chan->dev;
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_gpuobj *ramfc = NULL;
++        uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
+ 	unsigned long flags;
+ 	int ret;
+ 
+@@ -280,8 +281,9 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
+ 	nv_wo32(ramfc, 0x7c, 0x30000001);
+ 	nv_wo32(ramfc, 0x78, 0x00000000);
+ 	nv_wo32(ramfc, 0x3c, 0x403f6078);
+-	nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4);
+-	nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);
++	nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
++	nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
++                drm_order(chan->dma.ib_max + 1) << 16);
+ 
+ 	if (dev_priv->chipset != 0x50) {
+ 		nv_wo32(chan->ramin, 0, chan->id);
+diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
+index 793a5cc..f429e6a 100644
+--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
++++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
+@@ -25,229 +25,95 @@
+ #include "drmP.h"
+ #include "nouveau_drv.h"
+ #include "nouveau_hw.h"
++#include "nouveau_gpio.h"
+ 
+ #include "nv50_display.h"
+ 
+-static void nv50_gpio_isr(struct drm_device *dev);
+-static void nv50_gpio_isr_bh(struct work_struct *work);
+-
+-struct nv50_gpio_priv {
+-	struct list_head handlers;
+-	spinlock_t lock;
+-};
+-
+-struct nv50_gpio_handler {
+-	struct drm_device *dev;
+-	struct list_head head;
+-	struct work_struct work;
+-	bool inhibit;
+-
+-	struct dcb_gpio_entry *gpio;
+-
+-	void (*handler)(void *data, int state);
+-	void *data;
+-};
+-
+ static int
+-nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
++nv50_gpio_location(int line, u32 *reg, u32 *shift)
+ {
+ 	const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
+ 
+-	if (gpio->line >= 32)
++	if (line >= 32)
+ 		return -EINVAL;
+ 
+-	*reg = nv50_gpio_reg[gpio->line >> 3];
+-	*shift = (gpio->line & 7) << 2;
++	*reg = nv50_gpio_reg[line >> 3];
++	*shift = (line & 7) << 2;
+ 	return 0;
+ }
+ 
+ int
+-nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
++nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out)
+ {
+-	struct dcb_gpio_entry *gpio;
+-	uint32_t r, s, v;
+-
+-	gpio = nouveau_bios_gpio_entry(dev, tag);
+-	if (!gpio)
+-		return -ENOENT;
++	u32 reg, shift;
+ 
+-	if (nv50_gpio_location(gpio, &r, &s))
++	if (nv50_gpio_location(line, &reg, &shift))
+ 		return -EINVAL;
+ 
+-	v = nv_rd32(dev, r) >> (s + 2);
+-	return ((v & 1) == (gpio->state[1] & 1));
++	nv_mask(dev, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
++	return 0;
+ }
+ 
+ int
+-nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
++nv50_gpio_sense(struct drm_device *dev, int line)
+ {
+-	struct dcb_gpio_entry *gpio;
+-	uint32_t r, s, v;
+-
+-	gpio = nouveau_bios_gpio_entry(dev, tag);
+-	if (!gpio)
+-		return -ENOENT;
++	u32 reg, shift;
+ 
+-	if (nv50_gpio_location(gpio, &r, &s))
++	if (nv50_gpio_location(line, &reg, &shift))
+ 		return -EINVAL;
+ 
+-	v  = nv_rd32(dev, r) & ~(0x3 << s);
+-	v |= (gpio->state[state] ^ 2) << s;
+-	nv_wr32(dev, r, v);
+-	return 0;
++	return !!(nv_rd32(dev, reg) & (4 << shift));
+ }
+ 
+-int
+-nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
++void
++nv50_gpio_irq_enable(struct drm_device *dev, int line, bool on)
+ {
+-	struct dcb_gpio_entry *gpio;
+-	u32 v;
+-
+-	gpio = nouveau_bios_gpio_entry(dev, tag);
+-	if (!gpio)
+-		return -ENOENT;
++	u32 reg  = line < 16 ? 0xe050 : 0xe070;
++	u32 mask = 0x00010001 << (line & 0xf);
+ 
+-	v  = nv_rd32(dev, 0x00d610 + (gpio->line * 4));
+-	v &= 0x00004000;
+-	return (!!v == (gpio->state[1] & 1));
++	nv_wr32(dev, reg + 4, mask);
++	nv_mask(dev, reg + 0, mask, on ? mask : 0);
+ }
+ 
+ int
+-nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
++nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out)
+ {
+-	struct dcb_gpio_entry *gpio;
+-	u32 v;
+-
+-	gpio = nouveau_bios_gpio_entry(dev, tag);
+-	if (!gpio)
+-		return -ENOENT;
+-
+-	v = gpio->state[state] ^ 2;
+-
+-	nv_mask(dev, 0x00d610 + (gpio->line * 4), 0x00003000, v << 12);
++	u32 data = ((dir ^ 1) << 13) | (out << 12);
++	nv_mask(dev, 0x00d610 + (line * 4), 0x00003000, data);
++	nv_mask(dev, 0x00d604, 0x00000001, 0x00000001); /* update? */
+ 	return 0;
+ }
+ 
+ int
+-nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
+-		       void (*handler)(void *, int), void *data)
++nvd0_gpio_sense(struct drm_device *dev, int line)
+ {
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+-	struct nv50_gpio_priv *priv = pgpio->priv;
+-	struct nv50_gpio_handler *gpioh;
+-	struct dcb_gpio_entry *gpio;
+-	unsigned long flags;
+-
+-	gpio = nouveau_bios_gpio_entry(dev, tag);
+-	if (!gpio)
+-		return -ENOENT;
+-
+-	gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
+-	if (!gpioh)
+-		return -ENOMEM;
+-
+-	INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
+-	gpioh->dev  = dev;
+-	gpioh->gpio = gpio;
+-	gpioh->handler = handler;
+-	gpioh->data = data;
+-
+-	spin_lock_irqsave(&priv->lock, flags);
+-	list_add(&gpioh->head, &priv->handlers);
+-	spin_unlock_irqrestore(&priv->lock, flags);
+-	return 0;
++	return !!(nv_rd32(dev, 0x00d610 + (line * 4)) & 0x00004000);
+ }
+ 
+-void
+-nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
+-			 void (*handler)(void *, int), void *data)
+-{
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+-	struct nv50_gpio_priv *priv = pgpio->priv;
+-	struct nv50_gpio_handler *gpioh, *tmp;
+-	struct dcb_gpio_entry *gpio;
+-	LIST_HEAD(tofree);
+-	unsigned long flags;
+-
+-	gpio = nouveau_bios_gpio_entry(dev, tag);
+-	if (!gpio)
+-		return;
+-
+-	spin_lock_irqsave(&priv->lock, flags);
+-	list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
+-		if (gpioh->gpio != gpio ||
+-		    gpioh->handler != handler ||
+-		    gpioh->data != data)
+-			continue;
+-		list_move(&gpioh->head, &tofree);
+-	}
+-	spin_unlock_irqrestore(&priv->lock, flags);
+-
+-	list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
+-		flush_work_sync(&gpioh->work);
+-		kfree(gpioh);
+-	}
+-}
+-
+-bool
+-nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
+-{
+-	struct dcb_gpio_entry *gpio;
+-	u32 reg, mask;
+-
+-	gpio = nouveau_bios_gpio_entry(dev, tag);
+-	if (!gpio)
+-		return false;
+-
+-	reg  = gpio->line < 16 ? 0xe050 : 0xe070;
+-	mask = 0x00010001 << (gpio->line & 0xf);
+-
+-	nv_wr32(dev, reg + 4, mask);
+-	reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
+-	return (reg & mask) == mask;
+-}
+-
+-static int
+-nv50_gpio_create(struct drm_device *dev)
++static void
++nv50_gpio_isr(struct drm_device *dev)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+-	struct nv50_gpio_priv *priv;
+-
+-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+-	if (!priv)
+-		return -ENOMEM;
++	u32 intr0, intr1 = 0;
++	u32 hi, lo;
+ 
+-	INIT_LIST_HEAD(&priv->handlers);
+-	spin_lock_init(&priv->lock);
+-	pgpio->priv = priv;
+-	return 0;
+-}
++	intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
++	if (dev_priv->chipset >= 0x90)
++		intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+ 
+-static void
+-nv50_gpio_destroy(struct drm_device *dev)
+-{
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
++	hi = (intr0 & 0x0000ffff) | (intr1 << 16);
++	lo = (intr0 >> 16) | (intr1 & 0xffff0000);
++	nouveau_gpio_isr(dev, 0, hi | lo);
+ 
+-	kfree(pgpio->priv);
+-	pgpio->priv = NULL;
++	nv_wr32(dev, 0xe054, intr0);
++	if (dev_priv->chipset >= 0x90)
++		nv_wr32(dev, 0xe074, intr1);
+ }
+ 
+ int
+ nv50_gpio_init(struct drm_device *dev)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+-	int ret;
+-
+-	if (!pgpio->priv) {
+-		ret = nv50_gpio_create(dev);
+-		if (ret)
+-			return ret;
+-	}
+ 
+ 	/* disable, and ack any pending gpio interrupts */
+ 	nv_wr32(dev, 0xe050, 0x00000000);
+@@ -270,64 +136,4 @@ nv50_gpio_fini(struct drm_device *dev)
+ 	if (dev_priv->chipset >= 0x90)
+ 		nv_wr32(dev, 0xe070, 0x00000000);
+ 	nouveau_irq_unregister(dev, 21);
+-
+-	nv50_gpio_destroy(dev);
+-}
+-
+-static void
+-nv50_gpio_isr_bh(struct work_struct *work)
+-{
+-	struct nv50_gpio_handler *gpioh =
+-		container_of(work, struct nv50_gpio_handler, work);
+-	struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
+-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+-	struct nv50_gpio_priv *priv = pgpio->priv;
+-	unsigned long flags;
+-	int state;
+-
+-	state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
+-	if (state < 0)
+-		return;
+-
+-	gpioh->handler(gpioh->data, state);
+-
+-	spin_lock_irqsave(&priv->lock, flags);
+-	gpioh->inhibit = false;
+-	spin_unlock_irqrestore(&priv->lock, flags);
+-}
+-
+-static void
+-nv50_gpio_isr(struct drm_device *dev)
+-{
+-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+-	struct nv50_gpio_priv *priv = pgpio->priv;
+-	struct nv50_gpio_handler *gpioh;
+-	u32 intr0, intr1 = 0;
+-	u32 hi, lo, ch;
+-
+-	intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+-	if (dev_priv->chipset >= 0x90)
+-		intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+-
+-	hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+-	lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+-	ch = hi | lo;
+-
+-	nv_wr32(dev, 0xe054, intr0);
+-	if (dev_priv->chipset >= 0x90)
+-		nv_wr32(dev, 0xe074, intr1);
+-
+-	spin_lock(&priv->lock);
+-	list_for_each_entry(gpioh, &priv->handlers, head) {
+-		if (!(ch & (1 << gpioh->gpio->line)))
+-			continue;
+-
+-		if (gpioh->inhibit)
+-			continue;
+-		gpioh->inhibit = true;
+-
+-		schedule_work(&gpioh->work);
+-	}
+-	spin_unlock(&priv->lock);
+ }
+diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
+index ac601f7..33d5711 100644
+--- a/drivers/gpu/drm/nouveau/nv50_graph.c
++++ b/drivers/gpu/drm/nouveau/nv50_graph.c
+@@ -616,9 +616,9 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
+ 			}
+ 			break;
+ 		case 7: /* MP error */
+-			if (ustatus & 0x00010000) {
++			if (ustatus & 0x04030000) {
+ 				nv50_pgraph_mp_trap(dev, i, display);
+-				ustatus &= ~0x00010000;
++				ustatus &= ~0x04030000;
+ 			}
+ 			break;
+ 		case 8: /* TPDMA error */
+diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
+index 3d5a86b..d020ed4 100644
+--- a/drivers/gpu/drm/nouveau/nv50_pm.c
++++ b/drivers/gpu/drm/nouveau/nv50_pm.c
+@@ -25,122 +25,862 @@
+ #include "drmP.h"
+ #include "nouveau_drv.h"
+ #include "nouveau_bios.h"
++#include "nouveau_hw.h"
+ #include "nouveau_pm.h"
++#include "nouveau_hwsq.h"
++#include "nv50_display.h"
++
++enum clk_src {
++	clk_src_crystal,
++	clk_src_href,
++	clk_src_hclk,
++	clk_src_hclkm3,
++	clk_src_hclkm3d2,
++	clk_src_host,
++	clk_src_nvclk,
++	clk_src_sclk,
++	clk_src_mclk,
++	clk_src_vdec,
++	clk_src_dom6
++};
++
++static u32 read_clk(struct drm_device *, enum clk_src);
++
++static u32
++read_div(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	switch (dev_priv->chipset) {
++	case 0x50: /* it exists, but only has bit 31, not the dividers.. */
++	case 0x84:
++	case 0x86:
++	case 0x98:
++	case 0xa0:
++		return nv_rd32(dev, 0x004700);
++	case 0x92:
++	case 0x94:
++	case 0x96:
++		return nv_rd32(dev, 0x004800);
++	default:
++		return 0x00000000;
++	}
++}
++
++static u32
++read_pll_src(struct drm_device *dev, u32 base)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	u32 coef, ref = read_clk(dev, clk_src_crystal);
++	u32 rsel = nv_rd32(dev, 0x00e18c);
++	int P, N, M, id;
++
++	switch (dev_priv->chipset) {
++	case 0x50:
++	case 0xa0:
++		switch (base) {
++		case 0x4020:
++		case 0x4028: id = !!(rsel & 0x00000004); break;
++		case 0x4008: id = !!(rsel & 0x00000008); break;
++		case 0x4030: id = 0; break;
++		default:
++			NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
++			return 0;
++		}
++
++		coef = nv_rd32(dev, 0x00e81c + (id * 0x0c));
++		ref *=  (coef & 0x01000000) ? 2 : 4;
++		P    =  (coef & 0x00070000) >> 16;
++		N    = ((coef & 0x0000ff00) >> 8) + 1;
++		M    = ((coef & 0x000000ff) >> 0) + 1;
++		break;
++	case 0x84:
++	case 0x86:
++	case 0x92:
++		coef = nv_rd32(dev, 0x00e81c);
++		P    = (coef & 0x00070000) >> 16;
++		N    = (coef & 0x0000ff00) >> 8;
++		M    = (coef & 0x000000ff) >> 0;
++		break;
++	case 0x94:
++	case 0x96:
++	case 0x98:
++		rsel = nv_rd32(dev, 0x00c050);
++		switch (base) {
++		case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
++		case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
++		case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
++		case 0x4030: rsel = 3; break;
++		default:
++			NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
++			return 0;
++		}
++
++		switch (rsel) {
++		case 0: id = 1; break;
++		case 1: return read_clk(dev, clk_src_crystal);
++		case 2: return read_clk(dev, clk_src_href);
++		case 3: id = 0; break;
++		}
++
++		coef =  nv_rd32(dev, 0x00e81c + (id * 0x28));
++		P    = (nv_rd32(dev, 0x00e824 + (id * 0x28)) >> 16) & 7;
++		P   += (coef & 0x00070000) >> 16;
++		N    = (coef & 0x0000ff00) >> 8;
++		M    = (coef & 0x000000ff) >> 0;
++		break;
++	default:
++		BUG_ON(1);
++	}
++
++	if (M)
++		return (ref * N / M) >> P;
++	return 0;
++}
++
++static u32
++read_pll_ref(struct drm_device *dev, u32 base)
++{
++	u32 src, mast = nv_rd32(dev, 0x00c040);
++
++	switch (base) {
++	case 0x004028:
++		src = !!(mast & 0x00200000);
++		break;
++	case 0x004020:
++		src = !!(mast & 0x00400000);
++		break;
++	case 0x004008:
++		src = !!(mast & 0x00010000);
++		break;
++	case 0x004030:
++		src = !!(mast & 0x02000000);
++		break;
++	case 0x00e810:
++		return read_clk(dev, clk_src_crystal);
++	default:
++		NV_ERROR(dev, "bad pll 0x%06x\n", base);
++		return 0;
++	}
++
++	if (src)
++		return read_clk(dev, clk_src_href);
++	return read_pll_src(dev, base);
++}
++
++static u32
++read_pll(struct drm_device *dev, u32 base)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	u32 mast = nv_rd32(dev, 0x00c040);
++	u32 ctrl = nv_rd32(dev, base + 0);
++	u32 coef = nv_rd32(dev, base + 4);
++	u32 ref = read_pll_ref(dev, base);
++	u32 clk = 0;
++	int N1, N2, M1, M2;
++
++	if (base == 0x004028 && (mast & 0x00100000)) {
++		/* wtf, appears to only disable post-divider on nva0 */
++		if (dev_priv->chipset != 0xa0)
++			return read_clk(dev, clk_src_dom6);
++	}
++
++	N2 = (coef & 0xff000000) >> 24;
++	M2 = (coef & 0x00ff0000) >> 16;
++	N1 = (coef & 0x0000ff00) >> 8;
++	M1 = (coef & 0x000000ff);
++	if ((ctrl & 0x80000000) && M1) {
++		clk = ref * N1 / M1;
++		if ((ctrl & 0x40000100) == 0x40000000) {
++			if (M2)
++				clk = clk * N2 / M2;
++			else
++				clk = 0;
++		}
++	}
++
++	return clk;
++}
++
++static u32
++read_clk(struct drm_device *dev, enum clk_src src)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	u32 mast = nv_rd32(dev, 0x00c040);
++	u32 P = 0;
++
++	switch (src) {
++	case clk_src_crystal:
++		return dev_priv->crystal;
++	case clk_src_href:
++		return 100000; /* PCIE reference clock */
++	case clk_src_hclk:
++		return read_clk(dev, clk_src_href) * 27778 / 10000;
++	case clk_src_hclkm3:
++		return read_clk(dev, clk_src_hclk) * 3;
++	case clk_src_hclkm3d2:
++		return read_clk(dev, clk_src_hclk) * 3 / 2;
++	case clk_src_host:
++		switch (mast & 0x30000000) {
++		case 0x00000000: return read_clk(dev, clk_src_href);
++		case 0x10000000: break;
++		case 0x20000000: /* !0x50 */
++		case 0x30000000: return read_clk(dev, clk_src_hclk);
++		}
++		break;
++	case clk_src_nvclk:
++		if (!(mast & 0x00100000))
++			P = (nv_rd32(dev, 0x004028) & 0x00070000) >> 16;
++		switch (mast & 0x00000003) {
++		case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
++		case 0x00000001: return read_clk(dev, clk_src_dom6);
++		case 0x00000002: return read_pll(dev, 0x004020) >> P;
++		case 0x00000003: return read_pll(dev, 0x004028) >> P;
++		}
++		break;
++	case clk_src_sclk:
++		P = (nv_rd32(dev, 0x004020) & 0x00070000) >> 16;
++		switch (mast & 0x00000030) {
++		case 0x00000000:
++			if (mast & 0x00000080)
++				return read_clk(dev, clk_src_host) >> P;
++			return read_clk(dev, clk_src_crystal) >> P;
++		case 0x00000010: break;
++		case 0x00000020: return read_pll(dev, 0x004028) >> P;
++		case 0x00000030: return read_pll(dev, 0x004020) >> P;
++		}
++		break;
++	case clk_src_mclk:
++		P = (nv_rd32(dev, 0x004008) & 0x00070000) >> 16;
++		if (nv_rd32(dev, 0x004008) & 0x00000200) {
++			switch (mast & 0x0000c000) {
++			case 0x00000000:
++				return read_clk(dev, clk_src_crystal) >> P;
++			case 0x00008000:
++			case 0x0000c000:
++				return read_clk(dev, clk_src_href) >> P;
++			}
++		} else {
++			return read_pll(dev, 0x004008) >> P;
++		}
++		break;
++	case clk_src_vdec:
++		P = (read_div(dev) & 0x00000700) >> 8;
++		switch (dev_priv->chipset) {
++		case 0x84:
++		case 0x86:
++		case 0x92:
++		case 0x94:
++		case 0x96:
++		case 0xa0:
++			switch (mast & 0x00000c00) {
++			case 0x00000000:
++				if (dev_priv->chipset == 0xa0) /* wtf?? */
++					return read_clk(dev, clk_src_nvclk) >> P;
++				return read_clk(dev, clk_src_crystal) >> P;
++			case 0x00000400:
++				return 0;
++			case 0x00000800:
++				if (mast & 0x01000000)
++					return read_pll(dev, 0x004028) >> P;
++				return read_pll(dev, 0x004030) >> P;
++			case 0x00000c00:
++				return read_clk(dev, clk_src_nvclk) >> P;
++			}
++			break;
++		case 0x98:
++			switch (mast & 0x00000c00) {
++			case 0x00000000:
++				return read_clk(dev, clk_src_nvclk) >> P;
++			case 0x00000400:
++				return 0;
++			case 0x00000800:
++				return read_clk(dev, clk_src_hclkm3d2) >> P;
++			case 0x00000c00:
++				return read_clk(dev, clk_src_mclk) >> P;
++			}
++			break;
++		}
++		break;
++	case clk_src_dom6:
++		switch (dev_priv->chipset) {
++		case 0x50:
++		case 0xa0:
++			return read_pll(dev, 0x00e810) >> 2;
++		case 0x84:
++		case 0x86:
++		case 0x92:
++		case 0x94:
++		case 0x96:
++		case 0x98:
++			P = (read_div(dev) & 0x00000007) >> 0;
++			switch (mast & 0x0c000000) {
++			case 0x00000000: return read_clk(dev, clk_src_href);
++			case 0x04000000: break;
++			case 0x08000000: return read_clk(dev, clk_src_hclk);
++			case 0x0c000000:
++				return read_clk(dev, clk_src_hclkm3) >> P;
++			}
++			break;
++		default:
++			break;
++		}
++	default:
++		break;
++	}
++
++	NV_DEBUG(dev, "unknown clock source %d 0x%08x\n", src, mast);
++	return 0;
++}
++
++int
++nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	if (dev_priv->chipset == 0xaa ||
++	    dev_priv->chipset == 0xac)
++		return 0;
++
++	perflvl->core   = read_clk(dev, clk_src_nvclk);
++	perflvl->shader = read_clk(dev, clk_src_sclk);
++	perflvl->memory = read_clk(dev, clk_src_mclk);
++	if (dev_priv->chipset != 0x50) {
++		perflvl->vdec = read_clk(dev, clk_src_vdec);
++		perflvl->dom6 = read_clk(dev, clk_src_dom6);
++	}
++
++	return 0;
++}
+ 
+ struct nv50_pm_state {
+ 	struct nouveau_pm_level *perflvl;
+-	struct pll_lims pll;
+-	enum pll_types type;
+-	int N, M, P;
++	struct hwsq_ucode eclk_hwsq;
++	struct hwsq_ucode mclk_hwsq;
++	u32 mscript;
++	u32 mmast;
++	u32 mctrl;
++	u32 mcoef;
+ };
+ 
+-int
+-nv50_pm_clock_get(struct drm_device *dev, u32 id)
++static u32
++calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
++	 u32 clk, int *N1, int *M1, int *log2P)
+ {
+-	struct pll_lims pll;
+-	int P, N, M, ret;
+-	u32 reg0, reg1;
++	struct nouveau_pll_vals coef;
++	int ret;
+ 
+-	ret = get_pll_limits(dev, id, &pll);
++	ret = get_pll_limits(dev, reg, pll);
+ 	if (ret)
+-		return ret;
++		return 0;
++
++	pll->vco2.maxfreq = 0;
++	pll->refclk = read_pll_ref(dev, reg);
++	if (!pll->refclk)
++		return 0;
++
++	ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
++	if (ret == 0)
++		return 0;
+ 
+-	reg0 = nv_rd32(dev, pll.reg + 0);
+-	reg1 = nv_rd32(dev, pll.reg + 4);
+-
+-	if ((reg0 & 0x80000000) == 0) {
+-		if (id == PLL_SHADER) {
+-			NV_DEBUG(dev, "Shader PLL is disabled. "
+-				"Shader clock is twice the core\n");
+-			ret = nv50_pm_clock_get(dev, PLL_CORE);
+-			if (ret > 0)
+-				return ret << 1;
+-		} else if (id == PLL_MEMORY) {
+-			NV_DEBUG(dev, "Memory PLL is disabled. "
+-				"Memory clock is equal to the ref_clk\n");
+-			return pll.refclk;
++	*N1 = coef.N1;
++	*M1 = coef.M1;
++	*log2P = coef.log2P;
++	return ret;
++}
++
++static inline u32
++calc_div(u32 src, u32 target, int *div)
++{
++	u32 clk0 = src, clk1 = src;
++	for (*div = 0; *div <= 7; (*div)++) {
++		if (clk0 <= target) {
++			clk1 = clk0 << (*div ? 1 : 0);
++			break;
+ 		}
++		clk0 >>= 1;
++	}
++
++	if (target - clk0 <= clk1 - target)
++		return clk0;
++	(*div)--;
++	return clk1;
++}
++
++static inline u32
++clk_same(u32 a, u32 b)
++{
++	return ((a / 1000) == (b / 1000));
++}
++
++static void
++mclk_precharge(struct nouveau_mem_exec_func *exec)
++{
++	struct nv50_pm_state *info = exec->priv;
++	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
++
++	hwsq_wr32(hwsq, 0x1002d4, 0x00000001);
++}
++
++static void
++mclk_refresh(struct nouveau_mem_exec_func *exec)
++{
++	struct nv50_pm_state *info = exec->priv;
++	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
++
++	hwsq_wr32(hwsq, 0x1002d0, 0x00000001);
++}
++
++static void
++mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
++{
++	struct nv50_pm_state *info = exec->priv;
++	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
++
++	hwsq_wr32(hwsq, 0x100210, enable ? 0x80000000 : 0x00000000);
++}
++
++static void
++mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
++{
++	struct nv50_pm_state *info = exec->priv;
++	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
++
++	hwsq_wr32(hwsq, 0x1002dc, enable ? 0x00000001 : 0x00000000);
++}
++
++static void
++mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
++{
++	struct nv50_pm_state *info = exec->priv;
++	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
++
++	if (nsec > 1000)
++		hwsq_usec(hwsq, (nsec + 500) / 1000);
++}
++
++static u32
++mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
++{
++	if (mr <= 1)
++		return nv_rd32(exec->dev, 0x1002c0 + ((mr - 0) * 4));
++	if (mr <= 3)
++		return nv_rd32(exec->dev, 0x1002e0 + ((mr - 2) * 4));
++	return 0;
++}
++
++static void
++mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
++{
++	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
++	struct nv50_pm_state *info = exec->priv;
++	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
++
++	if (mr <= 1) {
++		if (dev_priv->vram_rank_B)
++			hwsq_wr32(hwsq, 0x1002c8 + ((mr - 0) * 4), data);
++		hwsq_wr32(hwsq, 0x1002c0 + ((mr - 0) * 4), data);
++	} else
++	if (mr <= 3) {
++		if (dev_priv->vram_rank_B)
++			hwsq_wr32(hwsq, 0x1002e8 + ((mr - 2) * 4), data);
++		hwsq_wr32(hwsq, 0x1002e0 + ((mr - 2) * 4), data);
++	}
++}
++
++static void
++mclk_clock_set(struct nouveau_mem_exec_func *exec)
++{
++	struct nv50_pm_state *info = exec->priv;
++	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
++	u32 ctrl = nv_rd32(exec->dev, 0x004008);
++
++	info->mmast = nv_rd32(exec->dev, 0x00c040);
++	info->mmast &= ~0xc0000000; /* get MCLK_2 from HREF */
++	info->mmast |=  0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
++
++	hwsq_wr32(hwsq, 0xc040, info->mmast);
++	hwsq_wr32(hwsq, 0x4008, ctrl | 0x00000200); /* bypass MPLL */
++	if (info->mctrl & 0x80000000)
++		hwsq_wr32(hwsq, 0x400c, info->mcoef);
++	hwsq_wr32(hwsq, 0x4008, info->mctrl);
++}
++
++static void
++mclk_timing_set(struct nouveau_mem_exec_func *exec)
++{
++	struct drm_device *dev = exec->dev;
++	struct nv50_pm_state *info = exec->priv;
++	struct nouveau_pm_level *perflvl = info->perflvl;
++	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
++	int i;
++
++	for (i = 0; i < 9; i++) {
++		u32 reg = 0x100220 + (i * 4);
++		u32 val = nv_rd32(dev, reg);
++		if (val != perflvl->timing.reg[i])
++			hwsq_wr32(hwsq, reg, perflvl->timing.reg[i]);
++	}
++}
++
++static int
++calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
++	  struct nv50_pm_state *info)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	u32 crtc_mask = nv50_display_active_crtcs(dev);
++	struct nouveau_mem_exec_func exec = {
++		.dev = dev,
++		.precharge = mclk_precharge,
++		.refresh = mclk_refresh,
++		.refresh_auto = mclk_refresh_auto,
++		.refresh_self = mclk_refresh_self,
++		.wait = mclk_wait,
++		.mrg = mclk_mrg,
++		.mrs = mclk_mrs,
++		.clock_set = mclk_clock_set,
++		.timing_set = mclk_timing_set,
++		.priv = info
++	};
++	struct hwsq_ucode *hwsq = &info->mclk_hwsq;
++	struct pll_lims pll;
++	int N, M, P;
++	int ret;
++
++	/* use pcie refclock if possible, otherwise use mpll */
++	info->mctrl  = nv_rd32(dev, 0x004008);
++	info->mctrl &= ~0x81ff0200;
++	if (clk_same(perflvl->memory, read_clk(dev, clk_src_href))) {
++		info->mctrl |= 0x00000200 | (pll.log2p_bias << 19);
++	} else {
++		ret = calc_pll(dev, 0x4008, &pll, perflvl->memory, &N, &M, &P);
++		if (ret == 0)
++			return -EINVAL;
++
++		info->mctrl |= 0x80000000 | (P << 22) | (P << 16);
++		info->mctrl |= pll.log2p_bias << 19;
++		info->mcoef  = (N << 8) | M;
+ 	}
+ 
+-	P = (reg0 & 0x00070000) >> 16;
+-	N = (reg1 & 0x0000ff00) >> 8;
+-	M = (reg1 & 0x000000ff);
++	/* build the ucode which will reclock the memory for us */
++	hwsq_init(hwsq);
++	if (crtc_mask) {
++		hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
++		hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
++	}
++	if (dev_priv->chipset >= 0x92)
++		hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
++	hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
++	hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */
+ 
+-	return ((pll.refclk * N / M) >> P);
++	ret = nouveau_mem_exec(&exec, perflvl);
++	if (ret)
++		return ret;
++
++	hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
++	hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */
++	if (dev_priv->chipset >= 0x92)
++		hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
++	hwsq_fini(hwsq);
++	return 0;
+ }
+ 
+ void *
+-nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+-		  u32 id, int khz)
++nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+ {
+-	struct nv50_pm_state *state;
+-	int dummy, ret;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv50_pm_state *info;
++	struct hwsq_ucode *hwsq;
++	struct pll_lims pll;
++	u32 out, mast, divs, ctrl;
++	int clk, ret = -EINVAL;
++	int N, M, P1, P2;
+ 
+-	state = kzalloc(sizeof(*state), GFP_KERNEL);
+-	if (!state)
++	if (dev_priv->chipset == 0xaa ||
++	    dev_priv->chipset == 0xac)
++		return ERR_PTR(-ENODEV);
++
++	info = kmalloc(sizeof(*info), GFP_KERNEL);
++	if (!info)
+ 		return ERR_PTR(-ENOMEM);
+-	state->type = id;
+-	state->perflvl = perflvl;
++	info->perflvl = perflvl;
++
++	/* memory: build hwsq ucode which we'll use to reclock memory.
++	 *         use pcie refclock if possible, otherwise use mpll */
++	info->mclk_hwsq.len = 0;
++	if (perflvl->memory) {
++		ret = calc_mclk(dev, perflvl, info);
++		if (ret)
++			goto error;
++		info->mscript = perflvl->memscript;
++	}
++
++	divs = read_div(dev);
++	mast = info->mmast;
++
++	/* start building HWSQ script for engine reclocking */
++	hwsq = &info->eclk_hwsq;
++	hwsq_init(hwsq);
++	hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
++	hwsq_op5f(hwsq, 0x00, 0x01); /* wait for access disabled? */
++
++	/* vdec/dom6: switch to "safe" clocks temporarily */
++	if (perflvl->vdec) {
++		mast &= ~0x00000c00;
++		divs &= ~0x00000700;
++	}
++
++	if (perflvl->dom6) {
++		mast &= ~0x0c000000;
++		divs &= ~0x00000007;
++	}
++
++	hwsq_wr32(hwsq, 0x00c040, mast);
++
++	/* vdec: avoid modifying xpll until we know exactly how the other
++	 * clock domains work, i suspect at least some of them can also be
++	 * tied to xpll...
++	 */
++	if (perflvl->vdec) {
++		/* see how close we can get using nvclk as a source */
++		clk = calc_div(perflvl->core, perflvl->vdec, &P1);
++
++		/* see how close we can get using xpll/hclk as a source */
++		if (dev_priv->chipset != 0x98)
++			out = read_pll(dev, 0x004030);
++		else
++			out = read_clk(dev, clk_src_hclkm3d2);
++		out = calc_div(out, perflvl->vdec, &P2);
++
++		/* select whichever gets us closest */
++		if (abs((int)perflvl->vdec - clk) <=
++		    abs((int)perflvl->vdec - out)) {
++			if (dev_priv->chipset != 0x98)
++				mast |= 0x00000c00;
++			divs |= P1 << 8;
++		} else {
++			mast |= 0x00000800;
++			divs |= P2 << 8;
++		}
++	}
++
++	/* dom6: nfi what this is, but we're limited to various combinations
++	 * of the host clock frequency
++	 */
++	if (perflvl->dom6) {
++		if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
++			mast |= 0x00000000;
++		} else
++		if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
++			mast |= 0x08000000;
++		} else {
++			clk = read_clk(dev, clk_src_hclk) * 3;
++			clk = calc_div(clk, perflvl->dom6, &P1);
++
++			mast |= 0x0c000000;
++			divs |= P1;
++		}
++	}
++
++	/* vdec/dom6: complete switch to new clocks */
++	switch (dev_priv->chipset) {
++	case 0x92:
++	case 0x94:
++	case 0x96:
++		hwsq_wr32(hwsq, 0x004800, divs);
++		break;
++	default:
++		hwsq_wr32(hwsq, 0x004700, divs);
++		break;
++	}
++
++	hwsq_wr32(hwsq, 0x00c040, mast);
++
++	/* core/shader: make sure sclk/nvclk are disconnected from their
++	 * PLLs (nvclk to dom6, sclk to hclk)
++	 */
++	if (dev_priv->chipset < 0x92)
++		mast = (mast & ~0x001000b0) | 0x00100080;
++	else
++		mast = (mast & ~0x000000b3) | 0x00000081;
++
++	hwsq_wr32(hwsq, 0x00c040, mast);
++
++	/* core: for the moment at least, always use nvpll */
++	clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
++	if (clk == 0)
++		goto error;
++
++	ctrl  = nv_rd32(dev, 0x004028) & ~0xc03f0100;
++	mast &= ~0x00100000;
++	mast |= 3;
++
++	hwsq_wr32(hwsq, 0x004028, 0x80000000 | (P1 << 19) | (P1 << 16) | ctrl);
++	hwsq_wr32(hwsq, 0x00402c, (N << 8) | M);
++
++	/* shader: tie to nvclk if possible, otherwise use spll.  have to be
++	 * very careful that the shader clock is at least twice the core, or
++	 * some chipsets will be very unhappy.  i expect most or all of these
++	 * cases will be handled by tying to nvclk, but it's possible there's
++	 * corners
++	 */
++	ctrl = nv_rd32(dev, 0x004020) & ~0xc03f0100;
++
++	if (P1-- && perflvl->shader == (perflvl->core << 1)) {
++		hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
++		hwsq_wr32(hwsq, 0x00c040, 0x00000020 | mast);
++	} else {
++		clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
++		if (clk == 0)
++			goto error;
++		ctrl |= 0x80000000;
++
++		hwsq_wr32(hwsq, 0x004020, (P1 << 19) | (P1 << 16) | ctrl);
++		hwsq_wr32(hwsq, 0x004024, (N << 8) | M);
++		hwsq_wr32(hwsq, 0x00c040, 0x00000030 | mast);
++	}
++
++	hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
++	hwsq_op5f(hwsq, 0x00, 0x00); /* wait for access enabled? */
++	hwsq_fini(hwsq);
++
++	return info;
++error:
++	kfree(info);
++	return ERR_PTR(ret);
++}
++
++static int
++prog_hwsq(struct drm_device *dev, struct hwsq_ucode *hwsq)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	u32 hwsq_data, hwsq_kick;
++	int i;
+ 
+-	ret = get_pll_limits(dev, id, &state->pll);
+-	if (ret < 0) {
+-		kfree(state);
+-		return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
++	if (dev_priv->chipset < 0x94) {
++		hwsq_data = 0x001400;
++		hwsq_kick = 0x00000003;
++	} else {
++		hwsq_data = 0x080000;
++		hwsq_kick = 0x00000001;
+ 	}
++	/* upload hwsq ucode */
++	nv_mask(dev, 0x001098, 0x00000008, 0x00000000);
++	nv_wr32(dev, 0x001304, 0x00000000);
++	if (dev_priv->chipset >= 0x92)
++		nv_wr32(dev, 0x001318, 0x00000000);
++	for (i = 0; i < hwsq->len / 4; i++)
++		nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
++	nv_mask(dev, 0x001098, 0x00000018, 0x00000018);
++
++	/* launch, and wait for completion */
++	nv_wr32(dev, 0x00130c, hwsq_kick);
++	if (!nv_wait(dev, 0x001308, 0x00000100, 0x00000000)) {
++		NV_ERROR(dev, "hwsq ucode exec timed out\n");
++		NV_ERROR(dev, "0x001308: 0x%08x\n", nv_rd32(dev, 0x001308));
++		for (i = 0; i < hwsq->len / 4; i++) {
++			NV_ERROR(dev, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
++				 nv_rd32(dev, 0x001400 + (i * 4)));
++		}
+ 
+-	ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M,
+-			    &dummy, &dummy, &state->P);
+-	if (ret < 0) {
+-		kfree(state);
+-		return ERR_PTR(ret);
++		return -EIO;
+ 	}
+ 
+-	return state;
++	return 0;
+ }
+ 
+-void
+-nv50_pm_clock_set(struct drm_device *dev, void *pre_state)
++int
++nv50_pm_clocks_set(struct drm_device *dev, void *data)
+ {
+-	struct nv50_pm_state *state = pre_state;
+-	struct nouveau_pm_level *perflvl = state->perflvl;
+-	u32 reg = state->pll.reg, tmp;
+-	struct bit_entry BIT_M;
+-	u16 script;
+-	int N = state->N;
+-	int M = state->M;
+-	int P = state->P;
++	struct nv50_pm_state *info = data;
++	struct bit_entry M;
++	int ret = -EBUSY;
++
++	/* halt and idle execution engines */
++	nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
++	if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010))
++		goto resume;
++	if (!nv_wait(dev, 0x00251c, 0x0000003f, 0x0000003f))
++		goto resume;
+ 
+-	if (state->type == PLL_MEMORY && perflvl->memscript &&
+-	    bit_table(dev, 'M', &BIT_M) == 0 &&
+-	    BIT_M.version == 1 && BIT_M.length >= 0x0b) {
+-		script = ROM16(BIT_M.data[0x05]);
+-		if (script)
+-			nouveau_bios_run_init_table(dev, script, NULL, -1);
+-		script = ROM16(BIT_M.data[0x07]);
+-		if (script)
+-			nouveau_bios_run_init_table(dev, script, NULL, -1);
+-		script = ROM16(BIT_M.data[0x09]);
+-		if (script)
+-			nouveau_bios_run_init_table(dev, script, NULL, -1);
++	/* program memory clock, if necessary - must come before engine clock
++	 * reprogramming due to how we construct the hwsq scripts in pre()
++	 */
++	if (info->mclk_hwsq.len) {
++		/* execute some scripts that do ??? from the vbios.. */
++		if (!bit_table(dev, 'M', &M) && M.version == 1) {
++			if (M.length >= 6)
++				nouveau_bios_init_exec(dev, ROM16(M.data[5]));
++			if (M.length >= 8)
++				nouveau_bios_init_exec(dev, ROM16(M.data[7]));
++			if (M.length >= 10)
++				nouveau_bios_init_exec(dev, ROM16(M.data[9]));
++			nouveau_bios_init_exec(dev, info->mscript);
++		}
+ 
+-		nouveau_bios_run_init_table(dev, perflvl->memscript, NULL, -1);
++		ret = prog_hwsq(dev, &info->mclk_hwsq);
++		if (ret)
++			goto resume;
+ 	}
+ 
+-	if (state->type == PLL_MEMORY) {
+-		nv_wr32(dev, 0x100210, 0);
+-		nv_wr32(dev, 0x1002dc, 1);
++	/* program engine clocks */
++	ret = prog_hwsq(dev, &info->eclk_hwsq);
++
++resume:
++	nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
++	kfree(info);
++	return ret;
++}
++
++static int
++pwm_info(struct drm_device *dev, int *line, int *ctrl, int *indx)
++{
++	if (*line == 0x04) {
++		*ctrl = 0x00e100;
++		*line = 4;
++		*indx = 0;
++	} else
++	if (*line == 0x09) {
++		*ctrl = 0x00e100;
++		*line = 9;
++		*indx = 1;
++	} else
++	if (*line == 0x10) {
++		*ctrl = 0x00e28c;
++		*line = 0;
++		*indx = 0;
++	} else {
++		NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", *line);
++		return -ENODEV;
+ 	}
+ 
+-	tmp  = nv_rd32(dev, reg + 0) & 0xfff8ffff;
+-	tmp |= 0x80000000 | (P << 16);
+-	nv_wr32(dev, reg + 0, tmp);
+-	nv_wr32(dev, reg + 4, (N << 8) | M);
++	return 0;
++}
++
++int
++nv50_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
++{
++	int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
++	if (ret)
++		return ret;
+ 
+-	if (state->type == PLL_MEMORY) {
+-		nv_wr32(dev, 0x1002dc, 0);
+-		nv_wr32(dev, 0x100210, 0x80000000);
++	if (nv_rd32(dev, ctrl) & (1 << line)) {
++		*divs = nv_rd32(dev, 0x00e114 + (id * 8));
++		*duty = nv_rd32(dev, 0x00e118 + (id * 8));
++		return 0;
+ 	}
+ 
+-	kfree(state);
++	return -EINVAL;
+ }
+ 
++int
++nv50_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
++{
++	int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
++	if (ret)
++		return ret;
++
++	nv_mask(dev, ctrl, 0x00010001 << line, 0x00000001 << line);
++	nv_wr32(dev, 0x00e114 + (id * 8), divs);
++	nv_wr32(dev, 0x00e118 + (id * 8), duty | 0x80000000);
++	return 0;
++}
+diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
+index 2633aa8..2746402 100644
+--- a/drivers/gpu/drm/nouveau/nv50_sor.c
++++ b/drivers/gpu/drm/nouveau/nv50_sor.c
+@@ -36,6 +36,193 @@
+ #include "nouveau_crtc.h"
+ #include "nv50_display.h"
+ 
++static u32
++nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
++	static const u8 nv50[] = { 16, 8, 0, 24 };
++	if (dev_priv->chipset == 0xaf)
++		return nvaf[lane];
++	return nv50[lane];
++}
++
++static void
++nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern)
++{
++	u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
++	nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
++}
++
++static void
++nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
++		      u8 lane, u8 swing, u8 preem)
++{
++	u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
++	u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
++	u32 mask = 0x000000ff << shift;
++	u8 *table, *entry, *config;
++
++	table = nouveau_dp_bios_data(dev, dcb, &entry);
++	if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
++		NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
++		return;
++	}
++
++	config = entry + table[4];
++	while (config[0] != swing || config[1] != preem) {
++		config += table[5];
++		if (config >= entry + table[4] + entry[4] * table[5])
++			return;
++	}
++
++	nv_mask(dev, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
++	nv_mask(dev, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
++	nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
++}
++
++static void
++nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
++		     int link_nr, u32 link_bw, bool enhframe)
++{
++	u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
++	u32 dpctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
++	u32 clksor = nv_rd32(dev, 0x614300 + (or * 0x800)) & ~0x000c0000;
++	u8 *table, *entry, mask;
++	int i;
++
++	table = nouveau_dp_bios_data(dev, dcb, &entry);
++	if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
++		NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
++		return;
++	}
++
++	entry = ROMPTR(dev, entry[10]);
++	if (entry) {
++		while (link_bw < ROM16(entry[0]) * 10)
++			entry += 4;
++
++		nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc);
++	}
++
++	dpctrl |= ((1 << link_nr) - 1) << 16;
++	if (enhframe)
++		dpctrl |= 0x00004000;
++
++	if (link_bw > 162000)
++		clksor |= 0x00040000;
++
++	nv_wr32(dev, 0x614300 + (or * 0x800), clksor);
++	nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), dpctrl);
++
++	mask = 0;
++	for (i = 0; i < link_nr; i++)
++		mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
++	nv_mask(dev, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
++}
++
++static void
++nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
++{
++	u32 dpctrl = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
++	u32 clksor = nv_rd32(dev, 0x614300 + (or * 0x800));
++	if (clksor & 0x000c0000)
++		*bw = 270000;
++	else
++		*bw = 162000;
++
++	if      (dpctrl > 0x00030000) *nr = 4;
++	else if (dpctrl > 0x00010000) *nr = 2;
++	else			      *nr = 1;
++}
++
++void
++nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
++{
++	const u32 symbol = 100000;
++	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
++	int TU, VTUi, VTUf, VTUa;
++	u64 link_data_rate, link_ratio, unk;
++	u32 best_diff = 64 * symbol;
++	u32 link_nr, link_bw, r;
++
++	/* calculate packed data rate for each lane */
++	nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw);
++	link_data_rate = (clk * bpp / 8) / link_nr;
++
++	/* calculate ratio of packed data rate to link symbol rate */
++	link_ratio = link_data_rate * symbol;
++	r = do_div(link_ratio, link_bw);
++
++	for (TU = 64; TU >= 32; TU--) {
++		/* calculate average number of valid symbols in each TU */
++		u32 tu_valid = link_ratio * TU;
++		u32 calc, diff;
++
++		/* find a hw representation for the fraction.. */
++		VTUi = tu_valid / symbol;
++		calc = VTUi * symbol;
++		diff = tu_valid - calc;
++		if (diff) {
++			if (diff >= (symbol / 2)) {
++				VTUf = symbol / (symbol - diff);
++				if (symbol - (VTUf * diff))
++					VTUf++;
++
++				if (VTUf <= 15) {
++					VTUa  = 1;
++					calc += symbol - (symbol / VTUf);
++				} else {
++					VTUa  = 0;
++					VTUf  = 1;
++					calc += symbol;
++				}
++			} else {
++				VTUa  = 0;
++				VTUf  = min((int)(symbol / diff), 15);
++				calc += symbol / VTUf;
++			}
++
++			diff = calc - tu_valid;
++		} else {
++			/* no remainder, but the hw doesn't like the fractional
++			 * part to be zero.  decrement the integer part and
++			 * have the fraction add a whole symbol back
++			 */
++			VTUa = 0;
++			VTUf = 1;
++			VTUi--;
++		}
++
++		if (diff < best_diff) {
++			best_diff = diff;
++			bestTU = TU;
++			bestVTUa = VTUa;
++			bestVTUf = VTUf;
++			bestVTUi = VTUi;
++			if (diff == 0)
++				break;
++		}
++	}
++
++	if (!bestTU) {
++		NV_ERROR(dev, "DP: unable to find suitable config\n");
++		return;
++	}
++
++	/* XXX close to vbios numbers, but not right */
++	unk  = (symbol - link_ratio) * bestTU;
++	unk *= link_ratio;
++	r = do_div(unk, symbol);
++	r = do_div(unk, symbol);
++	unk += 6;
++
++	nv_mask(dev, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
++	nv_mask(dev, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
++							     bestVTUf << 16 |
++							     bestVTUi << 8 |
++							     unk);
++}
+ static void
+ nv50_sor_disconnect(struct drm_encoder *encoder)
+ {
+@@ -60,6 +247,8 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
+ 	BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ 	OUT_RING  (evo, 0);
+ 
++	nouveau_hdmi_mode_set(encoder, NULL);
++
+ 	nv_encoder->crtc = NULL;
+ 	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+ }
+@@ -115,20 +304,13 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
+ 	}
+ 
+ 	if (nv_encoder->dcb->type == OUTPUT_DP) {
+-		struct nouveau_i2c_chan *auxch;
+-
+-		auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+-		if (!auxch)
+-			return;
++		struct dp_train_func func = {
++			.link_set = nv50_sor_dp_link_set,
++			.train_set = nv50_sor_dp_train_set,
++			.train_adj = nv50_sor_dp_train_adj
++		};
+ 
+-		if (mode == DRM_MODE_DPMS_ON) {
+-			u8 status = DP_SET_POWER_D0;
+-			nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
+-			nouveau_dp_link_train(encoder, nv_encoder->dp.datarate);
+-		} else {
+-			u8 status = DP_SET_POWER_D3;
+-			nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
+-		}
++		nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
+ 	}
+ }
+ 
+@@ -160,11 +342,8 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 	}
+ 
+ 	if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
+-	     connector->native_mode) {
+-		int id = adjusted_mode->base.id;
+-		*adjusted_mode = *connector->native_mode;
+-		adjusted_mode->base.id = id;
+-	}
++	     connector->native_mode)
++		drm_mode_copy(adjusted_mode, connector->native_mode);
+ 
+ 	return true;
+ }
+@@ -172,6 +351,12 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ static void
+ nv50_sor_prepare(struct drm_encoder *encoder)
+ {
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	nv50_sor_disconnect(encoder);
++	if (nv_encoder->dcb->type == OUTPUT_DP) {
++		/* avoid race between link training and supervisor intr */
++		nv50_display_sync(encoder->dev);
++	}
+ }
+ 
+ static void
+@@ -180,8 +365,8 @@ nv50_sor_commit(struct drm_encoder *encoder)
+ }
+ 
+ static void
+-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+-		  struct drm_display_mode *adjusted_mode)
++nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
++		  struct drm_display_mode *mode)
+ {
+ 	struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
+ 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+@@ -193,24 +378,27 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 
+ 	NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
+ 		     nv_encoder->or, nv_encoder->dcb->type, crtc->index);
++	nv_encoder->crtc = encoder->crtc;
+ 
+ 	switch (nv_encoder->dcb->type) {
+ 	case OUTPUT_TMDS:
+ 		if (nv_encoder->dcb->sorconf.link & 1) {
+-			if (adjusted_mode->clock < 165000)
++			if (mode->clock < 165000)
+ 				mode_ctl = 0x0100;
+ 			else
+ 				mode_ctl = 0x0500;
+ 		} else
+ 			mode_ctl = 0x0200;
++
++		nouveau_hdmi_mode_set(encoder, mode);
+ 		break;
+ 	case OUTPUT_DP:
+ 		nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ 		if (nv_connector && nv_connector->base.display_info.bpc == 6) {
+-			nv_encoder->dp.datarate = crtc->mode->clock * 18 / 8;
++			nv_encoder->dp.datarate = mode->clock * 18 / 8;
+ 			mode_ctl |= 0x00020000;
+ 		} else {
+-			nv_encoder->dp.datarate = crtc->mode->clock * 24 / 8;
++			nv_encoder->dp.datarate = mode->clock * 24 / 8;
+ 			mode_ctl |= 0x00050000;
+ 		}
+ 
+@@ -228,10 +416,10 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 	else
+ 		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
+ 
+-	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
++	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ 		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
+ 
+-	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
++	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ 		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
+ 
+ 	nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+@@ -239,12 +427,11 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 	ret = RING_SPACE(evo, 2);
+ 	if (ret) {
+ 		NV_ERROR(dev, "no space while connecting SOR\n");
++		nv_encoder->crtc = NULL;
+ 		return;
+ 	}
+ 	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
+ 	OUT_RING(evo, mode_ctl);
+-
+-	nv_encoder->crtc = encoder->crtc;
+ }
+ 
+ static struct drm_crtc *
+diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
+index 40b84f2..44fbac9 100644
+--- a/drivers/gpu/drm/nouveau/nv50_vm.c
++++ b/drivers/gpu/drm/nouveau/nv50_vm.c
+@@ -48,7 +48,7 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+ 			phys |= 0x60;
+ 		else if (coverage <= 64 * 1024 * 1024)
+ 			phys |= 0x40;
+-		else if (coverage < 128 * 1024 * 1024)
++		else if (coverage <= 128 * 1024 * 1024)
+ 			phys |= 0x20;
+ 	}
+ 
+@@ -57,27 +57,15 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+ }
+ 
+ static inline u64
+-nv50_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
++vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
+ {
+-	struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
+-
+ 	phys |= 1; /* present */
+ 	phys |= (u64)memtype << 40;
+-
+-	/* IGPs don't have real VRAM, re-target to stolen system memory */
+-	if (target == 0 && dev_priv->vram_sys_base) {
+-		phys  += dev_priv->vram_sys_base;
+-		target = 3;
+-	}
+-
+ 	phys |= target << 4;
+-
+ 	if (vma->access & NV_MEM_ACCESS_SYS)
+ 		phys |= (1 << 6);
+-
+ 	if (!(vma->access & NV_MEM_ACCESS_WO))
+ 		phys |= (1 << 3);
+-
+ 	return phys;
+ }
+ 
+@@ -85,11 +73,19 @@ void
+ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ 	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
+ {
++	struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
+ 	u32 comp = (mem->memtype & 0x180) >> 7;
+-	u32 block;
++	u32 block, target;
+ 	int i;
+ 
+-	phys  = nv50_vm_addr(vma, phys, mem->memtype, 0);
++	/* IGPs don't have real VRAM, re-target to stolen system memory */
++	target = 0;
++	if (dev_priv->vram_sys_base) {
++		phys += dev_priv->vram_sys_base;
++		target = 3;
++	}
++
++	phys  = vm_addr(vma, phys, mem->memtype, target);
+ 	pte <<= 3;
+ 	cnt <<= 3;
+ 
+@@ -125,9 +121,10 @@ void
+ nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ 	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+ {
++	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
+ 	pte <<= 3;
+ 	while (cnt--) {
+-		u64 phys = nv50_vm_addr(vma, (u64)*list++, mem->memtype, 2);
++		u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
+ 		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ 		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ 		pte += 8;
+diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
+index 2e45e57..9ed9ae39 100644
+--- a/drivers/gpu/drm/nouveau/nv50_vram.c
++++ b/drivers/gpu/drm/nouveau/nv50_vram.c
+@@ -189,8 +189,25 @@ nv50_vram_init(struct drm_device *dev)
+ 	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ 	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ 	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
++	u32 pfb714 = nv_rd32(dev, 0x100714);
+ 	u32 rblock, length;
+ 
++	switch (pfb714 & 0x00000007) {
++	case 0: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
++	case 1:
++		if (nouveau_mem_vbios_type(dev) == NV_MEM_TYPE_DDR3)
++			dev_priv->vram_type = NV_MEM_TYPE_DDR3;
++		else
++			dev_priv->vram_type = NV_MEM_TYPE_DDR2;
++		break;
++	case 2: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
++	case 3: dev_priv->vram_type = NV_MEM_TYPE_GDDR4; break;
++	case 4: dev_priv->vram_type = NV_MEM_TYPE_GDDR5; break;
++	default:
++		break;
++	}
++
++	dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x100200) & 0x4);
+ 	dev_priv->vram_size  = nv_rd32(dev, 0x10020c);
+ 	dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
+ 	dev_priv->vram_size &= 0xffffffff00ULL;
+diff --git a/drivers/gpu/drm/nouveau/nv84_bsp.c b/drivers/gpu/drm/nouveau/nv84_bsp.c
+new file mode 100644
+index 0000000..7487573
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv84_bsp.c
+@@ -0,0 +1,83 @@
++/*
++ * Copyright 2011 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_util.h"
++#include "nouveau_vm.h"
++#include "nouveau_ramht.h"
++
++/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
++ *     more than just an enable/disable stub this needs to be split out to
++ *     nv98_bsp.c...
++ */
++
++struct nv84_bsp_engine {
++	struct nouveau_exec_engine base;
++};
++
++static int
++nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
++{
++	if (!(nv_rd32(dev, 0x000200) & 0x00008000))
++		return 0;
++
++	nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
++	return 0;
++}
++
++static int
++nv84_bsp_init(struct drm_device *dev, int engine)
++{
++	nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
++	nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
++	return 0;
++}
++
++static void
++nv84_bsp_destroy(struct drm_device *dev, int engine)
++{
++	struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
++
++	NVOBJ_ENGINE_DEL(dev, BSP);
++
++	kfree(pbsp);
++}
++
++int
++nv84_bsp_create(struct drm_device *dev)
++{
++	struct nv84_bsp_engine *pbsp;
++
++	pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
++	if (!pbsp)
++		return -ENOMEM;
++
++	pbsp->base.destroy = nv84_bsp_destroy;
++	pbsp->base.init = nv84_bsp_init;
++	pbsp->base.fini = nv84_bsp_fini;
++
++	NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
++	return 0;
++}
+diff --git a/drivers/gpu/drm/nouveau/nv84_vp.c b/drivers/gpu/drm/nouveau/nv84_vp.c
+new file mode 100644
+index 0000000..6570d30
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv84_vp.c
+@@ -0,0 +1,83 @@
++/*
++ * Copyright 2011 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_util.h"
++#include "nouveau_vm.h"
++#include "nouveau_ramht.h"
++
++/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
++ *     more than just an enable/disable stub this needs to be split out to
++ *     nv98_vp.c...
++ */
++
++struct nv84_vp_engine {
++	struct nouveau_exec_engine base;
++};
++
++static int
++nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
++{
++	if (!(nv_rd32(dev, 0x000200) & 0x00020000))
++		return 0;
++
++	nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
++	return 0;
++}
++
++static int
++nv84_vp_init(struct drm_device *dev, int engine)
++{
++	nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
++	nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
++	return 0;
++}
++
++static void
++nv84_vp_destroy(struct drm_device *dev, int engine)
++{
++	struct nv84_vp_engine *pvp = nv_engine(dev, engine);
++
++	NVOBJ_ENGINE_DEL(dev, VP);
++
++	kfree(pvp);
++}
++
++int
++nv84_vp_create(struct drm_device *dev)
++{
++	struct nv84_vp_engine *pvp;
++
++	pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
++	if (!pvp)
++		return -ENOMEM;
++
++	pvp->base.destroy = nv84_vp_destroy;
++	pvp->base.init = nv84_vp_init;
++	pvp->base.fini = nv84_vp_fini;
++
++	NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
++	return 0;
++}
+diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
+new file mode 100644
+index 0000000..db94ff0
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv98_crypt.c
+@@ -0,0 +1,78 @@
++/*
++ * Copyright 2011 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_util.h"
++#include "nouveau_vm.h"
++#include "nouveau_ramht.h"
++
++struct nv98_crypt_engine {
++	struct nouveau_exec_engine base;
++};
++
++static int
++nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
++{
++	if (!(nv_rd32(dev, 0x000200) & 0x00004000))
++		return 0;
++
++	nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
++	return 0;
++}
++
++static int
++nv98_crypt_init(struct drm_device *dev, int engine)
++{
++	nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
++	nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
++	return 0;
++}
++
++static void
++nv98_crypt_destroy(struct drm_device *dev, int engine)
++{
++	struct nv98_crypt_engine *pcrypt = nv_engine(dev, engine);
++
++	NVOBJ_ENGINE_DEL(dev, CRYPT);
++
++	kfree(pcrypt);
++}
++
++int
++nv98_crypt_create(struct drm_device *dev)
++{
++	struct nv98_crypt_engine *pcrypt;
++
++	pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
++	if (!pcrypt)
++		return -ENOMEM;
++
++	pcrypt->base.destroy = nv98_crypt_destroy;
++	pcrypt->base.init = nv98_crypt_init;
++	pcrypt->base.fini = nv98_crypt_fini;
++
++	NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
++	return 0;
++}
+diff --git a/drivers/gpu/drm/nouveau/nv98_ppp.c b/drivers/gpu/drm/nouveau/nv98_ppp.c
+new file mode 100644
+index 0000000..a987dd6
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv98_ppp.c
+@@ -0,0 +1,78 @@
++/*
++ * Copyright 2011 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_util.h"
++#include "nouveau_vm.h"
++#include "nouveau_ramht.h"
++
++struct nv98_ppp_engine {
++	struct nouveau_exec_engine base;
++};
++
++static int
++nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend)
++{
++	if (!(nv_rd32(dev, 0x000200) & 0x00000002))
++		return 0;
++
++	nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
++	return 0;
++}
++
++static int
++nv98_ppp_init(struct drm_device *dev, int engine)
++{
++	nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
++	nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
++	return 0;
++}
++
++static void
++nv98_ppp_destroy(struct drm_device *dev, int engine)
++{
++	struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
++
++	NVOBJ_ENGINE_DEL(dev, PPP);
++
++	kfree(pppp);
++}
++
++int
++nv98_ppp_create(struct drm_device *dev)
++{
++	struct nv98_ppp_engine *pppp;
++
++	pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
++	if (!pppp)
++		return -ENOMEM;
++
++	pppp->base.destroy = nv98_ppp_destroy;
++	pppp->base.init = nv98_ppp_init;
++	pppp->base.fini = nv98_ppp_fini;
++
++	NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
++	return 0;
++}
+diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc
+index d894731..219850d 100644
+--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
++++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc
+@@ -31,8 +31,9 @@
+  */
+ 
+ ifdef(`NVA3',
+-.section nva3_pcopy_data,
+-.section nvc0_pcopy_data
++.section #nva3_pcopy_data
++,
++.section #nvc0_pcopy_data
+ )
+ 
+ ctx_object:                   .b32 0
+@@ -42,7 +43,7 @@ ctx_dma_query:                .b32 0
+ ctx_dma_src:                  .b32 0
+ ctx_dma_dst:                  .b32 0
+ ,)
+-.equ ctx_dma_count 3
++.equ #ctx_dma_count 3
+ ctx_query_address_high:       .b32 0
+ ctx_query_address_low:        .b32 0
+ ctx_query_counter:            .b32 0
+@@ -78,64 +79,65 @@ ctx_ycnt:                     .b32 0
+ dispatch_table:
+ // mthd 0x0000, NAME
+ .b16 0x000 1
+-.b32 ctx_object                     ~0xffffffff
++.b32 #ctx_object                     ~0xffffffff
+ // mthd 0x0100, NOP
+ .b16 0x040 1
+-.b32 0x00010000 + cmd_nop           ~0xffffffff
++.b32 0x00010000 + #cmd_nop           ~0xffffffff
+ // mthd 0x0140, PM_TRIGGER
+ .b16 0x050 1
+-.b32 0x00010000 + cmd_pm_trigger    ~0xffffffff
++.b32 0x00010000 + #cmd_pm_trigger    ~0xffffffff
+ ifdef(`NVA3', `
+ // mthd 0x0180-0x018c, DMA_
+-.b16 0x060 ctx_dma_count
++.b16 0x060 #ctx_dma_count
+ dispatch_dma:
+-.b32 0x00010000 + cmd_dma           ~0xffffffff
+-.b32 0x00010000 + cmd_dma           ~0xffffffff
+-.b32 0x00010000 + cmd_dma           ~0xffffffff
++.b32 0x00010000 + #cmd_dma           ~0xffffffff
++.b32 0x00010000 + #cmd_dma           ~0xffffffff
++.b32 0x00010000 + #cmd_dma           ~0xffffffff
+ ',)
+ // mthd 0x0200-0x0218, SRC_TILE
+ .b16 0x80 7
+-.b32 ctx_src_tile_mode              ~0x00000fff
+-.b32 ctx_src_xsize                  ~0x0007ffff
+-.b32 ctx_src_ysize                  ~0x00001fff
+-.b32 ctx_src_zsize                  ~0x000007ff
+-.b32 ctx_src_zoff                   ~0x00000fff
+-.b32 ctx_src_xoff                   ~0x0007ffff
+-.b32 ctx_src_yoff                   ~0x00001fff
++.b32 #ctx_src_tile_mode              ~0x00000fff
++.b32 #ctx_src_xsize                  ~0x0007ffff
++.b32 #ctx_src_ysize                  ~0x00001fff
++.b32 #ctx_src_zsize                  ~0x000007ff
++.b32 #ctx_src_zoff                   ~0x00000fff
++.b32 #ctx_src_xoff                   ~0x0007ffff
++.b32 #ctx_src_yoff                   ~0x00001fff
+ // mthd 0x0220-0x0238, DST_TILE
+ .b16 0x88 7
+-.b32 ctx_dst_tile_mode              ~0x00000fff
+-.b32 ctx_dst_xsize                  ~0x0007ffff
+-.b32 ctx_dst_ysize                  ~0x00001fff
+-.b32 ctx_dst_zsize                  ~0x000007ff
+-.b32 ctx_dst_zoff                   ~0x00000fff
+-.b32 ctx_dst_xoff                   ~0x0007ffff
+-.b32 ctx_dst_yoff                   ~0x00001fff
++.b32 #ctx_dst_tile_mode              ~0x00000fff
++.b32 #ctx_dst_xsize                  ~0x0007ffff
++.b32 #ctx_dst_ysize                  ~0x00001fff
++.b32 #ctx_dst_zsize                  ~0x000007ff
++.b32 #ctx_dst_zoff                   ~0x00000fff
++.b32 #ctx_dst_xoff                   ~0x0007ffff
++.b32 #ctx_dst_yoff                   ~0x00001fff
+ // mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
+ .b16 0xc0 2
+-.b32 0x00010000 + cmd_exec          ~0xffffffff
+-.b32 0x00010000 + cmd_wrcache_flush ~0xffffffff
++.b32 0x00010000 + #cmd_exec          ~0xffffffff
++.b32 0x00010000 + #cmd_wrcache_flush ~0xffffffff
+ // mthd 0x030c-0x0340, various stuff
+ .b16 0xc3 14
+-.b32 ctx_src_address_high           ~0x000000ff
+-.b32 ctx_src_address_low            ~0xffffffff
+-.b32 ctx_dst_address_high           ~0x000000ff
+-.b32 ctx_dst_address_low            ~0xffffffff
+-.b32 ctx_src_pitch                  ~0x0007ffff
+-.b32 ctx_dst_pitch                  ~0x0007ffff
+-.b32 ctx_xcnt                       ~0x0000ffff
+-.b32 ctx_ycnt                       ~0x00001fff
+-.b32 ctx_format                     ~0x0333ffff
+-.b32 ctx_swz_const0                 ~0xffffffff
+-.b32 ctx_swz_const1                 ~0xffffffff
+-.b32 ctx_query_address_high         ~0x000000ff
+-.b32 ctx_query_address_low          ~0xffffffff
+-.b32 ctx_query_counter              ~0xffffffff
++.b32 #ctx_src_address_high           ~0x000000ff
++.b32 #ctx_src_address_low            ~0xffffffff
++.b32 #ctx_dst_address_high           ~0x000000ff
++.b32 #ctx_dst_address_low            ~0xffffffff
++.b32 #ctx_src_pitch                  ~0x0007ffff
++.b32 #ctx_dst_pitch                  ~0x0007ffff
++.b32 #ctx_xcnt                       ~0x0000ffff
++.b32 #ctx_ycnt                       ~0x00001fff
++.b32 #ctx_format                     ~0x0333ffff
++.b32 #ctx_swz_const0                 ~0xffffffff
++.b32 #ctx_swz_const1                 ~0xffffffff
++.b32 #ctx_query_address_high         ~0x000000ff
++.b32 #ctx_query_address_low          ~0xffffffff
++.b32 #ctx_query_counter              ~0xffffffff
+ .b16 0x800 0
+ 
+ ifdef(`NVA3',
+-.section nva3_pcopy_code,
+-.section nvc0_pcopy_code
++.section #nva3_pcopy_code
++,
++.section #nvc0_pcopy_code
+ )
+ 
+ main:
+@@ -143,12 +145,12 @@ main:
+    mov $sp $r0
+ 
+    // setup i0 handler and route fifo and ctxswitch to it
+-   mov $r1 ih
++   mov $r1 #ih
+    mov $iv0 $r1
+    mov $r1 0x400
+    movw $r2 0xfff3
+    sethi $r2 0
+-   iowr I[$r2 + 0x300] $r2
++   iowr I[$r1 + 0x300] $r2
+ 
+    // enable interrupts
+    or $r2 0xc
+@@ -164,19 +166,19 @@ main:
+    bset $flags $p0
+    spin:
+       sleep $p0
+-      bra spin
++      bra #spin
+ 
+ // i0 handler
+ ih:
+    iord $r1 I[$r0 + 0x200]
+ 
+    and $r2 $r1 0x00000008
+-   bra e ih_no_chsw
+-      call chsw
++   bra e #ih_no_chsw
++      call #chsw
+    ih_no_chsw:
+    and $r2 $r1 0x00000004
+-   bra e ih_no_cmd
+-      call dispatch
++   bra e #ih_no_cmd
++      call #dispatch
+ 
+    ih_no_cmd:
+    and $r1 $r1 0x0000000c
+@@ -235,9 +237,9 @@ ifdef(`NVA3', `
+    sethi $r4 0x60000
+ 
+    // swap!
+-   bra $p1 swctx_load
++   bra $p1 #swctx_load
+       xdst $r0 $r4
+-      bra swctx_done
++      bra #swctx_done
+    swctx_load:
+       xdld $r0 $r4
+    swctx_done:
+@@ -251,9 +253,9 @@ chsw:
+ 
+    // if it's active, unload it and return
+    xbit $r15 $r3 0x1e
+-   bra e chsw_no_unload
++   bra e #chsw_no_unload
+       bclr $flags $p1
+-      call swctx
++      call #swctx
+       bclr $r3 0x1e
+       iowr I[$r2] $r3
+       mov $r4 1
+@@ -266,20 +268,20 @@ chsw:
+ 
+    // is there a channel waiting to be loaded?
+    xbit $r13 $r3 0x1e
+-   bra e chsw_finish_load
++   bra e #chsw_finish_load
+       bset $flags $p1
+-      call swctx
++      call #swctx
+ ifdef(`NVA3',
+       // load dma objects back into TARGET regs
+-      mov $r5 ctx_dma
+-      mov $r6 ctx_dma_count
++      mov $r5 #ctx_dma
++      mov $r6 #ctx_dma_count
+       chsw_load_ctx_dma:
+          ld b32 $r7 D[$r5 + $r6 * 4]
+          add b32 $r8 $r6 0x180
+          shl b32 $r8 8
+          iowr I[$r8] $r7
+          sub b32 $r6 1
+-         bra nc chsw_load_ctx_dma
++         bra nc #chsw_load_ctx_dma
+ ,)
+ 
+    chsw_finish_load:
+@@ -297,7 +299,7 @@ dispatch:
+    shl b32 $r2 0x10
+ 
+    // lookup method in the dispatch table, ILLEGAL_MTHD if not found
+-   mov $r5 dispatch_table
++   mov $r5 #dispatch_table
+    clear b32 $r6
+    clear b32 $r7
+    dispatch_loop:
+@@ -305,14 +307,14 @@ dispatch:
+       ld b16 $r7 D[$r5 + 2]
+       add b32 $r5 4
+       cmpu b32 $r4 $r6
+-      bra c dispatch_illegal_mthd
++      bra c #dispatch_illegal_mthd
+       add b32 $r7 $r6
+       cmpu b32 $r4 $r7
+-      bra c dispatch_valid_mthd
++      bra c #dispatch_valid_mthd
+       sub b32 $r7 $r6
+       shl b32 $r7 3
+       add b32 $r5 $r7
+-      bra dispatch_loop
++      bra #dispatch_loop
+ 
+    // ensure no bits set in reserved fields, INVALID_BITFIELD
+    dispatch_valid_mthd:
+@@ -322,20 +324,20 @@ dispatch:
+    ld b32 $r5 D[$r4 + 4]
+    and $r5 $r3
+    cmpu b32 $r5 0
+-   bra ne dispatch_invalid_bitfield
++   bra ne #dispatch_invalid_bitfield
+ 
+    // depending on dispatch flags: execute method, or save data as state
+    ld b16 $r5 D[$r4 + 0]
+    ld b16 $r6 D[$r4 + 2]
+    cmpu b32 $r6 0
+-   bra ne dispatch_cmd
++   bra ne #dispatch_cmd
+       st b32 D[$r5] $r3
+-      bra dispatch_done
++      bra #dispatch_done
+    dispatch_cmd:
+       bclr $flags $p1
+       call $r5
+-      bra $p1 dispatch_error
+-      bra dispatch_done
++      bra $p1 #dispatch_error
++      bra #dispatch_done
+ 
+    dispatch_invalid_bitfield:
+    or $r2 2
+@@ -353,7 +355,7 @@ dispatch:
+       iord $r2 I[$r0 + 0x200]
+       and $r2 0x40
+       cmpu b32 $r2 0
+-      bra ne hostirq_wait
++      bra ne #hostirq_wait
+ 
+    dispatch_done:
+    mov $r2 0x1d00
+@@ -409,10 +411,10 @@ ifdef(`NVA3',
+ //       $r2: hostirq state
+ //       $r3: data
+ cmd_dma:
+-   sub b32 $r4 dispatch_dma
++   sub b32 $r4 #dispatch_dma
+    shr b32 $r4 1
+    bset $r3 0x1e
+-   st b32 D[$r4 + ctx_dma] $r3
++   st b32 D[$r4 + #ctx_dma] $r3
+    add b32 $r4 0x600
+    shl b32 $r4 6
+    iowr I[$r4] $r3
+@@ -430,7 +432,7 @@ cmd_exec_set_format:
+    st b32 D[$sp + 0x0c] $r0
+ 
+    // extract cpp, src_ncomp and dst_ncomp from FORMAT
+-   ld b32 $r4 D[$r0 + ctx_format]
++   ld b32 $r4 D[$r0 + #ctx_format]
+    extr $r5 $r4 16:17
+    add b32 $r5 1
+    extr $r6 $r4 20:21
+@@ -448,22 +450,22 @@ cmd_exec_set_format:
+       clear b32 $r11
+       bpc_loop:
+          cmpu b8 $r10 4
+-         bra nc cmp_c0
++         bra nc #cmp_c0
+             mulu $r12 $r10 $r5
+             add b32 $r12 $r11
+             bset $flags $p2
+-            bra bpc_next
++            bra #bpc_next
+          cmp_c0:
+-         bra ne cmp_c1
++         bra ne #cmp_c1
+             mov $r12 0x10
+             add b32 $r12 $r11
+-            bra bpc_next
++            bra #bpc_next
+          cmp_c1:
+          cmpu b8 $r10 6
+-         bra nc cmp_zero
++         bra nc #cmp_zero
+             mov $r12 0x14
+             add b32 $r12 $r11
+-            bra bpc_next
++            bra #bpc_next
+          cmp_zero:
+             mov $r12 0x80
+          bpc_next:
+@@ -471,22 +473,22 @@ cmd_exec_set_format:
+          add b32 $r8 1
+          add b32 $r11 1
+          cmpu b32 $r11 $r5
+-         bra c bpc_loop
++         bra c #bpc_loop
+       add b32 $r9 1
+       cmpu b32 $r9 $r7
+-      bra c ncomp_loop
++      bra c #ncomp_loop
+ 
+    // SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
+    mulu $r6 $r5
+-   st b32 D[$r0 + ctx_src_cpp] $r6
+-   ld b32 $r8 D[$r0 + ctx_xcnt]
++   st b32 D[$r0 + #ctx_src_cpp] $r6
++   ld b32 $r8 D[$r0 + #ctx_xcnt]
+    mulu $r6 $r8
+-   bra $p2 dst_xcnt
++   bra $p2 #dst_xcnt
+    clear b32 $r6
+ 
+    dst_xcnt:
+    mulu $r7 $r5
+-   st b32 D[$r0 + ctx_dst_cpp] $r7
++   st b32 D[$r0 + #ctx_dst_cpp] $r7
+    mulu $r7 $r8
+ 
+    mov $r5 0x810
+@@ -494,10 +496,10 @@ cmd_exec_set_format:
+    iowr I[$r5 + 0x000] $r6
+    iowr I[$r5 + 0x100] $r7
+    add b32 $r5 0x800
+-   ld b32 $r6 D[$r0 + ctx_dst_cpp]
++   ld b32 $r6 D[$r0 + #ctx_dst_cpp]
+    sub b32 $r6 1
+    shl b32 $r6 8
+-   ld b32 $r7 D[$r0 + ctx_src_cpp]
++   ld b32 $r7 D[$r0 + #ctx_src_cpp]
+    sub b32 $r7 1
+    or $r6 $r7
+    iowr I[$r5 + 0x000] $r6
+@@ -511,9 +513,9 @@ cmd_exec_set_format:
+    ld b32 $r6 D[$sp + 0x0c]
+    iowr I[$r5 + 0x300] $r6
+    add b32 $r5 0x400
+-   ld b32 $r6 D[$r0 + ctx_swz_const0]
++   ld b32 $r6 D[$r0 + #ctx_swz_const0]
+    iowr I[$r5 + 0x000] $r6
+-   ld b32 $r6 D[$r0 + ctx_swz_const1]
++   ld b32 $r6 D[$r0 + #ctx_swz_const1]
+    iowr I[$r5 + 0x100] $r6
+    add $sp 0x10
+    ret
+@@ -543,7 +545,7 @@ cmd_exec_set_format:
+ //
+ cmd_exec_set_surface_tiled:
+    // translate TILE_MODE into Tp, Th, Td shift values
+-   ld b32 $r7 D[$r5 + ctx_src_tile_mode]
++   ld b32 $r7 D[$r5 + #ctx_src_tile_mode]
+    extr $r9 $r7 8:11
+    extr $r8 $r7 4:7
+ ifdef(`NVA3',
+@@ -553,9 +555,9 @@ ifdef(`NVA3',
+ )
+    extr $r7 $r7 0:3
+    cmp b32 $r7 0xe
+-   bra ne xtile64
++   bra ne #xtile64
+    mov $r7 4
+-   bra xtileok
++   bra #xtileok
+    xtile64:
+    xbit $r7 $flags $p2
+    add b32 $r7 17
+@@ -565,8 +567,8 @@ ifdef(`NVA3',
+ 
+    // Op = (x * cpp) & ((1 << Tp) - 1)
+    // Tx = (x * cpp) >> Tp
+-   ld b32 $r10 D[$r5 + ctx_src_xoff]
+-   ld b32 $r11 D[$r5 + ctx_src_cpp]
++   ld b32 $r10 D[$r5 + #ctx_src_xoff]
++   ld b32 $r11 D[$r5 + #ctx_src_cpp]
+    mulu $r10 $r11
+    mov $r11 1
+    shl b32 $r11 $r7
+@@ -576,7 +578,7 @@ ifdef(`NVA3',
+ 
+    // Tyo = y & ((1 << Th) - 1)
+    // Ty  = y >> Th
+-   ld b32 $r13 D[$r5 + ctx_src_yoff]
++   ld b32 $r13 D[$r5 + #ctx_src_yoff]
+    mov $r14 1
+    shl b32 $r14 $r8
+    sub b32 $r14 1
+@@ -598,8 +600,8 @@ ifdef(`NVA3',
+    add b32 $r12 $r11
+ 
+    // nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp)
+-   ld b32 $r15 D[$r5 + ctx_src_xsize]
+-   ld b32 $r11 D[$r5 + ctx_src_cpp]
++   ld b32 $r15 D[$r5 + #ctx_src_xsize]
++   ld b32 $r11 D[$r5 + #ctx_src_cpp]
+    mulu $r15 $r11
+    mov $r11 1
+    shl b32 $r11 $r7
+@@ -609,7 +611,7 @@ ifdef(`NVA3',
+    push $r15
+ 
+    // nTy = (h + ((1 << Th) - 1)) >> Th
+-   ld b32 $r15 D[$r5 + ctx_src_ysize]
++   ld b32 $r15 D[$r5 + #ctx_src_ysize]
+    mov $r11 1
+    shl b32 $r11 $r8
+    sub b32 $r11 1
+@@ -629,7 +631,7 @@ ifdef(`NVA3',
+    // Tz  = z >> Td
+    // Op += Tzo << Tys
+    // Ts  = Tys + Td
+-   ld b32 $r8 D[$r5 + ctx_src_zoff]
++   ld b32 $r8 D[$r5 + #ctx_src_zoff]
+    mov $r14 1
+    shl b32 $r14 $r9
+    sub b32 $r14 1
+@@ -656,8 +658,8 @@ ifdef(`NVA3',
+ 
+    // SRC_ADDRESS_LOW   = (Ot + Op) & 0xffffffff
+    // CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
+-   ld b32 $r7 D[$r5 + ctx_src_address_low]
+-   ld b32 $r8 D[$r5 + ctx_src_address_high]
++   ld b32 $r7 D[$r5 + #ctx_src_address_low]
++   ld b32 $r8 D[$r5 + #ctx_src_address_high]
+    add b32 $r10 $r12
+    add b32 $r7 $r10
+    adc b32 $r8 0
+@@ -677,14 +679,14 @@ cmd_exec_set_surface_linear:
+    xbit $r6 $flags $p2
+    add b32 $r6 0x202
+    shl b32 $r6 8
+-   ld b32 $r7 D[$r5 + ctx_src_address_low]
++   ld b32 $r7 D[$r5 + #ctx_src_address_low]
+    iowr I[$r6 + 0x000] $r7
+    add b32 $r6 0x400
+-   ld b32 $r7 D[$r5 + ctx_src_address_high]
++   ld b32 $r7 D[$r5 + #ctx_src_address_high]
+    shl b32 $r7 16
+    iowr I[$r6 + 0x000] $r7
+    add b32 $r6 0x400
+-   ld b32 $r7 D[$r5 + ctx_src_pitch]
++   ld b32 $r7 D[$r5 + #ctx_src_pitch]
+    iowr I[$r6 + 0x000] $r7
+    ret
+ 
+@@ -697,7 +699,7 @@ cmd_exec_wait:
+    loop:
+       iord $r1 I[$r0]
+       and $r1 1
+-      bra ne loop
++      bra ne #loop
+    pop $r1
+    pop $r0
+    ret
+@@ -705,18 +707,18 @@ cmd_exec_wait:
+ cmd_exec_query:
+    // if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI }
+    xbit $r4 $r3 13
+-   bra ne query_counter
+-      call cmd_exec_wait
++   bra ne #query_counter
++      call #cmd_exec_wait
+       mov $r4 0x80c
+       shl b32 $r4 6
+-      ld b32 $r5 D[$r0 + ctx_query_address_low]
++      ld b32 $r5 D[$r0 + #ctx_query_address_low]
+       add b32 $r5 4
+       iowr I[$r4 + 0x000] $r5
+       iowr I[$r4 + 0x100] $r0
+       mov $r5 0xc
+       iowr I[$r4 + 0x200] $r5
+       add b32 $r4 0x400
+-      ld b32 $r5 D[$r0 + ctx_query_address_high]
++      ld b32 $r5 D[$r0 + #ctx_query_address_high]
+       shl b32 $r5 16
+       iowr I[$r4 + 0x000] $r5
+       add b32 $r4 0x500
+@@ -741,16 +743,16 @@ cmd_exec_query:
+ 
+    // write COUNTER
+    query_counter:
+-   call cmd_exec_wait
++   call #cmd_exec_wait
+    mov $r4 0x80c
+    shl b32 $r4 6
+-   ld b32 $r5 D[$r0 + ctx_query_address_low]
++   ld b32 $r5 D[$r0 + #ctx_query_address_low]
+    iowr I[$r4 + 0x000] $r5
+    iowr I[$r4 + 0x100] $r0
+    mov $r5 0x4
+    iowr I[$r4 + 0x200] $r5
+    add b32 $r4 0x400
+-   ld b32 $r5 D[$r0 + ctx_query_address_high]
++   ld b32 $r5 D[$r0 + #ctx_query_address_high]
+    shl b32 $r5 16
+    iowr I[$r4 + 0x000] $r5
+    add b32 $r4 0x500
+@@ -759,7 +761,7 @@ cmd_exec_query:
+    mov $r5 0x00001110
+    sethi $r5 0x13120000
+    iowr I[$r4 + 0x100] $r5
+-   ld b32 $r5 D[$r0 + ctx_query_counter]
++   ld b32 $r5 D[$r0 + #ctx_query_counter]
+    add b32 $r4 0x500
+    iowr I[$r4 + 0x000] $r5
+    mov $r5 0x00002601
+@@ -787,22 +789,22 @@ cmd_exec_query:
+ //       $r2: hostirq state
+ //       $r3: data
+ cmd_exec:
+-   call cmd_exec_wait
++   call #cmd_exec_wait
+ 
+    // if format requested, call function to calculate it, otherwise
+    // fill in cpp/xcnt for both surfaces as if (cpp == 1)
+    xbit $r15 $r3 0
+-   bra e cmd_exec_no_format
+-      call cmd_exec_set_format
++   bra e #cmd_exec_no_format
++      call #cmd_exec_set_format
+       mov $r4 0x200
+-      bra cmd_exec_init_src_surface
++      bra #cmd_exec_init_src_surface
+    cmd_exec_no_format:
+       mov $r6 0x810
+       shl b32 $r6 6
+       mov $r7 1
+-      st b32 D[$r0 + ctx_src_cpp] $r7
+-      st b32 D[$r0 + ctx_dst_cpp] $r7
+-      ld b32 $r7 D[$r0 + ctx_xcnt]
++      st b32 D[$r0 + #ctx_src_cpp] $r7
++      st b32 D[$r0 + #ctx_dst_cpp] $r7
++      ld b32 $r7 D[$r0 + #ctx_xcnt]
+       iowr I[$r6 + 0x000] $r7
+       iowr I[$r6 + 0x100] $r7
+       clear b32 $r4
+@@ -811,28 +813,28 @@ cmd_exec:
+    bclr $flags $p2
+    clear b32 $r5
+    xbit $r15 $r3 4
+-   bra e src_tiled
+-      call cmd_exec_set_surface_linear
+-      bra cmd_exec_init_dst_surface
++   bra e #src_tiled
++      call #cmd_exec_set_surface_linear
++      bra #cmd_exec_init_dst_surface
+    src_tiled:
+-      call cmd_exec_set_surface_tiled
++      call #cmd_exec_set_surface_tiled
+       bset $r4 7
+ 
+    cmd_exec_init_dst_surface:
+    bset $flags $p2
+-   mov $r5 ctx_dst_address_high - ctx_src_address_high
++   mov $r5 #ctx_dst_address_high - #ctx_src_address_high
+    xbit $r15 $r3 8
+-   bra e dst_tiled
+-      call cmd_exec_set_surface_linear
+-      bra cmd_exec_kick
++   bra e #dst_tiled
++      call #cmd_exec_set_surface_linear
++      bra #cmd_exec_kick
+    dst_tiled:
+-      call cmd_exec_set_surface_tiled
++      call #cmd_exec_set_surface_tiled
+       bset $r4 8
+ 
+    cmd_exec_kick:
+    mov $r5 0x800
+    shl b32 $r5 6
+-   ld b32 $r6 D[$r0 + ctx_ycnt]
++   ld b32 $r6 D[$r0 + #ctx_ycnt]
+    iowr I[$r5 + 0x100] $r6
+    mov $r6 0x0041
+    // SRC_TARGET = 1, DST_TARGET = 2
+@@ -842,8 +844,8 @@ cmd_exec:
+ 
+    // if requested, queue up a QUERY write after the copy has completed
+    xbit $r15 $r3 12
+-   bra e cmd_exec_done
+-      call cmd_exec_query
++   bra e #cmd_exec_done
++      call #cmd_exec_query
+ 
+    cmd_exec_done:
+    ret
+diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+index e2a0e88..37d6de3 100644
+--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
++++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+@@ -190,7 +190,7 @@ u32 nva3_pcopy_code[] = {
+ 	0xf10010fe,
+ 	0xf1040017,
+ 	0xf0fff327,
+-	0x22d00023,
++	0x12d00023,
+ 	0x0c25f0c0,
+ 	0xf40012d0,
+ 	0x17f11031,
+diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
+index 618c144..9e636e6 100644
+--- a/drivers/gpu/drm/nouveau/nva3_pm.c
++++ b/drivers/gpu/drm/nouveau/nva3_pm.c
+@@ -287,12 +287,13 @@ nva3_pm_grcp_idle(void *data)
+ 	return false;
+ }
+ 
+-void
++int
+ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nva3_pm_state *info = pre_state;
+ 	unsigned long flags;
++	int ret = -EAGAIN;
+ 
+ 	/* prevent any new grctx switches from starting */
+ 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+@@ -328,6 +329,8 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
+ 		nv_wr32(dev, 0x100210, 0x80000000);
+ 	}
+ 
++	ret = 0;
++
+ cleanup:
+ 	/* unfreeze PFIFO */
+ 	nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
+@@ -339,4 +342,5 @@ cleanup:
+ 		nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
+ 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ 	kfree(info);
++	return ret;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+index 9e87036..cd879f3 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
++++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+@@ -178,7 +178,7 @@ u32 nvc0_pcopy_code[] = {
+ 	0xf10010fe,
+ 	0xf1040017,
+ 	0xf0fff327,
+-	0x22d00023,
++	0x12d00023,
+ 	0x0c25f0c0,
+ 	0xf40012d0,
+ 	0x17f11031,
+diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
+index 5bf5503..f704e94 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
++++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
+@@ -54,6 +54,11 @@ nvc0_mfb_isr(struct drm_device *dev)
+ 			nvc0_mfb_subp_isr(dev, unit, subp);
+ 		units &= ~(1 << unit);
+ 	}
++
++	/* we do something horribly wrong and upset PMFB a lot, so mask off
++	 * interrupts from it after the first one until it's fixed
++	 */
++	nv_mask(dev, 0x000640, 0x02000000, 0x00000000);
+ }
+ 
+ static void
+diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
+index dcbe0d5..50d68a7 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
++++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
+@@ -436,6 +436,24 @@ nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+ 	printk(" on channel 0x%010llx\n", (u64)inst << 12);
+ }
+ 
++static int
++nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan = NULL;
++	unsigned long flags;
++	int ret = -EINVAL;
++
++	spin_lock_irqsave(&dev_priv->channels.lock, flags);
++	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) {
++		chan = dev_priv->channels.ptr[chid];
++		if (likely(chan))
++			ret = nouveau_finish_page_flip(chan, NULL);
++	}
++	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
++	return ret;
++}
++
+ static void
+ nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+ {
+@@ -445,11 +463,21 @@ nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+ 	u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
+ 	u32 subc = (addr & 0x00070000);
+ 	u32 mthd = (addr & 0x00003ffc);
++	u32 show = stat;
+ 
+-	NV_INFO(dev, "PSUBFIFO %d:", unit);
+-	nouveau_bitfield_print(nvc0_fifo_subfifo_intr, stat);
+-	NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+-		unit, chid, subc, mthd, data);
++	if (stat & 0x00200000) {
++		if (mthd == 0x0054) {
++			if (!nvc0_fifo_page_flip(dev, chid))
++				show &= ~0x00200000;
++		}
++	}
++
++	if (show) {
++		NV_INFO(dev, "PFIFO%d:", unit);
++		nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
++		NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
++			     unit, chid, subc, mthd, data);
++	}
+ 
+ 	nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ 	nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
+index ecfafd7..9066102 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
++++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
+@@ -333,14 +333,6 @@ nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
+ 	return 0;
+ }
+ 
+-static int
+-nvc0_graph_mthd_page_flip(struct nouveau_channel *chan,
+-			  u32 class, u32 mthd, u32 data)
+-{
+-	nouveau_finish_page_flip(chan, NULL);
+-	return 0;
+-}
+-
+ static void
+ nvc0_graph_init_obj418880(struct drm_device *dev)
+ {
+@@ -875,19 +867,20 @@ nvc0_graph_create(struct drm_device *dev)
+ 	case 0xcf: /* 4/0/0/0, 3 */
+ 		priv->magic_not_rop_nr = 0x03;
+ 		break;
++	case 0xd9: /* 1/0/0/0, 1 */
++		priv->magic_not_rop_nr = 0x01;
++		break;
+ 	}
+ 
+ 	if (!priv->magic_not_rop_nr) {
+ 		NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
+ 			 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
+ 			 priv->tp_nr[3], priv->rop_nr);
+-		/* use 0xc3's values... */
+-		priv->magic_not_rop_nr = 0x03;
++		priv->magic_not_rop_nr = 0x00;
+ 	}
+ 
+ 	NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
+ 	NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
+-	NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
+ 	NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
+ 	if (fermi >= 0x9197)
+ 		NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
+diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
+index 2a4b6dc..e6b2288 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_graph.fuc
++++ b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
+@@ -71,9 +71,9 @@ queue_put:
+ 	ld b32 $r9 D[$r13 + 0x4]	// PUT
+ 	xor $r8 8
+ 	cmpu b32 $r8 $r9
+-	bra ne queue_put_next
++	bra ne #queue_put_next
+ 		mov $r15 E_CMD_OVERFLOW
+-		call error
++		call #error
+ 		ret
+ 
+ 	// store cmd/data on queue
+@@ -104,7 +104,7 @@ queue_get:
+ 	ld b32 $r8 D[$r13 + 0x0]	// GET
+ 	ld b32 $r9 D[$r13 + 0x4]	// PUT
+ 	cmpu b32 $r8 $r9
+-	bra e queue_get_done
++	bra e #queue_get_done
+ 		// fetch first cmd/data pair
+ 		and $r9 $r8 7
+ 		shl b32 $r9 3
+@@ -135,9 +135,9 @@ nv_rd32:
+ 	nv_rd32_wait:
+ 		iord $r12 I[$r11 + 0x000]
+ 		xbit $r12 $r12 31
+-		bra ne nv_rd32_wait
++		bra ne #nv_rd32_wait
+ 	mov $r10 6			// DONE_MMIO_RD
+-	call wait_doneo
++	call #wait_doneo
+ 	iord $r15 I[$r11 + 0x100]	// MMIO_RDVAL
+ 	ret
+ 
+@@ -157,7 +157,7 @@ nv_wr32:
+ 	nv_wr32_wait:
+ 		iord $r12 I[$r11 + 0x000]
+ 		xbit $r12 $r12 31
+-		bra ne nv_wr32_wait
++		bra ne #nv_wr32_wait
+ 	ret
+ 
+ // (re)set watchdog timer
+@@ -193,7 +193,7 @@ $1:
+ 		shl b32 $r8 6
+ 		iord $r8 I[$r8 + 0x000]	// DONE
+ 		xbit $r8 $r8 $r10
+-		bra $2 wait_done_$1
++		bra $2 #wait_done_$1
+ 	trace_clr(T_WAIT)
+ 	ret
+ ')
+@@ -216,7 +216,7 @@ mmctx_size:
+ 		add b32 $r9 $r8
+ 		add b32 $r14 4
+ 		cmpu b32 $r14 $r15
+-		bra ne nv_mmctx_size_loop
++		bra ne #nv_mmctx_size_loop
+ 	mov b32 $r15 $r9
+ 	ret
+ 
+@@ -238,12 +238,12 @@ mmctx_xfer:
+ 	shl b32 $r8 6
+ 	clear b32 $r9
+ 	or $r11 $r11
+-	bra e mmctx_base_disabled
++	bra e #mmctx_base_disabled
+ 		iowr I[$r8 + 0x000] $r11	// MMCTX_BASE
+ 		bset $r9 0			// BASE_EN
+ 	mmctx_base_disabled:
+ 	or $r14 $r14
+-	bra e mmctx_multi_disabled
++	bra e #mmctx_multi_disabled
+ 		iowr I[$r8 + 0x200] $r14 	// MMCTX_MULTI_STRIDE
+ 		iowr I[$r8 + 0x300] $r15 	// MMCTX_MULTI_MASK
+ 		bset $r9 1			// MULTI_EN
+@@ -264,7 +264,7 @@ mmctx_xfer:
+ 		mmctx_wait_free:
+ 			iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
+ 			and $r14 0x1f
+-			bra e mmctx_wait_free
++			bra e #mmctx_wait_free
+ 
+ 		// queue up an entry
+ 		ld b32 $r14 D[$r12]
+@@ -272,19 +272,19 @@ mmctx_xfer:
+ 		iowr I[$r8 + 0x300] $r14
+ 		add b32 $r12 4
+ 		cmpu b32 $r12 $r13
+-		bra ne mmctx_exec_loop
++		bra ne #mmctx_exec_loop
+ 
+ 	xbit $r11 $r10 2
+-	bra ne mmctx_stop
++	bra ne #mmctx_stop
+ 		// wait for queue to empty
+ 		mmctx_fini_wait:
+ 			iord $r11 I[$r8 + 0x000]	// MMCTX_CTRL
+ 			and $r11 0x1f
+ 			cmpu b32 $r11 0x10
+-			bra ne mmctx_fini_wait
++			bra ne #mmctx_fini_wait
+ 		mov $r10 2				// DONE_MMCTX
+-		call wait_donez
+-		bra mmctx_done
++		call #wait_donez
++		bra #mmctx_done
+ 	mmctx_stop:
+ 		xbit $r11 $r10 0
+ 		shl b32 $r11 16			// DIR
+@@ -295,7 +295,7 @@ mmctx_xfer:
+ 			// wait for STOP_TRIGGER to clear
+ 			iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+ 			xbit $r11 $r11 18
+-			bra ne mmctx_stop_wait
++			bra ne #mmctx_stop_wait
+ 	mmctx_done:
+ 	trace_clr(T_MMCTX)
+ 	ret
+@@ -305,7 +305,7 @@ mmctx_xfer:
+ strand_wait:
+ 	push $r10
+ 	mov $r10 2
+-	call wait_donez
++	call #wait_donez
+ 	pop $r10
+ 	ret
+ 
+@@ -316,7 +316,7 @@ strand_pre:
+ 	sethi $r8 0x20000
+ 	mov $r9 0xc
+ 	iowr I[$r8] $r9
+-	call strand_wait
++	call #strand_wait
+ 	ret
+ 
+ // unknown - call after issuing strand commands
+@@ -326,7 +326,7 @@ strand_post:
+ 	sethi $r8 0x20000
+ 	mov $r9 0xd
+ 	iowr I[$r8] $r9
+-	call strand_wait
++	call #strand_wait
+ 	ret
+ 
+ // Selects strand set?!
+@@ -341,11 +341,11 @@ strand_set:
+ 	iowr I[$r10 + 0x000] $r12		// 0x93c = 0xf
+ 	mov $r12 0xb
+ 	iowr I[$r11 + 0x000] $r12		// 0x928 = 0xb
+-	call strand_wait
++	call #strand_wait
+ 	iowr I[$r10 + 0x000] $r14		// 0x93c = <id>
+ 	mov $r12 0xa
+ 	iowr I[$r11 + 0x000] $r12		// 0x928 = 0xa
+-	call strand_wait
++	call #strand_wait
+ 	ret
+ 
+ // Initialise strand context data
+@@ -357,22 +357,22 @@ strand_set:
+ //
+ strand_ctx_init:
+ 	trace_set(T_STRINIT)
+-	call strand_pre
++	call #strand_pre
+ 	mov $r14 3
+-	call strand_set
++	call #strand_set
+ 	mov $r10 0x46fc
+ 	sethi $r10 0x20000
+ 	add b32 $r11 $r10 0x400
+ 	iowr I[$r10 + 0x100] $r0	// STRAND_FIRST_GENE = 0
+ 	mov $r12 1
+ 	iowr I[$r11 + 0x000] $r12	// STRAND_CMD = LATCH_FIRST_GENE
+-	call strand_wait
++	call #strand_wait
+ 	sub b32 $r12 $r0 1
+ 	iowr I[$r10 + 0x000] $r12	// STRAND_GENE_CNT = 0xffffffff
+ 	mov $r12 2
+ 	iowr I[$r11 + 0x000] $r12	// STRAND_CMD = LATCH_GENE_CNT
+-	call strand_wait
+-	call strand_post
++	call #strand_wait
++	call #strand_post
+ 
+ 	// read the size of each strand, poke the context offset of
+ 	// each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
+@@ -391,7 +391,7 @@ strand_ctx_init:
+ 		add b32 $r14 $r10
+ 		add b32 $r8 4
+ 		sub b32 $r9 1
+-		bra ne ctx_init_strand_loop
++		bra ne #ctx_init_strand_loop
+ 
+ 	shl b32 $r14 8
+ 	sub b32 $r15 $r14 $r15
+diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
+index 636fe98..91d44ea 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
++++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
+@@ -87,6 +87,7 @@ nvc0_graph_class(struct drm_device *dev)
+ 	case 0xc1:
+ 		return 0x9197;
+ 	case 0xc8:
++	case 0xd9:
+ 		return 0x9297;
+ 	default:
+ 		return 0;
+diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
+index 96b0b93d..de77842 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
++++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
+@@ -1268,6 +1268,17 @@ nvc0_grctx_generate_9039(struct drm_device *dev)
+ static void
+ nvc0_grctx_generate_90c0(struct drm_device *dev)
+ {
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int i;
++
++	for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
++		nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
++		nv_mthd(dev, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
++		nv_mthd(dev, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
++		nv_mthd(dev, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
++		nv_mthd(dev, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
++		nv_mthd(dev, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
++	}
+ 	nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
+ 	nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
+ 	nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
+@@ -1276,6 +1287,12 @@ nvc0_grctx_generate_90c0(struct drm_device *dev)
+ 	nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
+ 	nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
+ 	nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
++	for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
++		nv_mthd(dev, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
++		nv_mthd(dev, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
++		nv_mthd(dev, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
++		nv_mthd(dev, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
++	}
+ 	nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
+ 	nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
+ 	nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
+@@ -1471,14 +1488,20 @@ nvc0_grctx_generate_shaders(struct drm_device *dev)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 
+-	if (dev_priv->chipset != 0xc1) {
+-		nv_wr32(dev, 0x405800, 0x078000bf);
+-		nv_wr32(dev, 0x405830, 0x02180000);
+-	} else {
++	if (dev_priv->chipset == 0xd9) {
+ 		nv_wr32(dev, 0x405800, 0x0f8000bf);
+ 		nv_wr32(dev, 0x405830, 0x02180218);
++		nv_wr32(dev, 0x405834, 0x08000000);
++	} else
++	if (dev_priv->chipset == 0xc1) {
++		nv_wr32(dev, 0x405800, 0x0f8000bf);
++		nv_wr32(dev, 0x405830, 0x02180218);
++		nv_wr32(dev, 0x405834, 0x00000000);
++	} else {
++		nv_wr32(dev, 0x405800, 0x078000bf);
++		nv_wr32(dev, 0x405830, 0x02180000);
++		nv_wr32(dev, 0x405834, 0x00000000);
+ 	}
+-	nv_wr32(dev, 0x405834, 0x00000000);
+ 	nv_wr32(dev, 0x405838, 0x00000000);
+ 	nv_wr32(dev, 0x405854, 0x00000000);
+ 	nv_wr32(dev, 0x405870, 0x00000001);
+@@ -1509,7 +1532,10 @@ nvc0_grctx_generate_unk64xx(struct drm_device *dev)
+ 	nv_wr32(dev, 0x4064ac, 0x00003fff);
+ 	nv_wr32(dev, 0x4064b4, 0x00000000);
+ 	nv_wr32(dev, 0x4064b8, 0x00000000);
+-	if (dev_priv->chipset == 0xc1) {
++	if (dev_priv->chipset == 0xd9)
++		nv_wr32(dev, 0x4064bc, 0x00000000);
++	if (dev_priv->chipset == 0xc1 ||
++	    dev_priv->chipset == 0xd9) {
+ 		nv_wr32(dev, 0x4064c0, 0x80140078);
+ 		nv_wr32(dev, 0x4064c4, 0x0086ffff);
+ 	}
+@@ -1550,10 +1576,23 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
+ 	/* ROPC_BROADCAST */
+ 	nv_wr32(dev, 0x408800, 0x02802a3c);
+ 	nv_wr32(dev, 0x408804, 0x00000040);
+-	nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005);
+-	nv_wr32(dev, 0x408900, 0x3080b801);
+-	nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001);
+-	nv_wr32(dev, 0x408908, 0x00c80929);
++	if (chipset == 0xd9) {
++		nv_wr32(dev, 0x408808, 0x1043e005);
++		nv_wr32(dev, 0x408900, 0x3080b801);
++		nv_wr32(dev, 0x408904, 0x1043e005);
++		nv_wr32(dev, 0x408908, 0x00c8102f);
++	} else
++	if (chipset == 0xc1) {
++		nv_wr32(dev, 0x408808, 0x1003e005);
++		nv_wr32(dev, 0x408900, 0x3080b801);
++		nv_wr32(dev, 0x408904, 0x62000001);
++		nv_wr32(dev, 0x408908, 0x00c80929);
++	} else {
++		nv_wr32(dev, 0x408808, 0x0003e00d);
++		nv_wr32(dev, 0x408900, 0x3080b801);
++		nv_wr32(dev, 0x408904, 0x02000001);
++		nv_wr32(dev, 0x408908, 0x00c80929);
++	}
+ 	nv_wr32(dev, 0x40890c, 0x00000000);
+ 	nv_wr32(dev, 0x408980, 0x0000011d);
+ }
+@@ -1572,7 +1611,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
+ 	nv_wr32(dev, 0x418408, 0x00000000);
+ 	nv_wr32(dev, 0x41840c, 0x00001008);
+ 	nv_wr32(dev, 0x418410, 0x0fff0fff);
+-	nv_wr32(dev, 0x418414, 0x00200fff);
++	nv_wr32(dev, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
+ 	nv_wr32(dev, 0x418450, 0x00000000);
+ 	nv_wr32(dev, 0x418454, 0x00000000);
+ 	nv_wr32(dev, 0x418458, 0x00000000);
+@@ -1587,14 +1626,17 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
+ 	nv_wr32(dev, 0x418700, 0x00000002);
+ 	nv_wr32(dev, 0x418704, 0x00000080);
+ 	nv_wr32(dev, 0x418708, 0x00000000);
+-	nv_wr32(dev, 0x41870c, 0x07c80000);
++	nv_wr32(dev, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
+ 	nv_wr32(dev, 0x418710, 0x00000000);
+-	nv_wr32(dev, 0x418800, 0x0006860a);
++	nv_wr32(dev, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
+ 	nv_wr32(dev, 0x418808, 0x00000000);
+ 	nv_wr32(dev, 0x41880c, 0x00000000);
+ 	nv_wr32(dev, 0x418810, 0x00000000);
+ 	nv_wr32(dev, 0x418828, 0x00008442);
+-	nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001);
++	if (chipset == 0xc1 || chipset == 0xd9)
++		nv_wr32(dev, 0x418830, 0x10000001);
++	else
++		nv_wr32(dev, 0x418830, 0x00000001);
+ 	nv_wr32(dev, 0x4188d8, 0x00000008);
+ 	nv_wr32(dev, 0x4188e0, 0x01000000);
+ 	nv_wr32(dev, 0x4188e8, 0x00000000);
+@@ -1602,7 +1644,12 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
+ 	nv_wr32(dev, 0x4188f0, 0x00000000);
+ 	nv_wr32(dev, 0x4188f4, 0x00000000);
+ 	nv_wr32(dev, 0x4188f8, 0x00000000);
+-	nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 0x00100000 : 0x00100018);
++	if (chipset == 0xd9)
++		nv_wr32(dev, 0x4188fc, 0x20100008);
++	else if (chipset == 0xc1)
++		nv_wr32(dev, 0x4188fc, 0x00100018);
++	else
++		nv_wr32(dev, 0x4188fc, 0x00100000);
+ 	nv_wr32(dev, 0x41891c, 0x00ff00ff);
+ 	nv_wr32(dev, 0x418924, 0x00000000);
+ 	nv_wr32(dev, 0x418928, 0x00ffff00);
+@@ -1616,7 +1663,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
+ 		nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
+ 		nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
+ 	}
+-	nv_wr32(dev, 0x418b00, 0x00000000);
++	nv_wr32(dev, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
+ 	nv_wr32(dev, 0x418b08, 0x0a418820);
+ 	nv_wr32(dev, 0x418b0c, 0x062080e6);
+ 	nv_wr32(dev, 0x418b10, 0x020398a4);
+@@ -1633,7 +1680,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
+ 	nv_wr32(dev, 0x418c24, 0x00000000);
+ 	nv_wr32(dev, 0x418c28, 0x00000000);
+ 	nv_wr32(dev, 0x418c2c, 0x00000000);
+-	if (chipset == 0xc1)
++	if (chipset == 0xc1 || chipset == 0xd9)
+ 		nv_wr32(dev, 0x418c6c, 0x00000001);
+ 	nv_wr32(dev, 0x418c80, 0x20200004);
+ 	nv_wr32(dev, 0x418c8c, 0x00000001);
+@@ -1653,7 +1700,10 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
+ 	nv_wr32(dev, 0x419818, 0x00000000);
+ 	nv_wr32(dev, 0x41983c, 0x00038bc7);
+ 	nv_wr32(dev, 0x419848, 0x00000000);
+-	nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129);
++	if (chipset == 0xc1 || chipset == 0xd9)
++		nv_wr32(dev, 0x419864, 0x00000129);
++	else
++		nv_wr32(dev, 0x419864, 0x0000012a);
+ 	nv_wr32(dev, 0x419888, 0x00000000);
+ 	nv_wr32(dev, 0x419a00, 0x000001f0);
+ 	nv_wr32(dev, 0x419a04, 0x00000001);
+@@ -1663,7 +1713,9 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
+ 	nv_wr32(dev, 0x419a14, 0x00000200);
+ 	nv_wr32(dev, 0x419a1c, 0x00000000);
+ 	nv_wr32(dev, 0x419a20, 0x00000800);
+-	if (chipset != 0xc0 && chipset != 0xc8)
++	if (chipset == 0xd9)
++		nv_wr32(dev, 0x00419ac4, 0x0017f440);
++	else if (chipset != 0xc0 && chipset != 0xc8)
+ 		nv_wr32(dev, 0x00419ac4, 0x0007f440);
+ 	nv_wr32(dev, 0x419b00, 0x0a418820);
+ 	nv_wr32(dev, 0x419b04, 0x062080e6);
+@@ -1672,21 +1724,33 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
+ 	nv_wr32(dev, 0x419b10, 0x0a418820);
+ 	nv_wr32(dev, 0x419b14, 0x000000e6);
+ 	nv_wr32(dev, 0x419bd0, 0x00900103);
+-	nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001);
++	if (chipset == 0xc1 || chipset == 0xd9)
++		nv_wr32(dev, 0x419be0, 0x00400001);
++	else
++		nv_wr32(dev, 0x419be0, 0x00000001);
+ 	nv_wr32(dev, 0x419be4, 0x00000000);
+-	nv_wr32(dev, 0x419c00, 0x00000002);
++	nv_wr32(dev, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
+ 	nv_wr32(dev, 0x419c04, 0x00000006);
+ 	nv_wr32(dev, 0x419c08, 0x00000002);
+ 	nv_wr32(dev, 0x419c20, 0x00000000);
+-	if (chipset == 0xce || chipset == 0xcf)
++	if (dev_priv->chipset == 0xd9) {
++		nv_wr32(dev, 0x419c24, 0x00084210);
++		nv_wr32(dev, 0x419c28, 0x3cf3cf3c);
+ 		nv_wr32(dev, 0x419cb0, 0x00020048);
+-	else
++	} else
++	if (chipset == 0xce || chipset == 0xcf) {
++		nv_wr32(dev, 0x419cb0, 0x00020048);
++	} else {
+ 		nv_wr32(dev, 0x419cb0, 0x00060048);
++	}
+ 	nv_wr32(dev, 0x419ce8, 0x00000000);
+ 	nv_wr32(dev, 0x419cf4, 0x00000183);
+-	nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000);
++	if (chipset == 0xc1 || chipset == 0xd9)
++		nv_wr32(dev, 0x419d20, 0x12180000);
++	else
++		nv_wr32(dev, 0x419d20, 0x02180000);
+ 	nv_wr32(dev, 0x419d24, 0x00001fff);
+-	if (chipset == 0xc1)
++	if (chipset == 0xc1 || chipset == 0xd9)
+ 		nv_wr32(dev, 0x419d44, 0x02180218);
+ 	nv_wr32(dev, 0x419e04, 0x00000000);
+ 	nv_wr32(dev, 0x419e08, 0x00000000);
+@@ -1986,6 +2050,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
+ 	nv_icmd(dev, 0x00000215, 0x00000040);
+ 	nv_icmd(dev, 0x00000216, 0x00000040);
+ 	nv_icmd(dev, 0x00000217, 0x00000040);
++	if (dev_priv->chipset == 0xd9) {
++		for (i = 0x0400; i <= 0x0417; i++)
++			nv_icmd(dev, i, 0x00000040);
++	}
+ 	nv_icmd(dev, 0x00000218, 0x0000c080);
+ 	nv_icmd(dev, 0x00000219, 0x0000c080);
+ 	nv_icmd(dev, 0x0000021a, 0x0000c080);
+@@ -1994,6 +2062,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
+ 	nv_icmd(dev, 0x0000021d, 0x0000c080);
+ 	nv_icmd(dev, 0x0000021e, 0x0000c080);
+ 	nv_icmd(dev, 0x0000021f, 0x0000c080);
++	if (dev_priv->chipset == 0xd9) {
++		for (i = 0x0440; i <= 0x0457; i++)
++			nv_icmd(dev, i, 0x0000c080);
++	}
+ 	nv_icmd(dev, 0x000000ad, 0x0000013e);
+ 	nv_icmd(dev, 0x000000e1, 0x00000010);
+ 	nv_icmd(dev, 0x00000290, 0x00000000);
+@@ -2556,7 +2628,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
+ 	nv_icmd(dev, 0x0000053f, 0xffff0000);
+ 	nv_icmd(dev, 0x00000585, 0x0000003f);
+ 	nv_icmd(dev, 0x00000576, 0x00000003);
+-	if (dev_priv->chipset == 0xc1)
++	if (dev_priv->chipset == 0xc1 ||
++	    dev_priv->chipset == 0xd9)
+ 		nv_icmd(dev, 0x0000057b, 0x00000059);
+ 	nv_icmd(dev, 0x00000586, 0x00000040);
+ 	nv_icmd(dev, 0x00000582, 0x00000080);
+@@ -2658,6 +2731,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
+ 	nv_icmd(dev, 0x00000957, 0x00000003);
+ 	nv_icmd(dev, 0x0000095e, 0x20164010);
+ 	nv_icmd(dev, 0x0000095f, 0x00000020);
++	if (dev_priv->chipset == 0xd9)
++		nv_icmd(dev, 0x0000097d, 0x00000020);
+ 	nv_icmd(dev, 0x00000683, 0x00000006);
+ 	nv_icmd(dev, 0x00000685, 0x003fffff);
+ 	nv_icmd(dev, 0x00000687, 0x00000c48);
+diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
+index 06f5e26..15272be 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
++++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
+@@ -32,7 +32,7 @@
+  * - watchdog timer around ctx operations
+  */
+ 
+-.section nvc0_grgpc_data
++.section #nvc0_grgpc_data
+ include(`nvc0_graph.fuc')
+ gpc_id:			.b32 0
+ gpc_mmio_list_head:	.b32 0
+@@ -48,40 +48,45 @@ cmd_queue:		queue_init
+ // chipset descriptions
+ chipsets:
+ .b8  0xc0 0 0 0
+-.b16 nvc0_gpc_mmio_head
+-.b16 nvc0_gpc_mmio_tail
+-.b16 nvc0_tpc_mmio_head
+-.b16 nvc0_tpc_mmio_tail
++.b16 #nvc0_gpc_mmio_head
++.b16 #nvc0_gpc_mmio_tail
++.b16 #nvc0_tpc_mmio_head
++.b16 #nvc0_tpc_mmio_tail
+ .b8  0xc1 0 0 0
+-.b16 nvc0_gpc_mmio_head
+-.b16 nvc1_gpc_mmio_tail
+-.b16 nvc0_tpc_mmio_head
+-.b16 nvc1_tpc_mmio_tail
++.b16 #nvc0_gpc_mmio_head
++.b16 #nvc1_gpc_mmio_tail
++.b16 #nvc0_tpc_mmio_head
++.b16 #nvc1_tpc_mmio_tail
+ .b8  0xc3 0 0 0
+-.b16 nvc0_gpc_mmio_head
+-.b16 nvc0_gpc_mmio_tail
+-.b16 nvc0_tpc_mmio_head
+-.b16 nvc3_tpc_mmio_tail
++.b16 #nvc0_gpc_mmio_head
++.b16 #nvc0_gpc_mmio_tail
++.b16 #nvc0_tpc_mmio_head
++.b16 #nvc3_tpc_mmio_tail
+ .b8  0xc4 0 0 0
+-.b16 nvc0_gpc_mmio_head
+-.b16 nvc0_gpc_mmio_tail
+-.b16 nvc0_tpc_mmio_head
+-.b16 nvc3_tpc_mmio_tail
++.b16 #nvc0_gpc_mmio_head
++.b16 #nvc0_gpc_mmio_tail
++.b16 #nvc0_tpc_mmio_head
++.b16 #nvc3_tpc_mmio_tail
+ .b8  0xc8 0 0 0
+-.b16 nvc0_gpc_mmio_head
+-.b16 nvc0_gpc_mmio_tail
+-.b16 nvc0_tpc_mmio_head
+-.b16 nvc0_tpc_mmio_tail
++.b16 #nvc0_gpc_mmio_head
++.b16 #nvc0_gpc_mmio_tail
++.b16 #nvc0_tpc_mmio_head
++.b16 #nvc0_tpc_mmio_tail
+ .b8  0xce 0 0 0
+-.b16 nvc0_gpc_mmio_head
+-.b16 nvc0_gpc_mmio_tail
+-.b16 nvc0_tpc_mmio_head
+-.b16 nvc3_tpc_mmio_tail
++.b16 #nvc0_gpc_mmio_head
++.b16 #nvc0_gpc_mmio_tail
++.b16 #nvc0_tpc_mmio_head
++.b16 #nvc3_tpc_mmio_tail
+ .b8  0xcf 0 0 0
+-.b16 nvc0_gpc_mmio_head
+-.b16 nvc0_gpc_mmio_tail
+-.b16 nvc0_tpc_mmio_head
+-.b16 nvcf_tpc_mmio_tail
++.b16 #nvc0_gpc_mmio_head
++.b16 #nvc0_gpc_mmio_tail
++.b16 #nvc0_tpc_mmio_head
++.b16 #nvcf_tpc_mmio_tail
++.b8  0xd9 0 0 0
++.b16 #nvd9_gpc_mmio_head
++.b16 #nvd9_gpc_mmio_tail
++.b16 #nvd9_tpc_mmio_head
++.b16 #nvd9_tpc_mmio_tail
+ .b8  0 0 0 0
+ 
+ // GPC mmio lists
+@@ -114,6 +119,35 @@ nvc0_gpc_mmio_tail:
+ mmctx_data(0x000c6c, 1);
+ nvc1_gpc_mmio_tail:
+ 
++nvd9_gpc_mmio_head:
++mmctx_data(0x000380, 1)
++mmctx_data(0x000400, 2)
++mmctx_data(0x00040c, 3)
++mmctx_data(0x000450, 9)
++mmctx_data(0x000600, 1)
++mmctx_data(0x000684, 1)
++mmctx_data(0x000700, 5)
++mmctx_data(0x000800, 1)
++mmctx_data(0x000808, 3)
++mmctx_data(0x000828, 1)
++mmctx_data(0x000830, 1)
++mmctx_data(0x0008d8, 1)
++mmctx_data(0x0008e0, 1)
++mmctx_data(0x0008e8, 6)
++mmctx_data(0x00091c, 1)
++mmctx_data(0x000924, 3)
++mmctx_data(0x000b00, 1)
++mmctx_data(0x000b08, 6)
++mmctx_data(0x000bb8, 1)
++mmctx_data(0x000c08, 1)
++mmctx_data(0x000c10, 8)
++mmctx_data(0x000c6c, 1)
++mmctx_data(0x000c80, 1)
++mmctx_data(0x000c8c, 1)
++mmctx_data(0x001000, 3)
++mmctx_data(0x001014, 1)
++nvd9_gpc_mmio_tail:
++
+ // TPC mmio lists
+ nvc0_tpc_mmio_head:
+ mmctx_data(0x000018, 1)
+@@ -146,9 +180,34 @@ nvc3_tpc_mmio_tail:
+ mmctx_data(0x000544, 1)
+ nvc1_tpc_mmio_tail:
+ 
++nvd9_tpc_mmio_head:
++mmctx_data(0x000018, 1)
++mmctx_data(0x00003c, 1)
++mmctx_data(0x000048, 1)
++mmctx_data(0x000064, 1)
++mmctx_data(0x000088, 1)
++mmctx_data(0x000200, 6)
++mmctx_data(0x00021c, 2)
++mmctx_data(0x0002c4, 1)
++mmctx_data(0x000300, 6)
++mmctx_data(0x0003d0, 1)
++mmctx_data(0x0003e0, 2)
++mmctx_data(0x000400, 3)
++mmctx_data(0x000420, 3)
++mmctx_data(0x0004b0, 1)
++mmctx_data(0x0004e8, 1)
++mmctx_data(0x0004f4, 1)
++mmctx_data(0x000520, 2)
++mmctx_data(0x000544, 1)
++mmctx_data(0x000604, 4)
++mmctx_data(0x000644, 20)
++mmctx_data(0x000698, 1)
++mmctx_data(0x0006e0, 1)
++mmctx_data(0x000750, 3)
++nvd9_tpc_mmio_tail:
+ 
+-.section nvc0_grgpc_code
+-bra init
++.section #nvc0_grgpc_code
++bra #init
+ define(`include_code')
+ include(`nvc0_graph.fuc')
+ 
+@@ -160,10 +219,10 @@ error:
+ 	push $r14
+ 	mov $r14 -0x67ec 	// 0x9814
+ 	sethi $r14 0x400000
+-	call nv_wr32		// HUB_CTXCTL_CC_SCRATCH[5] = error code
++	call #nv_wr32		// HUB_CTXCTL_CC_SCRATCH[5] = error code
+ 	add b32 $r14 0x41c
+ 	mov $r15 1
+-	call nv_wr32		// HUB_CTXCTL_INTR_UP_SET
++	call #nv_wr32		// HUB_CTXCTL_INTR_UP_SET
+ 	pop $r14
+ 	ret
+ 
+@@ -190,7 +249,7 @@ init:
+ 	iowr I[$r1 + 0x000] $r2		// FIFO_ENABLE
+ 
+ 	// setup i0 handler, and route all interrupts to it
+-	mov $r1 ih
++	mov $r1 #ih
+ 	mov $iv0 $r1
+ 	mov $r1 0x400
+ 	iowr I[$r1 + 0x300] $r0		// INTR_DISPATCH
+@@ -210,24 +269,24 @@ init:
+ 	and $r2 0x1f
+ 	shl b32 $r3 $r2
+ 	sub b32 $r3 1
+-	st b32 D[$r0 + tpc_count] $r2
+-	st b32 D[$r0 + tpc_mask] $r3
++	st b32 D[$r0 + #tpc_count] $r2
++	st b32 D[$r0 + #tpc_mask] $r3
+ 	add b32 $r1 0x400
+ 	iord $r2 I[$r1 + 0x000]		// MYINDEX
+-	st b32 D[$r0 + gpc_id] $r2
++	st b32 D[$r0 + #gpc_id] $r2
+ 
+ 	// find context data for this chipset
+ 	mov $r2 0x800
+ 	shl b32 $r2 6
+ 	iord $r2 I[$r2 + 0x000]		// CC_SCRATCH[0]
+-	mov $r1 chipsets - 12
++	mov $r1 #chipsets - 12
+ 	init_find_chipset:
+ 		add b32 $r1 12
+ 		ld b32 $r3 D[$r1 + 0x00]
+ 		cmpu b32 $r3 $r2
+-		bra e init_context
++		bra e #init_context
+ 		cmpu b32 $r3 0
+-		bra ne init_find_chipset
++		bra ne #init_find_chipset
+ 		// unknown chipset
+ 		ret
+ 
+@@ -253,19 +312,19 @@ init:
+ 	clear b32 $r15
+ 	ld b16 $r14 D[$r1 + 4]
+ 	ld b16 $r15 D[$r1 + 6]
+-	st b16 D[$r0 + gpc_mmio_list_head] $r14
+-	st b16 D[$r0 + gpc_mmio_list_tail] $r15
+-	call mmctx_size
++	st b16 D[$r0 + #gpc_mmio_list_head] $r14
++	st b16 D[$r0 + #gpc_mmio_list_tail] $r15
++	call #mmctx_size
+ 	add b32 $r2 $r15
+ 	add b32 $r3 $r15
+ 
+ 	// calculate per-TPC mmio context size, store the list pointers
+ 	ld b16 $r14 D[$r1 + 8]
+ 	ld b16 $r15 D[$r1 + 10]
+-	st b16 D[$r0 + tpc_mmio_list_head] $r14
+-	st b16 D[$r0 + tpc_mmio_list_tail] $r15
+-	call mmctx_size
+-	ld b32 $r14 D[$r0 + tpc_count]
++	st b16 D[$r0 + #tpc_mmio_list_head] $r14
++	st b16 D[$r0 + #tpc_mmio_list_tail] $r15
++	call #mmctx_size
++	ld b32 $r14 D[$r0 + #tpc_count]
+ 	mulu $r14 $r15
+ 	add b32 $r2 $r14
+ 	add b32 $r3 $r14
+@@ -283,7 +342,7 @@ init:
+ 
+ 	// calculate size of strand context data
+ 	mov b32 $r15 $r2
+-	call strand_ctx_init
++	call #strand_ctx_init
+ 	add b32 $r3 $r15
+ 
+ 	// save context size, and tell HUB we're done
+@@ -301,13 +360,13 @@ init:
+ main:
+ 	bset $flags $p0
+ 	sleep $p0
+-	mov $r13 cmd_queue
+-	call queue_get
+-	bra $p1 main
++	mov $r13 #cmd_queue
++	call #queue_get
++	bra $p1 #main
+ 
+ 	// 0x0000-0x0003 are all context transfers
+ 	cmpu b32 $r14 0x04
+-	bra nc main_not_ctx_xfer
++	bra nc #main_not_ctx_xfer
+ 		// fetch $flags and mask off $p1/$p2
+ 		mov $r1 $flags
+ 		mov $r2 0x0006
+@@ -318,14 +377,14 @@ main:
+ 		or $r1 $r14
+ 		mov $flags $r1
+ 		// transfer context data
+-		call ctx_xfer
+-		bra main
++		call #ctx_xfer
++		bra #main
+ 
+ 	main_not_ctx_xfer:
+ 	shl b32 $r15 $r14 16
+ 	or $r15 E_BAD_COMMAND
+-	call error
+-	bra main
++	call #error
++	bra #main
+ 
+ // interrupt handler
+ ih:
+@@ -342,13 +401,13 @@ ih:
+ 	// incoming fifo command?
+ 	iord $r10 I[$r0 + 0x200]	// INTR
+ 	and $r11 $r10 0x00000004
+-	bra e ih_no_fifo
++	bra e #ih_no_fifo
+ 		// queue incoming fifo command for later processing
+ 		mov $r11 0x1900
+-		mov $r13 cmd_queue
++		mov $r13 #cmd_queue
+ 		iord $r14 I[$r11 + 0x100]	// FIFO_CMD
+ 		iord $r15 I[$r11 + 0x000]	// FIFO_DATA
+-		call queue_put
++		call #queue_put
+ 		add b32 $r11 0x400
+ 		mov $r14 1
+ 		iowr I[$r11 + 0x000] $r14	// FIFO_ACK
+@@ -374,11 +433,11 @@ ih:
+ //
+ hub_barrier_done:
+ 	mov $r15 1
+-	ld b32 $r14 D[$r0 + gpc_id]
++	ld b32 $r14 D[$r0 + #gpc_id]
+ 	shl b32 $r15 $r14
+ 	mov $r14 -0x6be8 	// 0x409418 - HUB_BAR_SET
+ 	sethi $r14 0x400000
+-	call nv_wr32
++	call #nv_wr32
+ 	ret
+ 
+ // Disables various things, waits a bit, and re-enables them..
+@@ -395,7 +454,7 @@ ctx_redswitch:
+ 	mov $r15 8
+ 	ctx_redswitch_delay:
+ 		sub b32 $r15 1
+-		bra ne ctx_redswitch_delay
++		bra ne #ctx_redswitch_delay
+ 	mov $r15 0xa20
+ 	iowr I[$r14] $r15	// GPC_RED_SWITCH = UNK11, ENABLE, POWER
+ 	ret
+@@ -413,8 +472,8 @@ ctx_xfer:
+ 	mov $r1 0xa04
+ 	shl b32 $r1 6
+ 	iowr I[$r1 + 0x000] $r15// MEM_BASE
+-	bra not $p1 ctx_xfer_not_load
+-		call ctx_redswitch
++	bra not $p1 #ctx_xfer_not_load
++		call #ctx_redswitch
+ 	ctx_xfer_not_load:
+ 
+ 	// strands
+@@ -422,7 +481,7 @@ ctx_xfer:
+ 	sethi $r1 0x20000
+ 	mov $r2 0xc
+ 	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0c
+-	call strand_wait
++	call #strand_wait
+ 	mov $r2 0x47fc
+ 	sethi $r2 0x20000
+ 	iowr I[$r2] $r0		// STRAND_FIRST_GENE(0x3f) = 0x00
+@@ -435,46 +494,46 @@ ctx_xfer:
+ 	or $r10 2		// first
+ 	mov $r11 0x0000
+ 	sethi $r11 0x500000
+-	ld b32 $r12 D[$r0 + gpc_id]
++	ld b32 $r12 D[$r0 + #gpc_id]
+ 	shl b32 $r12 15
+ 	add b32 $r11 $r12	// base = NV_PGRAPH_GPCn
+-	ld b32 $r12 D[$r0 + gpc_mmio_list_head]
+-	ld b32 $r13 D[$r0 + gpc_mmio_list_tail]
++	ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
++	ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
+ 	mov $r14 0		// not multi
+-	call mmctx_xfer
++	call #mmctx_xfer
+ 
+ 	// per-TPC mmio context
+ 	xbit $r10 $flags $p1	// direction
+ 	or $r10 4		// last
+ 	mov $r11 0x4000
+ 	sethi $r11 0x500000	// base = NV_PGRAPH_GPC0_TPC0
+-	ld b32 $r12 D[$r0 + gpc_id]
++	ld b32 $r12 D[$r0 + #gpc_id]
+ 	shl b32 $r12 15
+ 	add b32 $r11 $r12	// base = NV_PGRAPH_GPCn_TPC0
+-	ld b32 $r12 D[$r0 + tpc_mmio_list_head]
+-	ld b32 $r13 D[$r0 + tpc_mmio_list_tail]
+-	ld b32 $r15 D[$r0 + tpc_mask]
++	ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
++	ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
++	ld b32 $r15 D[$r0 + #tpc_mask]
+ 	mov $r14 0x800		// stride = 0x800
+-	call mmctx_xfer
++	call #mmctx_xfer
+ 
+ 	// wait for strands to finish
+-	call strand_wait
++	call #strand_wait
+ 
+ 	// if load, or a save without a load following, do some
+ 	// unknown stuff that's done after finishing a block of
+ 	// strand commands
+-	bra $p1 ctx_xfer_post
+-	bra not $p2 ctx_xfer_done
++	bra $p1 #ctx_xfer_post
++	bra not $p2 #ctx_xfer_done
+ 	ctx_xfer_post:
+ 		mov $r1 0x4afc
+ 		sethi $r1 0x20000
+ 		mov $r2 0xd
+ 		iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0d
+-		call strand_wait
++		call #strand_wait
+ 
+ 	// mark completion in HUB's barrier
+ 	ctx_xfer_done:
+-	call hub_barrier_done
++	call #hub_barrier_done
+ 	ret
+ 
+ .align 256
+diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
+index 6f82032..a988b8a 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
++++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
+@@ -25,26 +25,29 @@ uint32_t nvc0_grgpc_data[] = {
+ 	0x00000000,
+ 	0x00000000,
+ 	0x000000c0,
+-	0x011c00bc,
+-	0x01700120,
++	0x012800c8,
++	0x01e40194,
+ 	0x000000c1,
+-	0x012000bc,
+-	0x01840120,
++	0x012c00c8,
++	0x01f80194,
+ 	0x000000c3,
+-	0x011c00bc,
+-	0x01800120,
++	0x012800c8,
++	0x01f40194,
+ 	0x000000c4,
+-	0x011c00bc,
+-	0x01800120,
++	0x012800c8,
++	0x01f40194,
+ 	0x000000c8,
+-	0x011c00bc,
+-	0x01700120,
++	0x012800c8,
++	0x01e40194,
+ 	0x000000ce,
+-	0x011c00bc,
+-	0x01800120,
++	0x012800c8,
++	0x01f40194,
+ 	0x000000cf,
+-	0x011c00bc,
+-	0x017c0120,
++	0x012800c8,
++	0x01f00194,
++	0x000000d9,
++	0x0194012c,
++	0x025401f8,
+ 	0x00000000,
+ 	0x00000380,
+ 	0x14000400,
+@@ -71,6 +74,32 @@ uint32_t nvc0_grgpc_data[] = {
+ 	0x08001000,
+ 	0x00001014,
+ 	0x00000c6c,
++	0x00000380,
++	0x04000400,
++	0x0800040c,
++	0x20000450,
++	0x00000600,
++	0x00000684,
++	0x10000700,
++	0x00000800,
++	0x08000808,
++	0x00000828,
++	0x00000830,
++	0x000008d8,
++	0x000008e0,
++	0x140008e8,
++	0x0000091c,
++	0x08000924,
++	0x00000b00,
++	0x14000b08,
++	0x00000bb8,
++	0x00000c08,
++	0x1c000c10,
++	0x00000c6c,
++	0x00000c80,
++	0x00000c8c,
++	0x08001000,
++	0x00001014,
+ 	0x00000018,
+ 	0x0000003c,
+ 	0x00000048,
+@@ -96,6 +125,29 @@ uint32_t nvc0_grgpc_data[] = {
+ 	0x000006e0,
+ 	0x000004bc,
+ 	0x00000544,
++	0x00000018,
++	0x0000003c,
++	0x00000048,
++	0x00000064,
++	0x00000088,
++	0x14000200,
++	0x0400021c,
++	0x000002c4,
++	0x14000300,
++	0x000003d0,
++	0x040003e0,
++	0x08000400,
++	0x08000420,
++	0x000004b0,
++	0x000004e8,
++	0x000004f4,
++	0x04000520,
++	0x00000544,
++	0x0c000604,
++	0x4c000644,
++	0x00000698,
++	0x000006e0,
++	0x08000750,
+ };
+ 
+ uint32_t nvc0_grgpc_code[] = {
+diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
+index e4f8c7e..98acddb 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
++++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
+@@ -27,7 +27,7 @@
+  *    m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
+  */
+ 
+-.section nvc0_grhub_data
++.section #nvc0_grhub_data
+ include(`nvc0_graph.fuc')
+ gpc_count:		.b32 0
+ rop_count:		.b32 0
+@@ -39,26 +39,29 @@ ctx_current:		.b32 0
+ 
+ chipsets:
+ .b8  0xc0 0 0 0
+-.b16 nvc0_hub_mmio_head
+-.b16 nvc0_hub_mmio_tail
++.b16 #nvc0_hub_mmio_head
++.b16 #nvc0_hub_mmio_tail
+ .b8  0xc1 0 0 0
+-.b16 nvc0_hub_mmio_head
+-.b16 nvc1_hub_mmio_tail
++.b16 #nvc0_hub_mmio_head
++.b16 #nvc1_hub_mmio_tail
+ .b8  0xc3 0 0 0
+-.b16 nvc0_hub_mmio_head
+-.b16 nvc0_hub_mmio_tail
++.b16 #nvc0_hub_mmio_head
++.b16 #nvc0_hub_mmio_tail
+ .b8  0xc4 0 0 0
+-.b16 nvc0_hub_mmio_head
+-.b16 nvc0_hub_mmio_tail
++.b16 #nvc0_hub_mmio_head
++.b16 #nvc0_hub_mmio_tail
+ .b8  0xc8 0 0 0
+-.b16 nvc0_hub_mmio_head
+-.b16 nvc0_hub_mmio_tail
++.b16 #nvc0_hub_mmio_head
++.b16 #nvc0_hub_mmio_tail
+ .b8  0xce 0 0 0
+-.b16 nvc0_hub_mmio_head
+-.b16 nvc0_hub_mmio_tail
++.b16 #nvc0_hub_mmio_head
++.b16 #nvc0_hub_mmio_tail
+ .b8  0xcf 0 0 0
+-.b16 nvc0_hub_mmio_head
+-.b16 nvc0_hub_mmio_tail
++.b16 #nvc0_hub_mmio_head
++.b16 #nvc0_hub_mmio_tail
++.b8  0xd9 0 0 0
++.b16 #nvd9_hub_mmio_head
++.b16 #nvd9_hub_mmio_tail
+ .b8  0 0 0 0
+ 
+ nvc0_hub_mmio_head:
+@@ -105,6 +108,48 @@ nvc0_hub_mmio_tail:
+ mmctx_data(0x4064c0, 2)
+ nvc1_hub_mmio_tail:
+ 
++nvd9_hub_mmio_head:
++mmctx_data(0x17e91c, 2)
++mmctx_data(0x400204, 2)
++mmctx_data(0x404004, 10)
++mmctx_data(0x404044, 1)
++mmctx_data(0x404094, 14)
++mmctx_data(0x4040d0, 7)
++mmctx_data(0x4040f8, 1)
++mmctx_data(0x404130, 3)
++mmctx_data(0x404150, 3)
++mmctx_data(0x404164, 2)
++mmctx_data(0x404178, 2)
++mmctx_data(0x404200, 8)
++mmctx_data(0x404404, 14)
++mmctx_data(0x404460, 4)
++mmctx_data(0x404480, 1)
++mmctx_data(0x404498, 1)
++mmctx_data(0x404604, 4)
++mmctx_data(0x404618, 32)
++mmctx_data(0x404698, 21)
++mmctx_data(0x4046f0, 2)
++mmctx_data(0x404700, 22)
++mmctx_data(0x405800, 1)
++mmctx_data(0x405830, 3)
++mmctx_data(0x405854, 1)
++mmctx_data(0x405870, 4)
++mmctx_data(0x405a00, 2)
++mmctx_data(0x405a18, 1)
++mmctx_data(0x406020, 1)
++mmctx_data(0x406028, 4)
++mmctx_data(0x4064a8, 2)
++mmctx_data(0x4064b4, 5)
++mmctx_data(0x407804, 1)
++mmctx_data(0x40780c, 6)
++mmctx_data(0x4078bc, 1)
++mmctx_data(0x408000, 7)
++mmctx_data(0x408064, 1)
++mmctx_data(0x408800, 3)
++mmctx_data(0x408900, 4)
++mmctx_data(0x408980, 1)
++nvd9_hub_mmio_tail:
++
+ .align 256
+ chan_data:
+ chan_mmio_count:	.b32 0
+@@ -113,8 +158,8 @@ chan_mmio_address:	.b32 0
+ .align 256
+ xfer_data: 		.b32 0
+ 
+-.section nvc0_grhub_code
+-bra init
++.section #nvc0_grhub_code
++bra #init
+ define(`include_code')
+ include(`nvc0_graph.fuc')
+ 
+@@ -157,7 +202,7 @@ init:
+ 	iowr I[$r1 + 0x000] $r2	// FIFO_ENABLE
+ 
+ 	// setup i0 handler, and route all interrupts to it
+-	mov $r1 ih
++	mov $r1 #ih
+ 	mov $iv0 $r1
+ 	mov $r1 0x400
+ 	iowr I[$r1 + 0x300] $r0	// INTR_DISPATCH
+@@ -201,11 +246,11 @@ init:
+ 	// fetch enabled GPC/ROP counts
+ 	mov $r14 -0x69fc	// 0x409604
+ 	sethi $r14 0x400000
+-	call nv_rd32
++	call #nv_rd32
+ 	extr $r1 $r15 16:20
+-	st b32 D[$r0 + rop_count] $r1
++	st b32 D[$r0 + #rop_count] $r1
+ 	and $r15 0x1f
+-	st b32 D[$r0 + gpc_count] $r15
++	st b32 D[$r0 + #gpc_count] $r15
+ 
+ 	// set BAR_REQMASK to GPC mask
+ 	mov $r1 1
+@@ -220,14 +265,14 @@ init:
+ 	mov $r2 0x800
+ 	shl b32 $r2 6
+ 	iord $r2 I[$r2 + 0x000]		// CC_SCRATCH[0]
+-	mov $r15 chipsets - 8
++	mov $r15 #chipsets - 8
+ 	init_find_chipset:
+ 		add b32 $r15 8
+ 		ld b32 $r3 D[$r15 + 0x00]
+ 		cmpu b32 $r3 $r2
+-		bra e init_context
++		bra e #init_context
+ 		cmpu b32 $r3 0
+-		bra ne init_find_chipset
++		bra ne #init_find_chipset
+ 		// unknown chipset
+ 		ret
+ 
+@@ -239,9 +284,9 @@ init:
+ 	ld b16 $r14 D[$r15 + 4]
+ 	ld b16 $r15 D[$r15 + 6]
+ 	sethi $r14 0
+-	st b32 D[$r0 + hub_mmio_list_head] $r14
+-	st b32 D[$r0 + hub_mmio_list_tail] $r15
+-	call mmctx_size
++	st b32 D[$r0 + #hub_mmio_list_head] $r14
++	st b32 D[$r0 + #hub_mmio_list_tail] $r15
++	call #mmctx_size
+ 
+ 	// set mmctx base addresses now so we don't have to do it later,
+ 	// they don't (currently) ever change
+@@ -260,7 +305,7 @@ init:
+ 	add b32 $r1 1
+ 	shl b32 $r1 8
+ 	mov b32 $r15 $r1
+-	call strand_ctx_init
++	call #strand_ctx_init
+ 	add b32 $r1 $r15
+ 
+ 	// initialise each GPC in sequence by passing in the offset of its
+@@ -271,40 +316,40 @@ init:
+ 	// when it has completed, and return the size of its context data
+ 	// in GPCn_CC_SCRATCH[1]
+ 	//
+-	ld b32 $r3 D[$r0 + gpc_count]
++	ld b32 $r3 D[$r0 + #gpc_count]
+ 	mov $r4 0x2000
+ 	sethi $r4 0x500000
+ 	init_gpc:
+ 		// setup, and start GPC ucode running
+ 		add b32 $r14 $r4 0x804
+ 		mov b32 $r15 $r1
+-		call nv_wr32			// CC_SCRATCH[1] = ctx offset
++		call #nv_wr32			// CC_SCRATCH[1] = ctx offset
+ 		add b32 $r14 $r4 0x800
+ 		mov b32 $r15 $r2
+-		call nv_wr32			// CC_SCRATCH[0] = chipset
++		call #nv_wr32			// CC_SCRATCH[0] = chipset
+ 		add b32 $r14 $r4 0x10c
+ 		clear b32 $r15
+-		call nv_wr32
++		call #nv_wr32
+ 		add b32 $r14 $r4 0x104
+-		call nv_wr32			// ENTRY
++		call #nv_wr32			// ENTRY
+ 		add b32 $r14 $r4 0x100
+ 		mov $r15 2			// CTRL_START_TRIGGER
+-		call nv_wr32			// CTRL
++		call #nv_wr32			// CTRL
+ 
+ 		// wait for it to complete, and adjust context size
+ 		add b32 $r14 $r4 0x800
+ 		init_gpc_wait:
+-			call nv_rd32
++			call #nv_rd32
+ 			xbit $r15 $r15 31
+-			bra e init_gpc_wait
++			bra e #init_gpc_wait
+ 		add b32 $r14 $r4 0x804
+-		call nv_rd32
++		call #nv_rd32
+ 		add b32 $r1 $r15
+ 
+ 		// next!
+ 		add b32 $r4 0x8000
+ 		sub b32 $r3 1
+-		bra ne init_gpc
++		bra ne #init_gpc
+ 
+ 	// save context size, and tell host we're ready
+ 	mov $r2 0x800
+@@ -322,13 +367,13 @@ main:
+ 	// sleep until we have something to do
+ 	bset $flags $p0
+ 	sleep $p0
+-	mov $r13 cmd_queue
+-	call queue_get
+-	bra $p1 main
++	mov $r13 #cmd_queue
++	call #queue_get
++	bra $p1 #main
+ 
+ 	// context switch, requested by GPU?
+ 	cmpu b32 $r14 0x4001
+-	bra ne main_not_ctx_switch
++	bra ne #main_not_ctx_switch
+ 		trace_set(T_AUTO)
+ 		mov $r1 0xb00
+ 		shl b32 $r1 6
+@@ -336,39 +381,39 @@ main:
+ 		iord $r1 I[$r1 + 0x000]		// CHAN_CUR
+ 
+ 		xbit $r3 $r1 31
+-		bra e chsw_no_prev
++		bra e #chsw_no_prev
+ 			xbit $r3 $r2 31
+-			bra e chsw_prev_no_next
++			bra e #chsw_prev_no_next
+ 				push $r2
+ 				mov b32 $r2 $r1
+ 				trace_set(T_SAVE)
+ 				bclr $flags $p1
+ 				bset $flags $p2
+-				call ctx_xfer
++				call #ctx_xfer
+ 				trace_clr(T_SAVE);
+ 				pop $r2
+ 				trace_set(T_LOAD);
+ 				bset $flags $p1
+-				call ctx_xfer
++				call #ctx_xfer
+ 				trace_clr(T_LOAD);
+-				bra chsw_done
++				bra #chsw_done
+ 			chsw_prev_no_next:
+ 				push $r2
+ 				mov b32 $r2 $r1
+ 				bclr $flags $p1
+ 				bclr $flags $p2
+-				call ctx_xfer
++				call #ctx_xfer
+ 				pop $r2
+ 				mov $r1 0xb00
+ 				shl b32 $r1 6
+ 				iowr I[$r1] $r2
+-				bra chsw_done
++				bra #chsw_done
+ 		chsw_no_prev:
+ 			xbit $r3 $r2 31
+-			bra e chsw_done
++			bra e #chsw_done
+ 				bset $flags $p1
+ 				bclr $flags $p2
+-				call ctx_xfer
++				call #ctx_xfer
+ 
+ 		// ack the context switch request
+ 		chsw_done:
+@@ -377,32 +422,32 @@ main:
+ 		mov $r2 1
+ 		iowr I[$r1 + 0x000] $r2		// 0x409b0c
+ 		trace_clr(T_AUTO)
+-		bra main
++		bra #main
+ 
+ 	// request to set current channel? (*not* a context switch)
+ 	main_not_ctx_switch:
+ 	cmpu b32 $r14 0x0001
+-	bra ne main_not_ctx_chan
++	bra ne #main_not_ctx_chan
+ 		mov b32 $r2 $r15
+-		call ctx_chan
+-		bra main_done
++		call #ctx_chan
++		bra #main_done
+ 
+ 	// request to store current channel context?
+ 	main_not_ctx_chan:
+ 	cmpu b32 $r14 0x0002
+-	bra ne main_not_ctx_save
++	bra ne #main_not_ctx_save
+ 		trace_set(T_SAVE)
+ 		bclr $flags $p1
+ 		bclr $flags $p2
+-		call ctx_xfer
++		call #ctx_xfer
+ 		trace_clr(T_SAVE)
+-		bra main_done
++		bra #main_done
+ 
+ 	main_not_ctx_save:
+ 		shl b32 $r15 $r14 16
+ 		or $r15 E_BAD_COMMAND
+-		call error
+-		bra main
++		call #error
++		bra #main
+ 
+ 	main_done:
+ 	mov $r1 0x820
+@@ -410,7 +455,7 @@ main:
+ 	clear b32 $r2
+ 	bset $r2 31
+ 	iowr I[$r1 + 0x000] $r2		// CC_SCRATCH[0] |= 0x80000000
+-	bra main
++	bra #main
+ 
+ // interrupt handler
+ ih:
+@@ -427,13 +472,13 @@ ih:
+ 	// incoming fifo command?
+ 	iord $r10 I[$r0 + 0x200]	// INTR
+ 	and $r11 $r10 0x00000004
+-	bra e ih_no_fifo
++	bra e #ih_no_fifo
+ 		// queue incoming fifo command for later processing
+ 		mov $r11 0x1900
+-		mov $r13 cmd_queue
++		mov $r13 #cmd_queue
+ 		iord $r14 I[$r11 + 0x100]	// FIFO_CMD
+ 		iord $r15 I[$r11 + 0x000]	// FIFO_DATA
+-		call queue_put
++		call #queue_put
+ 		add b32 $r11 0x400
+ 		mov $r14 1
+ 		iowr I[$r11 + 0x000] $r14	// FIFO_ACK
+@@ -441,18 +486,18 @@ ih:
+ 	// context switch request?
+ 	ih_no_fifo:
+ 	and $r11 $r10 0x00000100
+-	bra e ih_no_ctxsw
++	bra e #ih_no_ctxsw
+ 		// enqueue a context switch for later processing
+-		mov $r13 cmd_queue
++		mov $r13 #cmd_queue
+ 		mov $r14 0x4001
+-		call queue_put
++		call #queue_put
+ 
+ 	// anything we didn't handle, bring it to the host's attention
+ 	ih_no_ctxsw:
+ 	mov $r11 0x104
+ 	not b32 $r11
+ 	and $r11 $r10 $r11
+-	bra e ih_no_other
++	bra e #ih_no_other
+ 		mov $r10 0xc1c
+ 		shl b32 $r10 6
+ 		iowr I[$r10] $r11	// INTR_UP_SET
+@@ -478,11 +523,11 @@ ctx_4160s:
+ 	mov $r14 0x4160
+ 	sethi $r14 0x400000
+ 	mov $r15 1
+-	call nv_wr32
++	call #nv_wr32
+ 	ctx_4160s_wait:
+-		call nv_rd32
++		call #nv_rd32
+ 		xbit $r15 $r15 4
+-		bra e ctx_4160s_wait
++		bra e #ctx_4160s_wait
+ 	ret
+ 
+ // Without clearing again at end of xfer, some things cause PGRAPH
+@@ -492,7 +537,7 @@ ctx_4160c:
+ 	mov $r14 0x4160
+ 	sethi $r14 0x400000
+ 	clear b32 $r15
+-	call nv_wr32
++	call #nv_wr32
+ 	ret
+ 
+ // Again, not real sure
+@@ -503,7 +548,7 @@ ctx_4170s:
+ 	mov $r14 0x4170
+ 	sethi $r14 0x400000
+ 	or $r15 0x10
+-	call nv_wr32
++	call #nv_wr32
+ 	ret
+ 
+ // Waits for a ctx_4170s() call to complete
+@@ -511,9 +556,9 @@ ctx_4170s:
+ ctx_4170w:
+ 	mov $r14 0x4170
+ 	sethi $r14 0x400000
+-	call nv_rd32
++	call #nv_rd32
+ 	and $r15 0x10
+-	bra ne ctx_4170w
++	bra ne #ctx_4170w
+ 	ret
+ 
+ // Disables various things, waits a bit, and re-enables them..
+@@ -530,7 +575,7 @@ ctx_redswitch:
+ 	mov $r15 8
+ 	ctx_redswitch_delay:
+ 		sub b32 $r15 1
+-		bra ne ctx_redswitch_delay
++		bra ne #ctx_redswitch_delay
+ 	mov $r15 0x770
+ 	iowr I[$r14] $r15	// HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
+ 	ret
+@@ -546,10 +591,10 @@ ctx_86c:
+ 	iowr I[$r14] $r15	// HUB(0x86c) = val
+ 	mov $r14 -0x75ec
+ 	sethi $r14 0x400000
+-	call nv_wr32		// ROP(0xa14) = val
++	call #nv_wr32		// ROP(0xa14) = val
+ 	mov $r14 -0x5794
+ 	sethi $r14 0x410000
+-	call nv_wr32		// GPC(0x86c) = val
++	call #nv_wr32		// GPC(0x86c) = val
+ 	ret
+ 
+ // ctx_load - load's a channel's ctxctl data, and selects its vm
+@@ -561,7 +606,7 @@ ctx_load:
+ 
+ 	// switch to channel, somewhat magic in parts..
+ 	mov $r10 12		// DONE_UNK12
+-	call wait_donez
++	call #wait_donez
+ 	mov $r1 0xa24
+ 	shl b32 $r1 6
+ 	iowr I[$r1 + 0x000] $r0	// 0x409a24
+@@ -576,7 +621,7 @@ ctx_load:
+ 	ctx_chan_wait_0:
+ 		iord $r4 I[$r1 + 0x100]
+ 		and $r4 0x1f
+-		bra ne ctx_chan_wait_0
++		bra ne #ctx_chan_wait_0
+ 	iowr I[$r3 + 0x000] $r2	// CHAN_CUR
+ 
+ 	// load channel header, fetch PGRAPH context pointer
+@@ -595,19 +640,19 @@ ctx_load:
+ 	sethi $r2 0x80000000
+ 	iowr I[$r1 + 0x000] $r2		// MEM_TARGET = vram
+ 	mov $r1 0x10			// chan + 0x0210
+-	mov $r2 xfer_data
++	mov $r2 #xfer_data
+ 	sethi $r2 0x00020000		// 16 bytes
+ 	xdld $r1 $r2
+ 	xdwait
+ 	trace_clr(T_LCHAN)
+ 
+ 	// update current context
+-	ld b32 $r1 D[$r0 + xfer_data + 4]
++	ld b32 $r1 D[$r0 + #xfer_data + 4]
+ 	shl b32 $r1 24
+-	ld b32 $r2 D[$r0 + xfer_data + 0]
++	ld b32 $r2 D[$r0 + #xfer_data + 0]
+ 	shr b32 $r2 8
+ 	or $r1 $r2
+-	st b32 D[$r0 + ctx_current] $r1
++	st b32 D[$r0 + #ctx_current] $r1
+ 
+ 	// set transfer base to start of context, and fetch context header
+ 	trace_set(T_LCTXH)
+@@ -618,7 +663,7 @@ ctx_load:
+ 	mov $r1 0xa20
+ 	shl b32 $r1 6
+ 	iowr I[$r1 + 0x000] $r2		// MEM_TARGET = vm
+-	mov $r1 chan_data
++	mov $r1 #chan_data
+ 	sethi $r1 0x00060000		// 256 bytes
+ 	xdld $r0 $r1
+ 	xdwait
+@@ -635,10 +680,10 @@ ctx_load:
+ // In: $r2 channel address
+ //
+ ctx_chan:
+-	call ctx_4160s
+-	call ctx_load
++	call #ctx_4160s
++	call #ctx_load
+ 	mov $r10 12			// DONE_UNK12
+-	call wait_donez
++	call #wait_donez
+ 	mov $r1 0xa10
+ 	shl b32 $r1 6
+ 	mov $r2 5
+@@ -646,8 +691,8 @@ ctx_chan:
+ 	ctx_chan_wait:
+ 		iord $r2 I[$r1 + 0x000]
+ 		or $r2 $r2
+-		bra ne ctx_chan_wait
+-	call ctx_4160c
++		bra ne #ctx_chan_wait
++	call #ctx_4160c
+ 	ret
+ 
+ // Execute per-context state overrides list
+@@ -661,7 +706,7 @@ ctx_chan:
+ //
+ ctx_mmio_exec:
+ 	// set transfer base to be the mmio list
+-	ld b32 $r3 D[$r0 + chan_mmio_address]
++	ld b32 $r3 D[$r0 + #chan_mmio_address]
+ 	mov $r2 0xa04
+ 	shl b32 $r2 6
+ 	iowr I[$r2 + 0x000] $r3		// MEM_BASE
+@@ -670,31 +715,31 @@ ctx_mmio_exec:
+ 	ctx_mmio_loop:
+ 		// fetch next 256 bytes of mmio list if necessary
+ 		and $r4 $r3 0xff
+-		bra ne ctx_mmio_pull
+-			mov $r5 xfer_data
++		bra ne #ctx_mmio_pull
++			mov $r5 #xfer_data
+ 			sethi $r5 0x00060000	// 256 bytes
+ 			xdld $r3 $r5
+ 			xdwait
+ 
+ 		// execute a single list entry
+ 		ctx_mmio_pull:
+-		ld b32 $r14 D[$r4 + xfer_data + 0x00]
+-		ld b32 $r15 D[$r4 + xfer_data + 0x04]
+-		call nv_wr32
++		ld b32 $r14 D[$r4 + #xfer_data + 0x00]
++		ld b32 $r15 D[$r4 + #xfer_data + 0x04]
++		call #nv_wr32
+ 
+ 		// next!
+ 		add b32 $r3 8
+ 		sub b32 $r1 1
+-		bra ne ctx_mmio_loop
++		bra ne #ctx_mmio_loop
+ 
+ 	// set transfer base back to the current context
+ 	ctx_mmio_done:
+-	ld b32 $r3 D[$r0 + ctx_current]
++	ld b32 $r3 D[$r0 + #ctx_current]
+ 	iowr I[$r2 + 0x000] $r3		// MEM_BASE
+ 
+ 	// disable the mmio list now, we don't need/want to execute it again
+-	st b32 D[$r0 + chan_mmio_count] $r0
+-	mov $r1 chan_data
++	st b32 D[$r0 + #chan_mmio_count] $r0
++	mov $r1 #chan_data
+ 	sethi $r1 0x00060000		// 256 bytes
+ 	xdst $r0 $r1
+ 	xdwait
+@@ -709,46 +754,46 @@ ctx_mmio_exec:
+ //		on load it means: "a save preceeded this load"
+ //
+ ctx_xfer:
+-	bra not $p1 ctx_xfer_pre
+-	bra $p2 ctx_xfer_pre_load
++	bra not $p1 #ctx_xfer_pre
++	bra $p2 #ctx_xfer_pre_load
+ 	ctx_xfer_pre:
+ 		mov $r15 0x10
+-		call ctx_86c
+-		call ctx_4160s
+-		bra not $p1 ctx_xfer_exec
++		call #ctx_86c
++		call #ctx_4160s
++		bra not $p1 #ctx_xfer_exec
+ 
+ 	ctx_xfer_pre_load:
+ 		mov $r15 2
+-		call ctx_4170s
+-		call ctx_4170w
+-		call ctx_redswitch
++		call #ctx_4170s
++		call #ctx_4170w
++		call #ctx_redswitch
+ 		clear b32 $r15
+-		call ctx_4170s
+-		call ctx_load
++		call #ctx_4170s
++		call #ctx_load
+ 
+ 	// fetch context pointer, and initiate xfer on all GPCs
+ 	ctx_xfer_exec:
+-	ld b32 $r1 D[$r0 + ctx_current]
++	ld b32 $r1 D[$r0 + #ctx_current]
+ 	mov $r2 0x414
+ 	shl b32 $r2 6
+ 	iowr I[$r2 + 0x000] $r0	// BAR_STATUS = reset
+ 	mov $r14 -0x5b00
+ 	sethi $r14 0x410000
+ 	mov b32 $r15 $r1
+-	call nv_wr32		// GPC_BCAST_WRCMD_DATA = ctx pointer
++	call #nv_wr32		// GPC_BCAST_WRCMD_DATA = ctx pointer
+ 	add b32 $r14 4
+ 	xbit $r15 $flags $p1
+ 	xbit $r2 $flags $p2
+ 	shl b32 $r2 1
+ 	or $r15 $r2
+-	call nv_wr32		// GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
++	call #nv_wr32		// GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+ 
+ 	// strands
+ 	mov $r1 0x4afc
+ 	sethi $r1 0x20000
+ 	mov $r2 0xc
+ 	iowr I[$r1] $r2		// STRAND_CMD(0x3f) = 0x0c
+-	call strand_wait
++	call #strand_wait
+ 	mov $r2 0x47fc
+ 	sethi $r2 0x20000
+ 	iowr I[$r2] $r0		// STRAND_FIRST_GENE(0x3f) = 0x00
+@@ -760,22 +805,22 @@ ctx_xfer:
+ 	xbit $r10 $flags $p1	// direction
+ 	or $r10 6		// first, last
+ 	mov $r11 0		// base = 0
+-	ld b32 $r12 D[$r0 + hub_mmio_list_head]
+-	ld b32 $r13 D[$r0 + hub_mmio_list_tail]
++	ld b32 $r12 D[$r0 + #hub_mmio_list_head]
++	ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
+ 	mov $r14 0		// not multi
+-	call mmctx_xfer
++	call #mmctx_xfer
+ 
+ 	// wait for GPCs to all complete
+ 	mov $r10 8		// DONE_BAR
+-	call wait_doneo
++	call #wait_doneo
+ 
+ 	// wait for strand xfer to complete
+-	call strand_wait
++	call #strand_wait
+ 
+ 	// post-op
+-	bra $p1 ctx_xfer_post
++	bra $p1 #ctx_xfer_post
+ 		mov $r10 12		// DONE_UNK12
+-		call wait_donez
++		call #wait_donez
+ 		mov $r1 0xa10
+ 		shl b32 $r1 6
+ 		mov $r2 5
+@@ -783,27 +828,27 @@ ctx_xfer:
+ 		ctx_xfer_post_save_wait:
+ 			iord $r2 I[$r1]
+ 			or $r2 $r2
+-			bra ne ctx_xfer_post_save_wait
++			bra ne #ctx_xfer_post_save_wait
+ 
+-	bra $p2 ctx_xfer_done
++	bra $p2 #ctx_xfer_done
+ 	ctx_xfer_post:
+ 		mov $r15 2
+-		call ctx_4170s
++		call #ctx_4170s
+ 		clear b32 $r15
+-		call ctx_86c
+-		call strand_post
+-		call ctx_4170w
++		call #ctx_86c
++		call #strand_post
++		call #ctx_4170w
+ 		clear b32 $r15
+-		call ctx_4170s
++		call #ctx_4170s
+ 
+-		bra not $p1 ctx_xfer_no_post_mmio
+-		ld b32 $r1 D[$r0 + chan_mmio_count]
++		bra not $p1 #ctx_xfer_no_post_mmio
++		ld b32 $r1 D[$r0 + #chan_mmio_count]
+ 		or $r1 $r1
+-		bra e ctx_xfer_no_post_mmio
+-			call ctx_mmio_exec
++		bra e #ctx_xfer_no_post_mmio
++			call #ctx_mmio_exec
+ 
+ 		ctx_xfer_no_post_mmio:
+-		call ctx_4160c
++		call #ctx_4160c
+ 
+ 	ctx_xfer_done:
+ 	ret
+diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
+index 241d326..c5ed307 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
++++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
+@@ -23,19 +23,21 @@ uint32_t nvc0_grhub_data[] = {
+ 	0x00000000,
+ 	0x00000000,
+ 	0x000000c0,
+-	0x01340098,
++	0x013c00a0,
+ 	0x000000c1,
+-	0x01380098,
++	0x014000a0,
+ 	0x000000c3,
+-	0x01340098,
++	0x013c00a0,
+ 	0x000000c4,
+-	0x01340098,
++	0x013c00a0,
+ 	0x000000c8,
+-	0x01340098,
++	0x013c00a0,
+ 	0x000000ce,
+-	0x01340098,
++	0x013c00a0,
+ 	0x000000cf,
+-	0x01340098,
++	0x013c00a0,
++	0x000000d9,
++	0x01dc0140,
+ 	0x00000000,
+ 	0x0417e91c,
+ 	0x04400204,
+@@ -77,47 +79,45 @@ uint32_t nvc0_grhub_data[] = {
+ 	0x0c408900,
+ 	0x00408980,
+ 	0x044064c0,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
+-	0x00000000,
++	0x0417e91c,
++	0x04400204,
++	0x24404004,
++	0x00404044,
++	0x34404094,
++	0x184040d0,
++	0x004040f8,
++	0x08404130,
++	0x08404150,
++	0x04404164,
++	0x04404178,
++	0x1c404200,
++	0x34404404,
++	0x0c404460,
++	0x00404480,
++	0x00404498,
++	0x0c404604,
++	0x7c404618,
++	0x50404698,
++	0x044046f0,
++	0x54404700,
++	0x00405800,
++	0x08405830,
++	0x00405854,
++	0x0c405870,
++	0x04405a00,
++	0x00405a18,
++	0x00406020,
++	0x0c406028,
++	0x044064a8,
++	0x104064b4,
++	0x00407804,
++	0x1440780c,
++	0x004078bc,
++	0x18408000,
++	0x00408064,
++	0x08408800,
++	0x0c408900,
++	0x00408980,
+ 	0x00000000,
+ 	0x00000000,
+ 	0x00000000,
+diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
+index 929aded..ce65f81 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
++++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
+@@ -153,3 +153,240 @@ nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+ 	perflvl->vdec   = read_clk(dev, 0x0e);
+ 	return 0;
+ }
++
++struct nvc0_pm_clock {
++	u32 freq;
++	u32 ssel;
++	u32 mdiv;
++	u32 dsrc;
++	u32 ddiv;
++	u32 coef;
++};
++
++struct nvc0_pm_state {
++	struct nvc0_pm_clock eng[16];
++};
++
++static u32
++calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
++{
++	u32 div = min((ref * 2) / freq, (u32)65);
++	if (div < 2)
++		div = 2;
++
++	*ddiv = div - 2;
++	return (ref * 2) / div;
++}
++
++static u32
++calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
++{
++	u32 sclk;
++
++	/* use one of the fixed frequencies if possible */
++	*ddiv = 0x00000000;
++	switch (freq) {
++	case  27000:
++	case 108000:
++		*dsrc = 0x00000000;
++		if (freq == 108000)
++			*dsrc |= 0x00030000;
++		return freq;
++	case 100000:
++		*dsrc = 0x00000002;
++		return freq;
++	default:
++		*dsrc = 0x00000003;
++		break;
++	}
++
++	/* otherwise, calculate the closest divider */
++	sclk = read_vco(dev, clk);
++	if (clk < 7)
++		sclk = calc_div(dev, clk, sclk, freq, ddiv);
++	return sclk;
++}
++
++static u32
++calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
++{
++	struct pll_lims limits;
++	int N, M, P, ret;
++
++	ret = get_pll_limits(dev, 0x137000 + (clk * 0x20), &limits);
++	if (ret)
++		return 0;
++
++	limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
++	if (!limits.refclk)
++		return 0;
++
++	ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
++	if (ret <= 0)
++		return 0;
++
++	*coef = (P << 16) | (N << 8) | M;
++	return ret;
++}
++
++/* A (likely rather simplified and incomplete) view of the clock tree
++ *
++ * Key:
++ *
++ * S: source select
++ * D: divider
++ * P: pll
++ * F: switch
++ *
++ * Engine clocks:
++ *
++ * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
++ *                      (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
++ *
++ * Not all registers exist for all clocks.  For example: clocks >= 8 don't
++ * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
++ * they have the divider at 1371d0, though the source selection at 137160
++ * still exists.  You must use the divider at 137250 for these instead.
++ *
++ * Memory clock:
++ *
++ * TBD, read_mem() above is likely very wrong...
++ *
++ */
++
++static int
++calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
++{
++	u32 src0, div0, div1D, div1P = 0;
++	u32 clk0, clk1 = 0;
++
++	/* invalid clock domain */
++	if (!freq)
++		return 0;
++
++	/* first possible path, using only dividers */
++	clk0 = calc_src(dev, clk, freq, &src0, &div0);
++	clk0 = calc_div(dev, clk, clk0, freq, &div1D);
++
++	/* see if we can get any closer using PLLs */
++	if (clk0 != freq && (0x00004387 & (1 << clk))) {
++		if (clk < 7)
++			clk1 = calc_pll(dev, clk, freq, &info->coef);
++		else
++			clk1 = read_pll(dev, 0x1370e0);
++		clk1 = calc_div(dev, clk, clk1, freq, &div1P);
++	}
++
++	/* select the method which gets closest to target freq */
++	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
++		info->dsrc = src0;
++		if (div0) {
++			info->ddiv |= 0x80000000;
++			info->ddiv |= div0 << 8;
++			info->ddiv |= div0;
++		}
++		if (div1D) {
++			info->mdiv |= 0x80000000;
++			info->mdiv |= div1D;
++		}
++		info->ssel = 0;
++		info->freq = clk0;
++	} else {
++		if (div1P) {
++			info->mdiv |= 0x80000000;
++			info->mdiv |= div1P << 8;
++		}
++		info->ssel = (1 << clk);
++		info->freq = clk1;
++	}
++
++	return 0;
++}
++
++void *
++nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvc0_pm_state *info;
++	int ret;
++
++	info = kzalloc(sizeof(*info), GFP_KERNEL);
++	if (!info)
++		return ERR_PTR(-ENOMEM);
++
++	/* NFI why this is still in the performance table, the ROPCs appear
++	 * to get their clock from clock 2 ("hub07", actually hub05 on this
++	 * chip, but, anyway...) as well.  nvatiming confirms hub05 and ROP
++	 * are always the same freq with the binary driver even when the
++	 * performance table says they should differ.
++	 */
++	if (dev_priv->chipset == 0xd9)
++		perflvl->rop = 0;
++
++	if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
++	    (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
++	    (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
++	    (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
++	    (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
++	    (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
++	    (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
++	    (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
++		kfree(info);
++		return ERR_PTR(ret);
++	}
++
++	return info;
++}
++
++static void
++prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
++{
++	/* program dividers at 137160/1371d0 first */
++	if (clk < 7 && !info->ssel) {
++		nv_mask(dev, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
++		nv_wr32(dev, 0x137160 + (clk * 0x04), info->dsrc);
++	}
++
++	/* switch clock to non-pll mode */
++	nv_mask(dev, 0x137100, (1 << clk), 0x00000000);
++	nv_wait(dev, 0x137100, (1 << clk), 0x00000000);
++
++	/* reprogram pll */
++	if (clk < 7) {
++		/* make sure it's disabled first... */
++		u32 base = 0x137000 + (clk * 0x20);
++		u32 ctrl = nv_rd32(dev, base + 0x00);
++		if (ctrl & 0x00000001) {
++			nv_mask(dev, base + 0x00, 0x00000004, 0x00000000);
++			nv_mask(dev, base + 0x00, 0x00000001, 0x00000000);
++		}
++		/* program it to new values, if necessary */
++		if (info->ssel) {
++			nv_wr32(dev, base + 0x04, info->coef);
++			nv_mask(dev, base + 0x00, 0x00000001, 0x00000001);
++			nv_wait(dev, base + 0x00, 0x00020000, 0x00020000);
++			nv_mask(dev, base + 0x00, 0x00020004, 0x00000004);
++		}
++	}
++
++	/* select pll/non-pll mode, and program final clock divider */
++	nv_mask(dev, 0x137100, (1 << clk), info->ssel);
++	nv_wait(dev, 0x137100, (1 << clk), info->ssel);
++	nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
++}
++
++int
++nvc0_pm_clocks_set(struct drm_device *dev, void *data)
++{
++	struct nvc0_pm_state *info = data;
++	int i;
++
++	for (i = 0; i < 16; i++) {
++		if (!info->eng[i].freq)
++			continue;
++		prog_clk(dev, i, &info->eng[i]);
++	}
++
++	kfree(info);
++	return 0;
++}
+diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
+index 9e35294..30d2bd5 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
++++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
+@@ -77,9 +77,11 @@ void
+ nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ 	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+ {
++	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
++
+ 	pte <<= 3;
+ 	while (cnt--) {
+-		u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, 5);
++		u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, target);
+ 		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ 		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ 		pte += 8;
+diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
+index ce984d5..a7eef89 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
++++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
+@@ -106,31 +106,32 @@ nvc0_vram_init(struct drm_device *dev)
+ 	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ 	const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ 	const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+-	u32 parts = nv_rd32(dev, 0x121c74);
++	u32 parts = nv_rd32(dev, 0x022438);
++	u32 pmask = nv_rd32(dev, 0x022554);
+ 	u32 bsize = nv_rd32(dev, 0x10f20c);
+ 	u32 offset, length;
+ 	bool uniform = true;
+ 	int ret, part;
+ 
+ 	NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
+-	NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize);
++	NV_DEBUG(dev, "parts 0x%08x mask 0x%08x\n", parts, pmask);
++
++	dev_priv->vram_type = nouveau_mem_vbios_type(dev);
++	dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x10f200) & 0x00000004);
+ 
+ 	/* read amount of vram attached to each memory controller */
+-	part = 0;
+-	while (parts) {
+-		u32 psize = nv_rd32(dev, 0x11020c + (part++ * 0x1000));
+-		if (psize == 0)
+-			continue;
+-		parts--;
+-
+-		if (psize != bsize) {
+-			if (psize < bsize)
+-				bsize = psize;
+-			uniform = false;
++	for (part = 0; part < parts; part++) {
++		if (!(pmask & (1 << part))) {
++			u32 psize = nv_rd32(dev, 0x11020c + (part * 0x1000));
++			if (psize != bsize) {
++				if (psize < bsize)
++					bsize = psize;
++				uniform = false;
++			}
++
++			NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
++			dev_priv->vram_size += (u64)psize << 20;
+ 		}
+-
+-		NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
+-		dev_priv->vram_size += (u64)psize << 20;
+ 	}
+ 
+ 	/* if all controllers have the same amount attached, there's no holes */
+diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
+index 3002d82..8a555fb 100644
+--- a/drivers/gpu/drm/nouveau/nvd0_display.c
++++ b/drivers/gpu/drm/nouveau/nvd0_display.c
+@@ -35,12 +35,34 @@
+ #include "nouveau_fb.h"
+ #include "nv50_display.h"
+ 
++#define EVO_DMA_NR 9
++
++#define EVO_MASTER  (0x00)
++#define EVO_FLIP(c) (0x01 + (c))
++#define EVO_OVLY(c) (0x05 + (c))
++#define EVO_OIMM(c) (0x09 + (c))
++#define EVO_CURS(c) (0x0d + (c))
++
++/* offsets in shared sync bo of various structures */
++#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
++#define EVO_MAST_NTFY     EVO_SYNC(  0, 0x00)
++#define EVO_FLIP_SEM0(c)  EVO_SYNC((c), 0x00)
++#define EVO_FLIP_SEM1(c)  EVO_SYNC((c), 0x10)
++
++struct evo {
++	int idx;
++	dma_addr_t handle;
++	u32 *ptr;
++	struct {
++		u32 offset;
++		u16 value;
++	} sem;
++};
++
+ struct nvd0_display {
+ 	struct nouveau_gpuobj *mem;
+-	struct {
+-		dma_addr_t handle;
+-		u32 *ptr;
+-	} evo[1];
++	struct nouveau_bo *sync;
++	struct evo evo[9];
+ 
+ 	struct tasklet_struct tasklet;
+ 	u32 modeset;
+@@ -53,6 +75,15 @@ nvd0_display(struct drm_device *dev)
+ 	return dev_priv->engine.display.priv;
+ }
+ 
++static struct drm_crtc *
++nvd0_display_crtc_get(struct drm_encoder *encoder)
++{
++	return nouveau_encoder(encoder)->crtc;
++}
++
++/******************************************************************************
++ * EVO channel helpers
++ *****************************************************************************/
+ static inline int
+ evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
+ {
+@@ -84,6 +115,9 @@ evo_wait(struct drm_device *dev, int id, int nr)
+ 		put = 0;
+ 	}
+ 
++	if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
++		NV_INFO(dev, "Evo%d: %p START\n", id, disp->evo[id].ptr + put);
++
+ 	return disp->evo[id].ptr + put;
+ }
+ 
+@@ -91,104 +125,372 @@ static void
+ evo_kick(u32 *push, struct drm_device *dev, int id)
+ {
+ 	struct nvd0_display *disp = nvd0_display(dev);
++
++	if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) {
++		u32 curp = nv_rd32(dev, 0x640000 + (id * 0x1000)) >> 2;
++		u32 *cur = disp->evo[id].ptr + curp;
++
++		while (cur < push)
++			NV_INFO(dev, "Evo%d: 0x%08x\n", id, *cur++);
++		NV_INFO(dev, "Evo%d: %p KICK!\n", id, push);
++	}
++
+ 	nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
+ }
+ 
+ #define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
+ #define evo_data(p,d)   *((p)++) = (d)
+ 
+-static struct drm_crtc *
+-nvd0_display_crtc_get(struct drm_encoder *encoder)
++static int
++evo_init_dma(struct drm_device *dev, int ch)
+ {
+-	return nouveau_encoder(encoder)->crtc;
++	struct nvd0_display *disp = nvd0_display(dev);
++	u32 flags;
++
++	flags = 0x00000000;
++	if (ch == EVO_MASTER)
++		flags |= 0x01000000;
++
++	nv_wr32(dev, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
++	nv_wr32(dev, 0x610498 + (ch * 0x0010), 0x00010000);
++	nv_wr32(dev, 0x61049c + (ch * 0x0010), 0x00000001);
++	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
++	nv_wr32(dev, 0x640000 + (ch * 0x1000), 0x00000000);
++	nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
++	if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
++		NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
++			      nv_rd32(dev, 0x610490 + (ch * 0x0010)));
++		return -EBUSY;
++	}
++
++	nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
++	nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
++	return 0;
++}
++
++static void
++evo_fini_dma(struct drm_device *dev, int ch)
++{
++	if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000010))
++		return;
++
++	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
++	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
++	nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
++	nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
++	nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
++}
++
++static inline void
++evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
++{
++	nv_wr32(dev, 0x640000 + (ch * 0x1000) + mthd, data);
++}
++
++static int
++evo_init_pio(struct drm_device *dev, int ch)
++{
++	nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000001);
++	if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
++		NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
++			      nv_rd32(dev, 0x610490 + (ch * 0x0010)));
++		return -EBUSY;
++	}
++
++	nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
++	nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
++	return 0;
++}
++
++static void
++evo_fini_pio(struct drm_device *dev, int ch)
++{
++	if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000001))
++		return;
++
++	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
++	nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
++	nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
++	nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
++	nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
++}
++
++static bool
++evo_sync_wait(void *data)
++{
++	return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
++}
++
++static int
++evo_sync(struct drm_device *dev, int ch)
++{
++	struct nvd0_display *disp = nvd0_display(dev);
++	u32 *push = evo_wait(dev, ch, 8);
++	if (push) {
++		nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
++		evo_mthd(push, 0x0084, 1);
++		evo_data(push, 0x80000000 | EVO_MAST_NTFY);
++		evo_mthd(push, 0x0080, 2);
++		evo_data(push, 0x00000000);
++		evo_data(push, 0x00000000);
++		evo_kick(push, dev, ch);
++		if (nv_wait_cb(dev, evo_sync_wait, disp->sync))
++			return 0;
++	}
++
++	return -EBUSY;
++}
++
++/******************************************************************************
++ * Page flipping channel
++ *****************************************************************************/
++struct nouveau_bo *
++nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
++{
++	return nvd0_display(dev)->sync;
++}
++
++void
++nvd0_display_flip_stop(struct drm_crtc *crtc)
++{
++	struct nvd0_display *disp = nvd0_display(crtc->dev);
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
++	u32 *push;
++
++	push = evo_wait(crtc->dev, evo->idx, 8);
++	if (push) {
++		evo_mthd(push, 0x0084, 1);
++		evo_data(push, 0x00000000);
++		evo_mthd(push, 0x0094, 1);
++		evo_data(push, 0x00000000);
++		evo_mthd(push, 0x00c0, 1);
++		evo_data(push, 0x00000000);
++		evo_mthd(push, 0x0080, 1);
++		evo_data(push, 0x00000000);
++		evo_kick(push, crtc->dev, evo->idx);
++	}
++}
++
++int
++nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
++		       struct nouveau_channel *chan, u32 swap_interval)
++{
++	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
++	struct nvd0_display *disp = nvd0_display(crtc->dev);
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
++	u64 offset;
++	u32 *push;
++	int ret;
++
++	evo_sync(crtc->dev, EVO_MASTER);
++
++	swap_interval <<= 4;
++	if (swap_interval == 0)
++		swap_interval |= 0x100;
++
++	push = evo_wait(crtc->dev, evo->idx, 128);
++	if (unlikely(push == NULL))
++		return -EBUSY;
++
++	/* synchronise with the rendering channel, if necessary */
++	if (likely(chan)) {
++		ret = RING_SPACE(chan, 10);
++		if (ret)
++			return ret;
++
++		offset  = chan->dispc_vma[nv_crtc->index].offset;
++		offset += evo->sem.offset;
++
++		BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
++		OUT_RING  (chan, upper_32_bits(offset));
++		OUT_RING  (chan, lower_32_bits(offset));
++		OUT_RING  (chan, 0xf00d0000 | evo->sem.value);
++		OUT_RING  (chan, 0x1002);
++		BEGIN_NVC0(chan, 2, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
++		OUT_RING  (chan, upper_32_bits(offset));
++		OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
++		OUT_RING  (chan, 0x74b1e000);
++		OUT_RING  (chan, 0x1001);
++		FIRE_RING (chan);
++	} else {
++		nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
++				0xf00d0000 | evo->sem.value);
++		evo_sync(crtc->dev, EVO_MASTER);
++	}
++
++	/* queue the flip */
++	evo_mthd(push, 0x0100, 1);
++	evo_data(push, 0xfffe0000);
++	evo_mthd(push, 0x0084, 1);
++	evo_data(push, swap_interval);
++	if (!(swap_interval & 0x00000100)) {
++		evo_mthd(push, 0x00e0, 1);
++		evo_data(push, 0x40000000);
++	}
++	evo_mthd(push, 0x0088, 4);
++	evo_data(push, evo->sem.offset);
++	evo_data(push, 0xf00d0000 | evo->sem.value);
++	evo_data(push, 0x74b1e000);
++	evo_data(push, NvEvoSync);
++	evo_mthd(push, 0x00a0, 2);
++	evo_data(push, 0x00000000);
++	evo_data(push, 0x00000000);
++	evo_mthd(push, 0x00c0, 1);
++	evo_data(push, nv_fb->r_dma);
++	evo_mthd(push, 0x0110, 2);
++	evo_data(push, 0x00000000);
++	evo_data(push, 0x00000000);
++	evo_mthd(push, 0x0400, 5);
++	evo_data(push, nv_fb->nvbo->bo.offset >> 8);
++	evo_data(push, 0);
++	evo_data(push, (fb->height << 16) | fb->width);
++	evo_data(push, nv_fb->r_pitch);
++	evo_data(push, nv_fb->r_format);
++	evo_mthd(push, 0x0080, 1);
++	evo_data(push, 0x00000000);
++	evo_kick(push, crtc->dev, evo->idx);
++
++	evo->sem.offset ^= 0x10;
++	evo->sem.value++;
++	return 0;
+ }
+ 
+ /******************************************************************************
+  * CRTC
+  *****************************************************************************/
+ static int
+-nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
++nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
+ {
++	struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
+ 	struct drm_device *dev = nv_crtc->base.dev;
+-	u32 *push, mode;
++	struct nouveau_connector *nv_connector;
++	struct drm_connector *connector;
++	u32 *push, mode = 0x00;
++	u32 mthd;
+ 
+-	mode = 0x00000000;
+-	if (on) {
+-		/* 0x11: 6bpc dynamic 2x2
+-		 * 0x13: 8bpc dynamic 2x2
+-		 * 0x19: 6bpc static 2x2
+-		 * 0x1b: 8bpc static 2x2
+-		 * 0x21: 6bpc temporal
+-		 * 0x23: 8bpc temporal
+-		 */
+-		mode = 0x00000011;
++	nv_connector = nouveau_crtc_connector_get(nv_crtc);
++	connector = &nv_connector->base;
++	if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
++		if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
++			mode = DITHERING_MODE_DYNAMIC2X2;
++	} else {
++		mode = nv_connector->dithering_mode;
+ 	}
+ 
+-	push = evo_wait(dev, 0, 4);
++	if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
++		if (connector->display_info.bpc >= 8)
++			mode |= DITHERING_DEPTH_8BPC;
++	} else {
++		mode |= nv_connector->dithering_depth;
++	}
++
++	if (dev_priv->card_type < NV_E0)
++		mthd = 0x0490 + (nv_crtc->index * 0x0300);
++	else
++		mthd = 0x04a0 + (nv_crtc->index * 0x0300);
++
++	push = evo_wait(dev, EVO_MASTER, 4);
+ 	if (push) {
+-		evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1);
++		evo_mthd(push, mthd, 1);
+ 		evo_data(push, mode);
+ 		if (update) {
+ 			evo_mthd(push, 0x0080, 1);
+ 			evo_data(push, 0x00000000);
+ 		}
+-		evo_kick(push, dev, 0);
++		evo_kick(push, dev, EVO_MASTER);
+ 	}
+ 
+ 	return 0;
+ }
+ 
+ static int
+-nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, int type, bool update)
++nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
+ {
+-	struct drm_display_mode *mode = &nv_crtc->base.mode;
++	struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
+ 	struct drm_device *dev = nv_crtc->base.dev;
++	struct drm_crtc *crtc = &nv_crtc->base;
+ 	struct nouveau_connector *nv_connector;
+-	u32 *push, outX, outY;
+-
+-	outX = mode->hdisplay;
+-	outY = mode->vdisplay;
++	int mode = DRM_MODE_SCALE_NONE;
++	u32 oX, oY, *push;
+ 
++	/* start off at the resolution we programmed the crtc for, this
++	 * effectively handles NONE/FULL scaling
++	 */
+ 	nv_connector = nouveau_crtc_connector_get(nv_crtc);
+-	if (nv_connector && nv_connector->native_mode) {
+-		struct drm_display_mode *native = nv_connector->native_mode;
+-		u32 xratio = (native->hdisplay << 19) / mode->hdisplay;
+-		u32 yratio = (native->vdisplay << 19) / mode->vdisplay;
+-
+-		switch (type) {
+-		case DRM_MODE_SCALE_ASPECT:
+-			if (xratio > yratio) {
+-				outX = (mode->hdisplay * yratio) >> 19;
+-				outY = (mode->vdisplay * yratio) >> 19;
+-			} else {
+-				outX = (mode->hdisplay * xratio) >> 19;
+-				outY = (mode->vdisplay * xratio) >> 19;
+-			}
+-			break;
+-		case DRM_MODE_SCALE_FULLSCREEN:
+-			outX = native->hdisplay;
+-			outY = native->vdisplay;
+-			break;
+-		default:
+-			break;
++	if (nv_connector && nv_connector->native_mode)
++		mode = nv_connector->scaling_mode;
++
++	if (mode != DRM_MODE_SCALE_NONE)
++		omode = nv_connector->native_mode;
++	else
++		omode = umode;
++
++	oX = omode->hdisplay;
++	oY = omode->vdisplay;
++	if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
++		oY *= 2;
++
++	/* add overscan compensation if necessary, will keep the aspect
++	 * ratio the same as the backend mode unless overridden by the
++	 * user setting both hborder and vborder properties.
++	 */
++	if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
++			     (nv_connector->underscan == UNDERSCAN_AUTO &&
++			      nv_connector->edid &&
++			      drm_detect_hdmi_monitor(nv_connector->edid)))) {
++		u32 bX = nv_connector->underscan_hborder;
++		u32 bY = nv_connector->underscan_vborder;
++		u32 aspect = (oY << 19) / oX;
++
++		if (bX) {
++			oX -= (bX * 2);
++			if (bY) oY -= (bY * 2);
++			else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
++		} else {
++			oX -= (oX >> 4) + 32;
++			if (bY) oY -= (bY * 2);
++			else    oY  = ((oX * aspect) + (aspect / 2)) >> 19;
+ 		}
+ 	}
+ 
+-	push = evo_wait(dev, 0, 16);
++	/* handle CENTER/ASPECT scaling, taking into account the areas
++	 * removed already for overscan compensation
++	 */
++	switch (mode) {
++	case DRM_MODE_SCALE_CENTER:
++		oX = min((u32)umode->hdisplay, oX);
++		oY = min((u32)umode->vdisplay, oY);
++		/* fall-through */
++	case DRM_MODE_SCALE_ASPECT:
++		if (oY < oX) {
++			u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
++			oX = ((oY * aspect) + (aspect / 2)) >> 19;
++		} else {
++			u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
++			oY = ((oX * aspect) + (aspect / 2)) >> 19;
++		}
++		break;
++	default:
++		break;
++	}
++
++	push = evo_wait(dev, EVO_MASTER, 8);
+ 	if (push) {
+ 		evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
+-		evo_data(push, (outY << 16) | outX);
+-		evo_data(push, (outY << 16) | outX);
+-		evo_data(push, (outY << 16) | outX);
++		evo_data(push, (oY << 16) | oX);
++		evo_data(push, (oY << 16) | oX);
++		evo_data(push, (oY << 16) | oX);
+ 		evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+ 		evo_data(push, 0x00000000);
+ 		evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
+-		evo_data(push, (mode->vdisplay << 16) | mode->hdisplay);
++		evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
++		evo_kick(push, dev, EVO_MASTER);
+ 		if (update) {
+-			evo_mthd(push, 0x0080, 1);
+-			evo_data(push, 0x00000000);
++			nvd0_display_flip_stop(crtc);
++			nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
+ 		}
+-		evo_kick(push, dev, 0);
+ 	}
+ 
+ 	return 0;
+@@ -201,7 +503,7 @@ nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
+ 	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
+ 	u32 *push;
+ 
+-	push = evo_wait(fb->dev, 0, 16);
++	push = evo_wait(fb->dev, EVO_MASTER, 16);
+ 	if (push) {
+ 		evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
+ 		evo_data(push, nvfb->nvbo->bo.offset >> 8);
+@@ -216,7 +518,7 @@ nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
+ 			evo_mthd(push, 0x0080, 1);
+ 			evo_data(push, 0x00000000);
+ 		}
+-		evo_kick(push, fb->dev, 0);
++		evo_kick(push, fb->dev, EVO_MASTER);
+ 	}
+ 
+ 	nv_crtc->fb.tile_flags = nvfb->r_dma;
+@@ -227,7 +529,7 @@ static void
+ nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
+ {
+ 	struct drm_device *dev = nv_crtc->base.dev;
+-	u32 *push = evo_wait(dev, 0, 16);
++	u32 *push = evo_wait(dev, EVO_MASTER, 16);
+ 	if (push) {
+ 		if (show) {
+ 			evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
+@@ -247,7 +549,7 @@ nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
+ 			evo_data(push, 0x00000000);
+ 		}
+ 
+-		evo_kick(push, dev, 0);
++		evo_kick(push, dev, EVO_MASTER);
+ 	}
+ }
+ 
+@@ -262,7 +564,9 @@ nvd0_crtc_prepare(struct drm_crtc *crtc)
+ 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ 	u32 *push;
+ 
+-	push = evo_wait(crtc->dev, 0, 2);
++	nvd0_display_flip_stop(crtc);
++
++	push = evo_wait(crtc->dev, EVO_MASTER, 2);
+ 	if (push) {
+ 		evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+ 		evo_data(push, 0x00000000);
+@@ -270,7 +574,7 @@ nvd0_crtc_prepare(struct drm_crtc *crtc)
+ 		evo_data(push, 0x03000000);
+ 		evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+ 		evo_data(push, 0x00000000);
+-		evo_kick(push, crtc->dev, 0);
++		evo_kick(push, crtc->dev, EVO_MASTER);
+ 	}
+ 
+ 	nvd0_crtc_cursor_show(nv_crtc, false, false);
+@@ -282,7 +586,7 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
+ 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ 	u32 *push;
+ 
+-	push = evo_wait(crtc->dev, 0, 32);
++	push = evo_wait(crtc->dev, EVO_MASTER, 32);
+ 	if (push) {
+ 		evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+ 		evo_data(push, nv_crtc->fb.tile_flags);
+@@ -295,10 +599,11 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
+ 		evo_data(push, NvEvoVRAM);
+ 		evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+ 		evo_data(push, 0xffffff00);
+-		evo_kick(push, crtc->dev, 0);
++		evo_kick(push, crtc->dev, EVO_MASTER);
+ 	}
+ 
+ 	nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
++	nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
+ }
+ 
+ static bool
+@@ -333,53 +638,61 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+ {
+ 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ 	struct nouveau_connector *nv_connector;
+-	u32 htotal = mode->htotal;
+-	u32 vtotal = mode->vtotal;
+-	u32 hsyncw = mode->hsync_end - mode->hsync_start - 1;
+-	u32 vsyncw = mode->vsync_end - mode->vsync_start - 1;
+-	u32 hfrntp = mode->hsync_start - mode->hdisplay;
+-	u32 vfrntp = mode->vsync_start - mode->vdisplay;
+-	u32 hbackp = mode->htotal - mode->hsync_end;
+-	u32 vbackp = mode->vtotal - mode->vsync_end;
+-	u32 hss2be = hsyncw + hbackp;
+-	u32 vss2be = vsyncw + vbackp;
+-	u32 hss2de = htotal - hfrntp;
+-	u32 vss2de = vtotal - vfrntp;
+-	u32 syncs, *push;
++	u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
++	u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
++	u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
++	u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
++	u32 vblan2e = 0, vblan2s = 1;
++	u32 *push;
+ 	int ret;
+ 
+-	syncs = 0x00000001;
+-	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+-		syncs |= 0x00000008;
+-	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+-		syncs |= 0x00000010;
++	hactive = mode->htotal;
++	hsynce  = mode->hsync_end - mode->hsync_start - 1;
++	hbackp  = mode->htotal - mode->hsync_end;
++	hblanke = hsynce + hbackp;
++	hfrontp = mode->hsync_start - mode->hdisplay;
++	hblanks = mode->htotal - hfrontp - 1;
++
++	vactive = mode->vtotal * vscan / ilace;
++	vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
++	vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
++	vblanke = vsynce + vbackp;
++	vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
++	vblanks = vactive - vfrontp - 1;
++	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
++		vblan2e = vactive + vsynce + vbackp;
++		vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
++		vactive = (vactive * 2) + 1;
++	}
+ 
+ 	ret = nvd0_crtc_swap_fbs(crtc, old_fb);
+ 	if (ret)
+ 		return ret;
+ 
+-	push = evo_wait(crtc->dev, 0, 64);
++	push = evo_wait(crtc->dev, EVO_MASTER, 64);
+ 	if (push) {
+-		evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 5);
++		evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
+ 		evo_data(push, 0x00000000);
+-		evo_data(push, (vtotal << 16) | htotal);
+-		evo_data(push, (vsyncw << 16) | hsyncw);
+-		evo_data(push, (vss2be << 16) | hss2be);
+-		evo_data(push, (vss2de << 16) | hss2de);
++		evo_data(push, (vactive << 16) | hactive);
++		evo_data(push, ( vsynce << 16) | hsynce);
++		evo_data(push, (vblanke << 16) | hblanke);
++		evo_data(push, (vblanks << 16) | hblanks);
++		evo_data(push, (vblan2e << 16) | vblan2s);
+ 		evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
+ 		evo_data(push, 0x00000000); /* ??? */
+ 		evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
+ 		evo_data(push, mode->clock * 1000);
+ 		evo_data(push, 0x00200000); /* ??? */
+ 		evo_data(push, mode->clock * 1000);
+-		evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 1);
+-		evo_data(push, syncs);
+-		evo_kick(push, crtc->dev, 0);
++		evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
++		evo_data(push, 0x00000311);
++		evo_data(push, 0x00000100);
++		evo_kick(push, crtc->dev, EVO_MASTER);
+ 	}
+ 
+ 	nv_connector = nouveau_crtc_connector_get(nv_crtc);
+-	nvd0_crtc_set_dither(nv_crtc, nv_connector->use_dithering, false);
+-	nvd0_crtc_set_scale(nv_crtc, nv_connector->scaling_mode, false);
++	nvd0_crtc_set_dither(nv_crtc, false);
++	nvd0_crtc_set_scale(nv_crtc, false);
+ 	nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
+ 	return 0;
+ }
+@@ -400,7 +713,9 @@ nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ 	if (ret)
+ 		return ret;
+ 
++	nvd0_display_flip_stop(crtc);
+ 	nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
++	nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
+ 	return 0;
+ }
+ 
+@@ -410,6 +725,7 @@ nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+ 			       enum mode_set_atomic state)
+ {
+ 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	nvd0_display_flip_stop(crtc);
+ 	nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
+ 	return 0;
+ }
+@@ -472,10 +788,10 @@ static int
+ nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+ {
+ 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+-	const u32 data = (y << 16) | (x & 0xffff);
++	int ch = EVO_CURS(nv_crtc->index);
+ 
+-	nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data);
+-	nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000);
++	evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
++	evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
+ 	return 0;
+ }
+ 
+@@ -525,6 +841,7 @@ static const struct drm_crtc_funcs nvd0_crtc_func = {
+ 	.gamma_set = nvd0_crtc_gamma_set,
+ 	.set_config = drm_crtc_helper_set_config,
+ 	.destroy = nvd0_crtc_destroy,
++	.page_flip = nouveau_crtc_page_flip,
+ };
+ 
+ static void
+@@ -640,11 +957,6 @@ nvd0_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ }
+ 
+ static void
+-nvd0_dac_prepare(struct drm_encoder *encoder)
+-{
+-}
+-
+-static void
+ nvd0_dac_commit(struct drm_encoder *encoder)
+ {
+ }
+@@ -655,16 +967,29 @@ nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ {
+ 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+-	u32 *push;
++	u32 syncs, magic, *push;
++
++	syncs = 0x00000001;
++	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
++		syncs |= 0x00000008;
++	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
++		syncs |= 0x00000010;
++
++	magic = 0x31ec6000 | (nv_crtc->index << 25);
++	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++		magic |= 0x00000001;
+ 
+ 	nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
+ 
+-	push = evo_wait(encoder->dev, 0, 4);
++	push = evo_wait(encoder->dev, EVO_MASTER, 8);
+ 	if (push) {
+-		evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 2);
++		evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
++		evo_data(push, syncs);
++		evo_data(push, magic);
++		evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2);
+ 		evo_data(push, 1 << nv_crtc->index);
+ 		evo_data(push, 0x00ff);
+-		evo_kick(push, encoder->dev, 0);
++		evo_kick(push, encoder->dev, EVO_MASTER);
+ 	}
+ 
+ 	nv_encoder->crtc = encoder->crtc;
+@@ -680,13 +1005,13 @@ nvd0_dac_disconnect(struct drm_encoder *encoder)
+ 	if (nv_encoder->crtc) {
+ 		nvd0_crtc_prepare(nv_encoder->crtc);
+ 
+-		push = evo_wait(dev, 0, 4);
++		push = evo_wait(dev, EVO_MASTER, 4);
+ 		if (push) {
+ 			evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
+ 			evo_data(push, 0x00000000);
+ 			evo_mthd(push, 0x0080, 1);
+ 			evo_data(push, 0x00000000);
+-			evo_kick(push, dev, 0);
++			evo_kick(push, dev, EVO_MASTER);
+ 		}
+ 
+ 		nv_encoder->crtc = NULL;
+@@ -724,7 +1049,7 @@ nvd0_dac_destroy(struct drm_encoder *encoder)
+ static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
+ 	.dpms = nvd0_dac_dpms,
+ 	.mode_fixup = nvd0_dac_mode_fixup,
+-	.prepare = nvd0_dac_prepare,
++	.prepare = nvd0_dac_disconnect,
+ 	.commit = nvd0_dac_commit,
+ 	.mode_set = nvd0_dac_mode_set,
+ 	.disable = nvd0_dac_disconnect,
+@@ -760,8 +1085,253 @@ nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe)
+ }
+ 
+ /******************************************************************************
++ * Audio
++ *****************************************************************************/
++static void
++nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct nouveau_connector *nv_connector;
++	struct drm_device *dev = encoder->dev;
++	int i, or = nv_encoder->or * 0x30;
++
++	nv_connector = nouveau_encoder_connector_get(nv_encoder);
++	if (!drm_detect_monitor_audio(nv_connector->edid))
++		return;
++
++	nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000001);
++
++	drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
++	if (nv_connector->base.eld[0]) {
++		u8 *eld = nv_connector->base.eld;
++
++		for (i = 0; i < eld[2] * 4; i++)
++			nv_wr32(dev, 0x10ec00 + or, (i << 8) | eld[i]);
++		for (i = eld[2] * 4; i < 0x60; i++)
++			nv_wr32(dev, 0x10ec00 + or, (i << 8) | 0x00);
++
++		nv_mask(dev, 0x10ec10 + or, 0x80000002, 0x80000002);
++	}
++}
++
++static void
++nvd0_audio_disconnect(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++	int or = nv_encoder->or * 0x30;
++
++	nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000000);
++}
++
++/******************************************************************************
++ * HDMI
++ *****************************************************************************/
++static void
++nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
++	struct nouveau_connector *nv_connector;
++	struct drm_device *dev = encoder->dev;
++	int head = nv_crtc->index * 0x800;
++	u32 rekey = 56; /* binary driver, and tegra constant */
++	u32 max_ac_packet;
++
++	nv_connector = nouveau_encoder_connector_get(nv_encoder);
++	if (!drm_detect_hdmi_monitor(nv_connector->edid))
++		return;
++
++	max_ac_packet  = mode->htotal - mode->hdisplay;
++	max_ac_packet -= rekey;
++	max_ac_packet -= 18; /* constant from tegra */
++	max_ac_packet /= 32;
++
++	/* AVI InfoFrame */
++	nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
++	nv_wr32(dev, 0x61671c + head, 0x000d0282);
++	nv_wr32(dev, 0x616720 + head, 0x0000006f);
++	nv_wr32(dev, 0x616724 + head, 0x00000000);
++	nv_wr32(dev, 0x616728 + head, 0x00000000);
++	nv_wr32(dev, 0x61672c + head, 0x00000000);
++	nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000001);
++
++	/* ??? InfoFrame? */
++	nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
++	nv_wr32(dev, 0x6167ac + head, 0x00000010);
++	nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000001);
++
++	/* HDMI_CTRL */
++	nv_mask(dev, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
++						  max_ac_packet << 16);
++
++	/* NFI, audio doesn't work without it though.. */
++	nv_mask(dev, 0x616548 + head, 0x00000070, 0x00000000);
++
++	nvd0_audio_mode_set(encoder, mode);
++}
++
++static void
++nvd0_hdmi_disconnect(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
++	struct drm_device *dev = encoder->dev;
++	int head = nv_crtc->index * 0x800;
++
++	nvd0_audio_disconnect(encoder);
++
++	nv_mask(dev, 0x616798 + head, 0x40000000, 0x00000000);
++	nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
++	nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
++}
++
++/******************************************************************************
+  * SOR
+  *****************************************************************************/
++static inline u32
++nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_entry *dcb, u8 lane)
++{
++	static const u8 nvd0[] = { 16, 8, 0, 24 };
++	return nvd0[lane];
++}
++
++static void
++nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_entry *dcb, u8 pattern)
++{
++	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
++	const u32 loff = (or * 0x800) + (link * 0x80);
++	nv_mask(dev, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
++}
++
++static void
++nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_entry *dcb,
++		      u8 lane, u8 swing, u8 preem)
++{
++	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
++	const u32 loff = (or * 0x800) + (link * 0x80);
++	u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
++	u32 mask = 0x000000ff << shift;
++	u8 *table, *entry, *config = NULL;
++
++	switch (swing) {
++	case 0: preem += 0; break;
++	case 1: preem += 4; break;
++	case 2: preem += 7; break;
++	case 3: preem += 9; break;
++	}
++
++	table = nouveau_dp_bios_data(dev, dcb, &entry);
++	if (table) {
++		if (table[0] == 0x30) {
++			config  = entry + table[4];
++			config += table[5] * preem;
++		} else
++		if (table[0] == 0x40) {
++			config  = table + table[1];
++			config += table[2] * table[3];
++			config += table[6] * preem;
++		}
++	}
++
++	if (!config) {
++		NV_ERROR(dev, "PDISP: unsupported DP table for chipset\n");
++		return;
++	}
++
++	nv_mask(dev, 0x61c118 + loff, mask, config[1] << shift);
++	nv_mask(dev, 0x61c120 + loff, mask, config[2] << shift);
++	nv_mask(dev, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
++	nv_mask(dev, 0x61c13c + loff, 0x00000000, 0x00000000);
++}
++
++static void
++nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_entry *dcb, int crtc,
++		     int link_nr, u32 link_bw, bool enhframe)
++{
++	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
++	const u32 loff = (or * 0x800) + (link * 0x80);
++	const u32 soff = (or * 0x800);
++	u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & ~0x001f4000;
++	u32 clksor = nv_rd32(dev, 0x612300 + soff) & ~0x007c0000;
++	u32 script = 0x0000, lane_mask = 0;
++	u8 *table, *entry;
++	int i;
++
++	link_bw /= 27000;
++
++	table = nouveau_dp_bios_data(dev, dcb, &entry);
++	if (table) {
++		if      (table[0] == 0x30) entry = ROMPTR(dev, entry[10]);
++		else if (table[0] == 0x40) entry = ROMPTR(dev, entry[9]);
++		else                       entry = NULL;
++
++		while (entry) {
++			if (entry[0] >= link_bw)
++				break;
++			entry += 3;
++		}
++
++		nouveau_bios_run_init_table(dev, script, dcb, crtc);
++	}
++
++	clksor |= link_bw << 18;
++	dpctrl |= ((1 << link_nr) - 1) << 16;
++	if (enhframe)
++		dpctrl |= 0x00004000;
++
++	for (i = 0; i < link_nr; i++)
++		lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);
++
++	nv_wr32(dev, 0x612300 + soff, clksor);
++	nv_wr32(dev, 0x61c10c + loff, dpctrl);
++	nv_mask(dev, 0x61c130 + loff, 0x0000000f, lane_mask);
++}
++
++static void
++nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_entry *dcb,
++		     u32 *link_nr, u32 *link_bw)
++{
++	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
++	const u32 loff = (or * 0x800) + (link * 0x80);
++	const u32 soff = (or * 0x800);
++	u32 dpctrl = nv_rd32(dev, 0x61c10c + loff) & 0x000f0000;
++	u32 clksor = nv_rd32(dev, 0x612300 + soff);
++
++	if      (dpctrl > 0x00030000) *link_nr = 4;
++	else if (dpctrl > 0x00010000) *link_nr = 2;
++	else			      *link_nr = 1;
++
++	*link_bw  = (clksor & 0x007c0000) >> 18;
++	*link_bw *= 27000;
++}
++
++static void
++nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_entry *dcb,
++		    u32 crtc, u32 datarate)
++{
++	const u32 symbol = 100000;
++	const u32 TU = 64;
++	u32 link_nr, link_bw;
++	u64 ratio, value;
++
++	nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw);
++
++	ratio  = datarate;
++	ratio *= symbol;
++	do_div(ratio, link_nr * link_bw);
++
++	value  = (symbol - ratio) * TU;
++	value *= ratio;
++	do_div(value, symbol);
++	do_div(value, symbol);
++
++	value += 5;
++	value |= 0x08000000;
++
++	nv_wr32(dev, 0x616610 + (crtc * 0x800), value);
++}
++
+ static void
+ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
+ {
+@@ -794,6 +1364,16 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
+ 	nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
+ 	nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
+ 	nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
++
++	if (nv_encoder->dcb->type == OUTPUT_DP) {
++		struct dp_train_func func = {
++			.link_set = nvd0_sor_dp_link_set,
++			.train_set = nvd0_sor_dp_train_set,
++			.train_adj = nvd0_sor_dp_train_adj
++		};
++
++		nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
++	}
+ }
+ 
+ static bool
+@@ -816,8 +1396,37 @@ nvd0_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ }
+ 
+ static void
++nvd0_sor_disconnect(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++	u32 *push;
++
++	if (nv_encoder->crtc) {
++		nvd0_crtc_prepare(nv_encoder->crtc);
++
++		push = evo_wait(dev, EVO_MASTER, 4);
++		if (push) {
++			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
++			evo_data(push, 0x00000000);
++			evo_mthd(push, 0x0080, 1);
++			evo_data(push, 0x00000000);
++			evo_kick(push, dev, EVO_MASTER);
++		}
++
++		nvd0_hdmi_disconnect(encoder);
++
++		nv_encoder->crtc = NULL;
++		nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
++	}
++}
++
++static void
+ nvd0_sor_prepare(struct drm_encoder *encoder)
+ {
++	nvd0_sor_disconnect(encoder);
++	if (nouveau_encoder(encoder)->dcb->type == OUTPUT_DP)
++		evo_sync(encoder->dev, EVO_MASTER);
+ }
+ 
+ static void
+@@ -829,13 +1438,25 @@ static void
+ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+ 		  struct drm_display_mode *mode)
+ {
+-	struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ 	struct nouveau_connector *nv_connector;
+ 	struct nvbios *bios = &dev_priv->vbios;
+ 	u32 mode_ctrl = (1 << nv_crtc->index);
+-	u32 *push, or_config;
++	u32 syncs, magic, *push;
++	u32 or_config;
++
++	syncs = 0x00000001;
++	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
++		syncs |= 0x00000008;
++	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
++		syncs |= 0x00000010;
++
++	magic = 0x31ec6000 | (nv_crtc->index << 25);
++	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++		magic |= 0x00000001;
+ 
+ 	nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ 	switch (nv_encoder->dcb->type) {
+@@ -852,6 +1473,8 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+ 		or_config = (mode_ctrl & 0x00000f00) >> 8;
+ 		if (mode->clock >= 165000)
+ 			or_config |= 0x0100;
++
++		nvd0_hdmi_mode_set(encoder, mode);
+ 		break;
+ 	case OUTPUT_LVDS:
+ 		or_config = (mode_ctrl & 0x00000f00) >> 8;
+@@ -861,7 +1484,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+ 			if (bios->fp.if_is_24bit)
+ 				or_config |= 0x0200;
+ 		} else {
+-			if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
++			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+ 				if (((u8 *)nv_connector->edid)[121] == 2)
+ 					or_config |= 0x0100;
+ 			} else
+@@ -882,6 +1505,22 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+ 
+ 		}
+ 		break;
++	case OUTPUT_DP:
++		if (nv_connector->base.display_info.bpc == 6) {
++			nv_encoder->dp.datarate = mode->clock * 18 / 8;
++			syncs |= 0x00000140;
++		} else {
++			nv_encoder->dp.datarate = mode->clock * 24 / 8;
++			syncs |= 0x00000180;
++		}
++
++		if (nv_encoder->dcb->sorconf.link & 1)
++			mode_ctrl |= 0x00000800;
++		else
++			mode_ctrl |= 0x00000900;
++
++		or_config = (mode_ctrl & 0x00000f00) >> 8;
++		break;
+ 	default:
+ 		BUG_ON(1);
+ 		break;
+@@ -889,42 +1528,26 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+ 
+ 	nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
+ 
+-	push = evo_wait(encoder->dev, 0, 4);
++	if (nv_encoder->dcb->type == OUTPUT_DP) {
++		nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
++					 nv_encoder->dp.datarate);
++	}
++
++	push = evo_wait(dev, EVO_MASTER, 8);
+ 	if (push) {
+-		evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 2);
++		evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
++		evo_data(push, syncs);
++		evo_data(push, magic);
++		evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2);
+ 		evo_data(push, mode_ctrl);
+ 		evo_data(push, or_config);
+-		evo_kick(push, encoder->dev, 0);
++		evo_kick(push, dev, EVO_MASTER);
+ 	}
+ 
+ 	nv_encoder->crtc = encoder->crtc;
+ }
+ 
+ static void
+-nvd0_sor_disconnect(struct drm_encoder *encoder)
+-{
+-	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+-	struct drm_device *dev = encoder->dev;
+-	u32 *push;
+-
+-	if (nv_encoder->crtc) {
+-		nvd0_crtc_prepare(nv_encoder->crtc);
+-
+-		push = evo_wait(dev, 0, 4);
+-		if (push) {
+-			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
+-			evo_data(push, 0x00000000);
+-			evo_mthd(push, 0x0080, 1);
+-			evo_data(push, 0x00000000);
+-			evo_kick(push, dev, 0);
+-		}
+-
+-		nv_encoder->crtc = NULL;
+-		nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
+-	}
+-}
+-
+-static void
+ nvd0_sor_destroy(struct drm_encoder *encoder)
+ {
+ 	drm_encoder_cleanup(encoder);
+@@ -976,17 +1599,19 @@ static struct dcb_entry *
+ lookup_dcb(struct drm_device *dev, int id, u32 mc)
+ {
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+-	int type, or, i;
++	int type, or, i, link = -1;
+ 
+ 	if (id < 4) {
+ 		type = OUTPUT_ANALOG;
+ 		or   = id;
+ 	} else {
+ 		switch (mc & 0x00000f00) {
+-		case 0x00000000: type = OUTPUT_LVDS; break;
+-		case 0x00000100: type = OUTPUT_TMDS; break;
+-		case 0x00000200: type = OUTPUT_TMDS; break;
+-		case 0x00000500: type = OUTPUT_TMDS; break;
++		case 0x00000000: link = 0; type = OUTPUT_LVDS; break;
++		case 0x00000100: link = 0; type = OUTPUT_TMDS; break;
++		case 0x00000200: link = 1; type = OUTPUT_TMDS; break;
++		case 0x00000500: link = 0; type = OUTPUT_TMDS; break;
++		case 0x00000800: link = 0; type = OUTPUT_DP; break;
++		case 0x00000900: link = 1; type = OUTPUT_DP; break;
+ 		default:
+ 			NV_ERROR(dev, "PDISP: unknown SOR mc 0x%08x\n", mc);
+ 			return NULL;
+@@ -997,7 +1622,8 @@ lookup_dcb(struct drm_device *dev, int id, u32 mc)
+ 
+ 	for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
+ 		struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
+-		if (dcb->type == type && (dcb->or & (1 << or)))
++		if (dcb->type == type && (dcb->or & (1 << or)) &&
++		    (link < 0 || link == !(dcb->sorconf.link & 1)))
+ 			return dcb;
+ 	}
+ 
+@@ -1048,7 +1674,9 @@ nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
+ 	}
+ 
+ 	pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000;
+-	if (mask & 0x00010000) {
++	NV_DEBUG_KMS(dev, "PDISP: crtc %d pclk %d mask 0x%08x\n",
++			  crtc, pclk, mask);
++	if (pclk && (mask & 0x00010000)) {
+ 		nv50_crtc_set_clock(dev, crtc, pclk);
+ 	}
+ 
+@@ -1072,6 +1700,7 @@ nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
+ 			break;
+ 		case OUTPUT_TMDS:
+ 		case OUTPUT_LVDS:
++		case OUTPUT_DP:
+ 			if (cfg & 0x00000100)
+ 				tmp = 0x00000101;
+ 			else
+@@ -1122,7 +1751,7 @@ nvd0_display_bh(unsigned long data)
+ {
+ 	struct drm_device *dev = (struct drm_device *)data;
+ 	struct nvd0_display *disp = nvd0_display(dev);
+-	u32 mask, crtc;
++	u32 mask = 0, crtc = ~0;
+ 	int i;
+ 
+ 	if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
+@@ -1138,12 +1767,8 @@ nvd0_display_bh(unsigned long data)
+ 		}
+ 	}
+ 
+-	mask = nv_rd32(dev, 0x6101d4);
+-	crtc = 0;
+-	if (!mask) {
+-		mask = nv_rd32(dev, 0x6109d4);
+-		crtc = 1;
+-	}
++	while (!mask && ++crtc < dev->mode_config.num_crtc)
++		mask = nv_rd32(dev, 0x6101d4 + (crtc * 0x800));
+ 
+ 	if (disp->modeset & 0x00000001)
+ 		nvd0_display_unk1_handler(dev, crtc, mask);
+@@ -1158,6 +1783,13 @@ nvd0_display_intr(struct drm_device *dev)
+ {
+ 	struct nvd0_display *disp = nvd0_display(dev);
+ 	u32 intr = nv_rd32(dev, 0x610088);
++	int i;
++
++	if (intr & 0x00000001) {
++		u32 stat = nv_rd32(dev, 0x61008c);
++		nv_wr32(dev, 0x61008c, stat);
++		intr &= ~0x00000001;
++	}
+ 
+ 	if (intr & 0x00000002) {
+ 		u32 stat = nv_rd32(dev, 0x61009c);
+@@ -1196,16 +1828,13 @@ nvd0_display_intr(struct drm_device *dev)
+ 		intr &= ~0x00100000;
+ 	}
+ 
+-	if (intr & 0x01000000) {
+-		u32 stat = nv_rd32(dev, 0x6100bc);
+-		nv_wr32(dev, 0x6100bc, stat);
+-		intr &= ~0x01000000;
+-	}
+-
+-	if (intr & 0x02000000) {
+-		u32 stat = nv_rd32(dev, 0x6108bc);
+-		nv_wr32(dev, 0x6108bc, stat);
+-		intr &= ~0x02000000;
++	for (i = 0; i < dev->mode_config.num_crtc; i++) {
++		u32 mask = 0x01000000 << i;
++		if (intr & mask) {
++			u32 stat = nv_rd32(dev, 0x6100bc + (i * 0x800));
++			nv_wr32(dev, 0x6100bc + (i * 0x800), stat);
++			intr &= ~mask;
++		}
+ 	}
+ 
+ 	if (intr)
+@@ -1215,38 +1844,29 @@ nvd0_display_intr(struct drm_device *dev)
+ /******************************************************************************
+  * Init
+  *****************************************************************************/
+-static void
++void
+ nvd0_display_fini(struct drm_device *dev)
+ {
+ 	int i;
+ 
+-	/* fini cursors */
+-	for (i = 14; i >= 13; i--) {
+-		if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001))
+-			continue;
+-
+-		nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000);
+-		nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000);
+-		nv_mask(dev, 0x610090, 1 << i, 0x00000000);
+-		nv_mask(dev, 0x6100a0, 1 << i, 0x00000000);
++	/* fini cursors + overlays + flips */
++	for (i = 1; i >= 0; i--) {
++		evo_fini_pio(dev, EVO_CURS(i));
++		evo_fini_pio(dev, EVO_OIMM(i));
++		evo_fini_dma(dev, EVO_OVLY(i));
++		evo_fini_dma(dev, EVO_FLIP(i));
+ 	}
+ 
+ 	/* fini master */
+-	if (nv_rd32(dev, 0x610490) & 0x00000010) {
+-		nv_mask(dev, 0x610490, 0x00000010, 0x00000000);
+-		nv_mask(dev, 0x610490, 0x00000003, 0x00000000);
+-		nv_wait(dev, 0x610490, 0x80000000, 0x00000000);
+-		nv_mask(dev, 0x610090, 0x00000001, 0x00000000);
+-		nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000);
+-	}
++	evo_fini_dma(dev, EVO_MASTER);
+ }
+ 
+ int
+ nvd0_display_init(struct drm_device *dev)
+ {
+ 	struct nvd0_display *disp = nvd0_display(dev);
++	int ret, i;
+ 	u32 *push;
+-	int i;
+ 
+ 	if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
+ 		nv_wr32(dev, 0x6100ac, 0x00000100);
+@@ -1271,7 +1891,7 @@ nvd0_display_init(struct drm_device *dev)
+ 		nv_wr32(dev, 0x6301c4 + (i * 0x800), sor);
+ 	}
+ 
+-	for (i = 0; i < 2; i++) {
++	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ 		u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800));
+ 		u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800));
+ 		u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800));
+@@ -1285,36 +1905,24 @@ nvd0_display_init(struct drm_device *dev)
+ 	nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);
+ 
+ 	/* init master */
+-	nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3);
+-	nv_wr32(dev, 0x610498, 0x00010000);
+-	nv_wr32(dev, 0x61049c, 0x00000001);
+-	nv_mask(dev, 0x610490, 0x00000010, 0x00000010);
+-	nv_wr32(dev, 0x640000, 0x00000000);
+-	nv_wr32(dev, 0x610490, 0x01000013);
+-	if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) {
+-		NV_ERROR(dev, "PDISP: master 0x%08x\n",
+-			 nv_rd32(dev, 0x610490));
+-		return -EBUSY;
++	ret = evo_init_dma(dev, EVO_MASTER);
++	if (ret)
++		goto error;
++
++	/* init flips + overlays + cursors */
++	for (i = 0; i < dev->mode_config.num_crtc; i++) {
++		if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
++		    (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
++		    (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
++		    (ret = evo_init_pio(dev, EVO_CURS(i))))
++			goto error;
+ 	}
+-	nv_mask(dev, 0x610090, 0x00000001, 0x00000001);
+-	nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001);
+ 
+-	/* init cursors */
+-	for (i = 13; i <= 14; i++) {
+-		nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001);
+-		if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) {
+-			NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i,
+-				 nv_rd32(dev, 0x610490 + (i * 0x10)));
+-			return -EBUSY;
+-		}
+-
+-		nv_mask(dev, 0x610090, 1 << i, 1 << i);
+-		nv_mask(dev, 0x6100a0, 1 << i, 1 << i);
++	push = evo_wait(dev, EVO_MASTER, 32);
++	if (!push) {
++		ret = -EBUSY;
++		goto error;
+ 	}
+-
+-	push = evo_wait(dev, 0, 32);
+-	if (!push)
+-		return -EBUSY;
+ 	evo_mthd(push, 0x0088, 1);
+ 	evo_data(push, NvEvoSync);
+ 	evo_mthd(push, 0x0084, 1);
+@@ -1323,9 +1931,12 @@ nvd0_display_init(struct drm_device *dev)
+ 	evo_data(push, 0x80000000);
+ 	evo_mthd(push, 0x008c, 1);
+ 	evo_data(push, 0x00000000);
+-	evo_kick(push, dev, 0);
++	evo_kick(push, dev, EVO_MASTER);
+ 
+-	return 0;
++error:
++	if (ret)
++		nvd0_display_fini(dev);
++	return ret;
+ }
+ 
+ void
+@@ -1334,11 +1945,16 @@ nvd0_display_destroy(struct drm_device *dev)
+ 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ 	struct nvd0_display *disp = nvd0_display(dev);
+ 	struct pci_dev *pdev = dev->pdev;
++	int i;
+ 
+-	nvd0_display_fini(dev);
++	for (i = 0; i < EVO_DMA_NR; i++) {
++		struct evo *evo = &disp->evo[i];
++		pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
++	}
+ 
+-	pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle);
+ 	nouveau_gpuobj_ref(NULL, &disp->mem);
++	nouveau_bo_unmap(disp->sync);
++	nouveau_bo_ref(NULL, &disp->sync);
+ 	nouveau_irq_unregister(dev, 26);
+ 
+ 	dev_priv->engine.display.priv = NULL;
+@@ -1355,7 +1971,7 @@ nvd0_display_create(struct drm_device *dev)
+ 	struct pci_dev *pdev = dev->pdev;
+ 	struct nvd0_display *disp;
+ 	struct dcb_entry *dcbe;
+-	int ret, i;
++	int crtcs, ret, i;
+ 
+ 	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
+ 	if (!disp)
+@@ -1363,7 +1979,8 @@ nvd0_display_create(struct drm_device *dev)
+ 	dev_priv->engine.display.priv = disp;
+ 
+ 	/* create crtc objects to represent the hw heads */
+-	for (i = 0; i < 2; i++) {
++	crtcs = nv_rd32(dev, 0x022448);
++	for (i = 0; i < crtcs; i++) {
+ 		ret = nvd0_crtc_create(dev, i);
+ 		if (ret)
+ 			goto out;
+@@ -1384,6 +2001,7 @@ nvd0_display_create(struct drm_device *dev)
+ 		switch (dcbe->type) {
+ 		case OUTPUT_TMDS:
+ 		case OUTPUT_LVDS:
++		case OUTPUT_DP:
+ 			nvd0_sor_create(connector, dcbe);
+ 			break;
+ 		case OUTPUT_ANALOG:
+@@ -1410,61 +2028,83 @@ nvd0_display_create(struct drm_device *dev)
+ 	tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
+ 	nouveau_irq_register(dev, 26, nvd0_display_intr);
+ 
++	/* small shared memory area we use for notifiers and semaphores */
++	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
++			     0, 0x0000, &disp->sync);
++	if (!ret) {
++		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
++		if (!ret)
++			ret = nouveau_bo_map(disp->sync);
++		if (ret)
++			nouveau_bo_ref(NULL, &disp->sync);
++	}
++
++	if (ret)
++		goto out;
++
+ 	/* hash table and dma objects for the memory areas we care about */
+ 	ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000,
+ 				 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
+ 	if (ret)
+ 		goto out;
+ 
+-	nv_wo32(disp->mem, 0x1000, 0x00000049);
+-	nv_wo32(disp->mem, 0x1004, (disp->mem->vinst + 0x2000) >> 8);
+-	nv_wo32(disp->mem, 0x1008, (disp->mem->vinst + 0x2fff) >> 8);
+-	nv_wo32(disp->mem, 0x100c, 0x00000000);
+-	nv_wo32(disp->mem, 0x1010, 0x00000000);
+-	nv_wo32(disp->mem, 0x1014, 0x00000000);
+-	nv_wo32(disp->mem, 0x0000, NvEvoSync);
+-	nv_wo32(disp->mem, 0x0004, (0x1000 << 9) | 0x00000001);
+-
+-	nv_wo32(disp->mem, 0x1020, 0x00000049);
+-	nv_wo32(disp->mem, 0x1024, 0x00000000);
+-	nv_wo32(disp->mem, 0x1028, (dev_priv->vram_size - 1) >> 8);
+-	nv_wo32(disp->mem, 0x102c, 0x00000000);
+-	nv_wo32(disp->mem, 0x1030, 0x00000000);
+-	nv_wo32(disp->mem, 0x1034, 0x00000000);
+-	nv_wo32(disp->mem, 0x0008, NvEvoVRAM);
+-	nv_wo32(disp->mem, 0x000c, (0x1020 << 9) | 0x00000001);
+-
+-	nv_wo32(disp->mem, 0x1040, 0x00000009);
+-	nv_wo32(disp->mem, 0x1044, 0x00000000);
+-	nv_wo32(disp->mem, 0x1048, (dev_priv->vram_size - 1) >> 8);
+-	nv_wo32(disp->mem, 0x104c, 0x00000000);
+-	nv_wo32(disp->mem, 0x1050, 0x00000000);
+-	nv_wo32(disp->mem, 0x1054, 0x00000000);
+-	nv_wo32(disp->mem, 0x0010, NvEvoVRAM_LP);
+-	nv_wo32(disp->mem, 0x0014, (0x1040 << 9) | 0x00000001);
+-
+-	nv_wo32(disp->mem, 0x1060, 0x0fe00009);
+-	nv_wo32(disp->mem, 0x1064, 0x00000000);
+-	nv_wo32(disp->mem, 0x1068, (dev_priv->vram_size - 1) >> 8);
+-	nv_wo32(disp->mem, 0x106c, 0x00000000);
+-	nv_wo32(disp->mem, 0x1070, 0x00000000);
+-	nv_wo32(disp->mem, 0x1074, 0x00000000);
+-	nv_wo32(disp->mem, 0x0018, NvEvoFB32);
+-	nv_wo32(disp->mem, 0x001c, (0x1060 << 9) | 0x00000001);
+-
+-	pinstmem->flush(dev);
++	/* create evo dma channels */
++	for (i = 0; i < EVO_DMA_NR; i++) {
++		struct evo *evo = &disp->evo[i];
++		u64 offset = disp->sync->bo.offset;
++		u32 dmao = 0x1000 + (i * 0x100);
++		u32 hash = 0x0000 + (i * 0x040);
++
++		evo->idx = i;
++		evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
++		evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
++		if (!evo->ptr) {
++			ret = -ENOMEM;
++			goto out;
++		}
+ 
+-	/* push buffers for evo channels */
+-	disp->evo[0].ptr =
+-		pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle);
+-	if (!disp->evo[0].ptr) {
+-		ret = -ENOMEM;
+-		goto out;
++		nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
++		nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
++		nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
++		nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
++		nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
++		nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
++		nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
++		nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
++						((dmao + 0x00) << 9));
++
++		nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
++		nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
++		nv_wo32(disp->mem, dmao + 0x28, (dev_priv->vram_size - 1) >> 8);
++		nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
++		nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
++		nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
++		nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
++		nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
++						((dmao + 0x20) << 9));
++
++		nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
++		nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
++		nv_wo32(disp->mem, dmao + 0x48, (dev_priv->vram_size - 1) >> 8);
++		nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
++		nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
++		nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
++		nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
++		nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
++						((dmao + 0x40) << 9));
++
++		nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
++		nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
++		nv_wo32(disp->mem, dmao + 0x68, (dev_priv->vram_size - 1) >> 8);
++		nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
++		nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
++		nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
++		nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
++		nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
++						((dmao + 0x60) << 9));
+ 	}
+ 
+-	ret = nvd0_display_init(dev);
+-	if (ret)
+-		goto out;
++	pinstmem->flush(dev);
+ 
+ out:
+ 	if (ret)
+diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
+index 4c8796b..88718fa 100644
+--- a/drivers/gpu/drm/r128/r128_drv.c
++++ b/drivers/gpu/drm/r128/r128_drv.c
+@@ -42,6 +42,20 @@ static struct pci_device_id pciidlist[] = {
+ 	r128_PCI_IDS
+ };
+ 
++static const struct file_operations r128_driver_fops = {
++	.owner = THIS_MODULE,
++	.open = drm_open,
++	.release = drm_release,
++	.unlocked_ioctl = drm_ioctl,
++	.mmap = drm_mmap,
++	.poll = drm_poll,
++	.fasync = drm_fasync,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl = r128_compat_ioctl,
++#endif
++	.llseek = noop_llseek,
++};
++
+ static struct drm_driver driver = {
+ 	.driver_features =
+ 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+@@ -60,21 +74,7 @@ static struct drm_driver driver = {
+ 	.reclaim_buffers = drm_core_reclaim_buffers,
+ 	.ioctls = r128_ioctls,
+ 	.dma_ioctl = r128_cce_buffers,
+-	.fops = {
+-		.owner = THIS_MODULE,
+-		.open = drm_open,
+-		.release = drm_release,
+-		.unlocked_ioctl = drm_ioctl,
+-		.mmap = drm_mmap,
+-		.poll = drm_poll,
+-		.fasync = drm_fasync,
+-#ifdef CONFIG_COMPAT
+-		.compat_ioctl = r128_compat_ioctl,
+-#endif
+-		.llseek = noop_llseek,
+-	},
+-
+-
++	.fops = &r128_driver_fops,
+ 	.name = DRIVER_NAME,
+ 	.desc = DRIVER_DESC,
+ 	.date = DRIVER_DATE,
+@@ -85,6 +85,7 @@ static struct drm_driver driver = {
+ 
+ int r128_driver_load(struct drm_device *dev, unsigned long flags)
+ {
++	pci_set_master(dev->pdev);
+ 	return drm_vblank_init(dev, 1);
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
+index cf8b4bc..9d83729 100644
+--- a/drivers/gpu/drm/radeon/Makefile
++++ b/drivers/gpu/drm/radeon/Makefile
+@@ -70,7 +70,8 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
+ 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
+ 	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+ 	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
+-	radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o
++	radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
++	radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o si_blit_shaders.o
+ 
+ radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
+ radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
+@@ -78,4 +79,4 @@ radeon-$(CONFIG_ACPI) += radeon_acpi.o
+ 
+ obj-$(CONFIG_DRM_RADEON)+= radeon.o
+ 
+-CFLAGS_radeon_trace_points.o := -I$(src)
+\ No newline at end of file
++CFLAGS_radeon_trace_points.o := -I$(src)
+diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
+index c61c3fe..ca4b038 100644
+--- a/drivers/gpu/drm/radeon/ObjectID.h
++++ b/drivers/gpu/drm/radeon/ObjectID.h
+@@ -85,6 +85,7 @@
+ #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA   0x1F
+ #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1        0x20
+ #define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2        0x21
++#define ENCODER_OBJECT_ID_INTERNAL_VCE            0x24
+ 
+ #define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO    0xFF
+ 
+@@ -387,6 +388,10 @@
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
+ 
++#define ENCODER_VCE_ENUM_ID1                     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                  ENCODER_OBJECT_ID_INTERNAL_VCE << OBJECT_ID_SHIFT)
++
+ /****************************************************/
+ /* Connector Object ID definition - Shared with BIOS */
+ /****************************************************/
+diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
+index 3a05cdb..5ce9bf5 100644
+--- a/drivers/gpu/drm/radeon/atom.c
++++ b/drivers/gpu/drm/radeon/atom.c
+@@ -665,6 +665,8 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
+ 	SDEBUG("   count: %d\n", count);
+ 	if (arg == ATOM_UNIT_MICROSEC)
+ 		udelay(count);
++	else if (!drm_can_sleep())
++		mdelay(count);
+ 	else
+ 		msleep(count);
+ }
+diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
+index 4760466..4b04ba3 100644
+--- a/drivers/gpu/drm/radeon/atombios.h
++++ b/drivers/gpu/drm/radeon/atombios.h
+@@ -7270,6 +7270,8 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+ #define ATOM_PP_THERMALCONTROLLER_EMC2103   13  /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+ #define ATOM_PP_THERMALCONTROLLER_SUMO      14  /* 0x0E */ // Sumo type, used internally
+ #define ATOM_PP_THERMALCONTROLLER_NISLANDS  15
++#define ATOM_PP_THERMALCONTROLLER_SISLANDS  16
++#define ATOM_PP_THERMALCONTROLLER_LM96163   17
+ 
+ // Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+ // We probably should reserve the bit 0x80 for this use.
+@@ -7285,6 +7287,7 @@ typedef struct _ATOM_PPLIB_STATE
+     UCHAR ucClockStateIndices[1]; // variable-sized
+ } ATOM_PPLIB_STATE;
+ 
++
+ typedef struct _ATOM_PPLIB_FANTABLE
+ {
+     UCHAR   ucFanTableFormat;                // Change this if the table format changes or version changes so that the other fields are not the same.
+@@ -7297,12 +7300,20 @@ typedef struct _ATOM_PPLIB_FANTABLE
+     USHORT  usPWMHigh;                       // The PWM value at THigh.
+ } ATOM_PPLIB_FANTABLE;
+ 
++typedef struct _ATOM_PPLIB_FANTABLE2
++{
++    ATOM_PPLIB_FANTABLE basicTable;
++    USHORT  usTMax;                          // The max temperature
++} ATOM_PPLIB_FANTABLE2;
++
+ typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+ {
+     USHORT  usSize;
+     ULONG   ulMaxEngineClock;   // For Overdrive.
+     ULONG   ulMaxMemoryClock;   // For Overdrive.
+     // Add extra system parameters here, always adjust size to include all fields.
++    USHORT  usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
++    USHORT  usUVDTableOffset;   //points to ATOM_PPLIB_UVD_Table
+ } ATOM_PPLIB_EXTENDEDHEADER;
+ 
+ //// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+@@ -7325,6 +7336,7 @@ typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+ #define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000               // Enable the 'regulator hot' feature.
+ #define ATOM_PP_PLATFORM_CAP_BACO          0x00020000               // Does the driver supports BACO state.
+ 
++
+ typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+ {
+       ATOM_COMMON_TABLE_HEADER sHeader;
+@@ -7383,7 +7395,8 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+     USHORT                     usVddciDependencyOnMCLKOffset;
+     USHORT                     usVddcDependencyOnMCLKOffset;
+     USHORT                     usMaxClockVoltageOnDCOffset;
+-    USHORT                     usReserved[2];  
++    USHORT                     usVddcPhaseShedLimitsTableOffset;    // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
++    USHORT                     usReserved;  
+ } ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+ 
+ typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+@@ -7393,8 +7406,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+     ULONG                      ulNearTDPLimit;
+     ULONG                      ulSQRampingThreshold;
+     USHORT                     usCACLeakageTableOffset;         // Points to ATOM_PPLIB_CAC_Leakage_Table
+-    ULONG                      ulCACLeakage;                    // TBD, this parameter is still under discussion.  Change to ulReserved if not needed.
+-    ULONG                      ulReserved;
++    ULONG                      ulCACLeakage;                    // The iLeakage for driver calculated CAC leakage table
++    USHORT                     usTDPODLimit;
++    USHORT                     usLoadLineSlope;                 // in milliOhms * 100
+ } ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+ 
+ //// ATOM_PPLIB_NONCLOCK_INFO::usClassification
+@@ -7423,6 +7437,7 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+ //// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+ #define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2     0x0001
+ #define ATOM_PPLIB_CLASSIFICATION2_ULV                      0x0002
++#define ATOM_PPLIB_CLASSIFICATION2_MVC                      0x0004   //Multi-View Codec (BD-3D)
+ 
+ //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
+ #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
+@@ -7446,7 +7461,9 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+ 
+ #define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING        0x00001000
+ #define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS  0x00002000
+-#define ATOM_PPLIB_DISALLOW_ON_DC                        0x00004000
++
++#define ATOM_PPLIB_DISALLOW_ON_DC                       0x00004000
++
+ #define ATOM_PPLIB_ENABLE_VARIBRIGHT                     0x00008000
+ 
+ //memory related flags
+@@ -7508,7 +7525,7 @@ typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+ #define ATOM_PPLIB_R600_FLAGS_UVDSAFE           2
+ #define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE    4
+ #define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF    8
+-#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF    16
++#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF   16
+ #define ATOM_PPLIB_R600_FLAGS_LOWPOWER         32   // On the RV770 use 'low power' setting (sequencer S0).
+ 
+ typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+@@ -7527,6 +7544,24 @@ typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+ 
+ } ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
+ 
++typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
++{
++      USHORT usEngineClockLow;
++      UCHAR  ucEngineClockHigh;
++
++      USHORT usMemoryClockLow;
++      UCHAR  ucMemoryClockHigh;
++
++      USHORT usVDDC;
++      USHORT usVDDCI;
++      UCHAR  ucPCIEGen;
++      UCHAR  ucUnused1;
++
++      ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
++
++} ATOM_PPLIB_SI_CLOCK_INFO;
++
++
+ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+ 
+ {
+@@ -7539,7 +7574,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+       UCHAR  ucPadding;                   // For proper alignment and size.
+       USHORT usVDDC;                      // For the 780, use: None, Low, High, Variable
+       UCHAR  ucMaxHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}
+-      UCHAR  ucMinHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requirement.
++      UCHAR  ucMinHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
+       USHORT usHTLinkFreq;                // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
+       ULONG  ulFlags; 
+ } ATOM_PPLIB_RS780_CLOCK_INFO;
+@@ -7561,9 +7596,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+       USHORT usEngineClockLow;  //clockfrequency & 0xFFFF. The unit is in 10khz
+       UCHAR  ucEngineClockHigh; //clockfrequency >> 16. 
+       UCHAR  vddcIndex;         //2-bit vddc index;
+-      UCHAR  leakage;          //please use 8-bit absolute value, not the 6-bit % value 
+-      //please initalize to 0
+-      UCHAR  rsv;
++      USHORT tdpLimit;
+       //please initalize to 0
+       USHORT rsv1;
+       //please initialize to 0s
+@@ -7586,7 +7619,7 @@ typedef struct _ATOM_PPLIB_STATE_V2
+       UCHAR clockInfoIndex[1];
+ } ATOM_PPLIB_STATE_V2;
+ 
+-typedef struct StateArray{
++typedef struct _StateArray{
+     //how many states we have 
+     UCHAR ucNumEntries;
+     
+@@ -7594,18 +7627,17 @@ typedef struct StateArray{
+ }StateArray;
+ 
+ 
+-typedef struct ClockInfoArray{
++typedef struct _ClockInfoArray{
+     //how many clock levels we have
+     UCHAR ucNumEntries;
+     
+-    //sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO)
++    //sizeof(ATOM_PPLIB_CLOCK_INFO)
+     UCHAR ucEntrySize;
+     
+-    //this is for Sumo
+-    ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1];
++    UCHAR clockInfo[1];
+ }ClockInfoArray;
+ 
+-typedef struct NonClockInfoArray{
++typedef struct _NonClockInfoArray{
+ 
+     //how many non-clock levels we have. normally should be same as number of states
+     UCHAR ucNumEntries;
+@@ -7644,6 +7676,124 @@ typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+     ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1];                  // Dynamically allocate entries.
+ }ATOM_PPLIB_Clock_Voltage_Limit_Table;
+ 
++typedef struct _ATOM_PPLIB_CAC_Leakage_Record
++{
++    USHORT usVddc;  // We use this field for the "fake" standardized VDDC for power calculations                                                  
++    ULONG  ulLeakageValue;
++}ATOM_PPLIB_CAC_Leakage_Record;
++
++typedef struct _ATOM_PPLIB_CAC_Leakage_Table
++{
++    UCHAR ucNumEntries;                                                 // Number of entries.
++    ATOM_PPLIB_CAC_Leakage_Record entries[1];                           // Dynamically allocate entries.
++}ATOM_PPLIB_CAC_Leakage_Table;
++
++typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
++{
++    USHORT usVoltage;
++    USHORT usSclkLow;
++    UCHAR  ucSclkHigh;
++    USHORT usMclkLow;
++    UCHAR  ucMclkHigh;
++}ATOM_PPLIB_PhaseSheddingLimits_Record;
++
++typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
++{
++    UCHAR ucNumEntries;                                                 // Number of entries.
++    ATOM_PPLIB_PhaseSheddingLimits_Record entries[1];                   // Dynamically allocate entries.
++}ATOM_PPLIB_PhaseSheddingLimits_Table;
++
++typedef struct _VCEClockInfo{
++    USHORT usEVClkLow;
++    UCHAR  ucEVClkHigh;
++    USHORT usECClkLow;
++    UCHAR  ucECClkHigh;
++}VCEClockInfo;
++
++typedef struct _VCEClockInfoArray{
++    UCHAR ucNumEntries;
++    VCEClockInfo entries[1];
++}VCEClockInfoArray;
++
++typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
++{
++    USHORT usVoltage;
++    UCHAR  ucVCEClockInfoIndex;
++}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
++
++typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
++{
++    UCHAR numEntries;
++    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
++}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
++
++typedef struct _ATOM_PPLIB_VCE_State_Record
++{
++    UCHAR  ucVCEClockInfoIndex;
++    UCHAR  ucClockInfoIndex; //highest 2 bits indicates memory p-states, lower 6bits indicates index to ClockInfoArrary
++}ATOM_PPLIB_VCE_State_Record;
++
++typedef struct _ATOM_PPLIB_VCE_State_Table
++{
++    UCHAR numEntries;
++    ATOM_PPLIB_VCE_State_Record entries[1];
++}ATOM_PPLIB_VCE_State_Table;
++
++
++typedef struct _ATOM_PPLIB_VCE_Table
++{
++      UCHAR revid;
++//    VCEClockInfoArray array;
++//    ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
++//    ATOM_PPLIB_VCE_State_Table states;
++}ATOM_PPLIB_VCE_Table;
++
++
++typedef struct _UVDClockInfo{
++    USHORT usVClkLow;
++    UCHAR  ucVClkHigh;
++    USHORT usDClkLow;
++    UCHAR  ucDClkHigh;
++}UVDClockInfo;
++
++typedef struct _UVDClockInfoArray{
++    UCHAR ucNumEntries;
++    UVDClockInfo entries[1];
++}UVDClockInfoArray;
++
++typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
++{
++    USHORT usVoltage;
++    UCHAR  ucUVDClockInfoIndex;
++}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
++
++typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
++{
++    UCHAR numEntries;
++    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
++}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
++
++typedef struct _ATOM_PPLIB_UVD_State_Record
++{
++    UCHAR  ucUVDClockInfoIndex;
++    UCHAR  ucClockInfoIndex; //highest 2 bits indicates memory p-states, lower 6bits indicates index to ClockInfoArrary
++}ATOM_PPLIB_UVD_State_Record;
++
++typedef struct _ATOM_PPLIB_UVD_State_Table
++{
++    UCHAR numEntries;
++    ATOM_PPLIB_UVD_State_Record entries[1];
++}ATOM_PPLIB_UVD_State_Table;
++
++
++typedef struct _ATOM_PPLIB_UVD_Table
++{
++      UCHAR revid;
++//    UVDClockInfoArray array;
++//    ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
++//    ATOM_PPLIB_UVD_State_Table states;
++}ATOM_PPLIB_UVD_Table;
++
+ /**************************************************************************/
+ 
+ 
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index a25d08a..15594a3 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -231,6 +231,22 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
+ 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ 
++static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
++{
++	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
++	struct drm_device *dev = crtc->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
++	ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
++
++	memset(&args, 0, sizeof(args));
++
++	args.ucDispPipeId = radeon_crtc->crtc_id;
++	args.ucEnable = state;
++
++	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
++}
++
+ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+ {
+ 	struct drm_device *dev = crtc->dev;
+@@ -242,8 +258,10 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 		radeon_crtc->enabled = true;
+ 		/* adjust pm to dpms changes BEFORE enabling crtcs */
+ 		radeon_pm_compute_clocks(rdev);
++		if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
++			atombios_powergate_crtc(crtc, ATOM_DISABLE);
+ 		atombios_enable_crtc(crtc, ATOM_ENABLE);
+-		if (ASIC_IS_DCE3(rdev))
++		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
+ 			atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
+ 		atombios_blank_crtc(crtc, ATOM_DISABLE);
+ 		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+@@ -255,10 +273,12 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ 		if (radeon_crtc->enabled)
+ 			atombios_blank_crtc(crtc, ATOM_ENABLE);
+-		if (ASIC_IS_DCE3(rdev))
++		if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
+ 			atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
+ 		atombios_enable_crtc(crtc, ATOM_DISABLE);
+ 		radeon_crtc->enabled = false;
++		if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
++			atombios_powergate_crtc(crtc, ATOM_ENABLE);
+ 		/* adjust pm to dpms changes AFTER disabling crtcs */
+ 		radeon_pm_compute_clocks(rdev);
+ 		break;
+@@ -355,15 +375,12 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+ 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ 
+-static void atombios_disable_ss(struct drm_crtc *crtc)
++static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
+ {
+-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+-	struct drm_device *dev = crtc->dev;
+-	struct radeon_device *rdev = dev->dev_private;
+ 	u32 ss_cntl;
+ 
+ 	if (ASIC_IS_DCE4(rdev)) {
+-		switch (radeon_crtc->pll_id) {
++		switch (pll_id) {
+ 		case ATOM_PPLL1:
+ 			ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
+ 			ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+@@ -379,7 +396,7 @@ static void atombios_disable_ss(struct drm_crtc *crtc)
+ 			return;
+ 		}
+ 	} else if (ASIC_IS_AVIVO(rdev)) {
+-		switch (radeon_crtc->pll_id) {
++		switch (pll_id) {
+ 		case ATOM_PPLL1:
+ 			ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
+ 			ss_cntl &= ~1;
+@@ -406,16 +423,31 @@ union atom_enable_ss {
+ 	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
+ };
+ 
+-static void atombios_crtc_program_ss(struct drm_crtc *crtc,
++static void atombios_crtc_program_ss(struct radeon_device *rdev,
+ 				     int enable,
+ 				     int pll_id,
++				     int crtc_id,
+ 				     struct radeon_atom_ss *ss)
+ {
+-	struct drm_device *dev = crtc->dev;
+-	struct radeon_device *rdev = dev->dev_private;
++	unsigned i;
+ 	int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
+ 	union atom_enable_ss args;
+ 
++	if (!enable) {
++		for (i = 0; i < rdev->num_crtc; i++) {
++			if (rdev->mode_info.crtcs[i] &&
++			    rdev->mode_info.crtcs[i]->enabled &&
++			    i != crtc_id &&
++			    pll_id == rdev->mode_info.crtcs[i]->pll_id) {
++				/* one other crtc is using this pll don't turn
++				 * off spread spectrum as it might turn off
++				 * display on active crtc
++				 */
++				return;
++			}
++		}
++	}
++
+ 	memset(&args, 0, sizeof(args));
+ 
+ 	if (ASIC_IS_DCE5(rdev)) {
+@@ -441,7 +473,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
+ 			return;
+ 		}
+ 		args.v3.ucEnable = enable;
+-		if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
++		if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
+ 			args.v3.ucEnable = ATOM_DISABLE;
+ 	} else if (ASIC_IS_DCE4(rdev)) {
+ 		args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+@@ -479,7 +511,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
+ 	} else if (ASIC_IS_AVIVO(rdev)) {
+ 		if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
+ 		    (ss->type & ATOM_EXTERNAL_SS_MASK)) {
+-			atombios_disable_ss(crtc);
++			atombios_disable_ss(rdev, pll_id);
+ 			return;
+ 		}
+ 		args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+@@ -491,7 +523,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
+ 	} else {
+ 		if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
+ 		    (ss->type & ATOM_EXTERNAL_SS_MASK)) {
+-			atombios_disable_ss(crtc);
++			atombios_disable_ss(rdev, pll_id);
+ 			return;
+ 		}
+ 		args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
+@@ -523,6 +555,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ 	int encoder_mode = 0;
+ 	u32 dp_clock = mode->clock;
+ 	int bpc = 8;
++	bool is_duallink = false;
+ 
+ 	/* reset the pll flags */
+ 	pll->flags = 0;
+@@ -542,9 +575,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ 		if (rdev->family < CHIP_RV770)
+ 			pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+ 		/* use frac fb div on APUs */
+-		if (ASIC_IS_DCE41(rdev))
+-			pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+-		if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
++		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+ 			pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ 	} else {
+ 		pll->flags |= RADEON_PLL_LEGACY;
+@@ -559,9 +590,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ 		if (encoder->crtc == crtc) {
+ 			radeon_encoder = to_radeon_encoder(encoder);
+ 			connector = radeon_get_connector_for_encoder(encoder);
+-			if (connector)
+-				bpc = connector->display_info.bpc;
++			/* if (connector && connector->display_info.bpc)
++				bpc = connector->display_info.bpc; */
+ 			encoder_mode = atombios_get_encoder_mode(encoder);
++			is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
+ 			if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+ 			    (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
+ 				if (connector) {
+@@ -657,7 +689,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ 					if (dig->coherent_mode)
+ 						args.v3.sInput.ucDispPllConfig |=
+ 							DISPPLL_CONFIG_COHERENT_MODE;
+-					if (mode->clock > 165000)
++					if (is_duallink)
+ 						args.v3.sInput.ucDispPllConfig |=
+ 							DISPPLL_CONFIG_DUAL_LINK;
+ 				}
+@@ -707,11 +739,9 @@ union set_pixel_clock {
+ /* on DCE5, make sure the voltage is high enough to support the
+  * required disp clk.
+  */
+-static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
++static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
+ 				    u32 dispclk)
+ {
+-	struct drm_device *dev = crtc->dev;
+-	struct radeon_device *rdev = dev->dev_private;
+ 	u8 frev, crev;
+ 	int index;
+ 	union set_pixel_clock args;
+@@ -739,7 +769,12 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
+ 			 * SetPixelClock provides the dividers
+ 			 */
+ 			args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
+-			args.v6.ucPpll = ATOM_DCPLL;
++			if (ASIC_IS_DCE61(rdev))
++				args.v6.ucPpll = ATOM_EXT_PLL1;
++			else if (ASIC_IS_DCE6(rdev))
++				args.v6.ucPpll = ATOM_PPLL0;
++			else
++				args.v6.ucPpll = ATOM_DCPLL;
+ 			break;
+ 		default:
+ 			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+@@ -932,7 +967,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
+ 		struct radeon_connector_atom_dig *dig_connector =
+ 			radeon_connector->con_priv;
+ 		int dp_clock;
+-		bpc = connector->display_info.bpc;
++
++		/* if (connector->display_info.bpc)
++			bpc = connector->display_info.bpc; */
+ 
+ 		switch (encoder_mode) {
+ 		case ATOM_ENCODER_MODE_DP_MST:
+@@ -1001,7 +1038,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
+ 		radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+ 					  &ref_div, &post_div);
+ 
+-	atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
++	atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
+ 
+ 	atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+ 				  encoder_mode, radeon_encoder->encoder_id, mode->clock,
+@@ -1024,7 +1061,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
+ 			ss.step = step_size;
+ 		}
+ 
+-		atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
++		atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
+ 	}
+ }
+ 
+@@ -1041,6 +1078,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+ 	struct radeon_bo *rbo;
+ 	uint64_t fb_location;
+ 	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
++	unsigned bankw, bankh, mtaspect, tile_split;
+ 	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
+ 	u32 tmp, viewport_w, viewport_h;
+ 	int r;
+@@ -1131,20 +1169,13 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+ 			break;
+ 		}
+ 
+-		switch ((tmp & 0xf000) >> 12) {
+-		case 0: /* 1KB rows */
+-		default:
+-			fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
+-			break;
+-		case 1: /* 2KB rows */
+-			fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
+-			break;
+-		case 2: /* 4KB rows */
+-			fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
+-			break;
+-		}
+-
+ 		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
++
++		evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
++		fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
++		fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
++		fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
++		fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
+ 	} else if (tiling_flags & RADEON_TILING_MICRO)
+ 		fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+ 
+@@ -1189,7 +1220,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+ 	WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+ 	WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
+ 
+-	fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
++	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ 	WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+ 	WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+ 
+@@ -1358,7 +1389,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
+ 	WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
+ 	WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
+ 
+-	fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
++	fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ 	WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+ 	WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+ 
+@@ -1460,7 +1491,36 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ 	struct drm_crtc *test_crtc;
+ 	uint32_t pll_in_use = 0;
+ 
+-	if (ASIC_IS_DCE4(rdev)) {
++	if (ASIC_IS_DCE61(rdev)) {
++		list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
++			if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
++				struct radeon_encoder *test_radeon_encoder =
++					to_radeon_encoder(test_encoder);
++				struct radeon_encoder_atom_dig *dig =
++					test_radeon_encoder->enc_priv;
++
++				if ((test_radeon_encoder->encoder_id ==
++				     ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
++				    (dig->linkb == false)) /* UNIPHY A uses PPLL2 */
++					return ATOM_PPLL2;
++			}
++		}
++		/* UNIPHY B/C/D/E/F */
++		list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
++			struct radeon_crtc *radeon_test_crtc;
++
++			if (crtc == test_crtc)
++				continue;
++
++			radeon_test_crtc = to_radeon_crtc(test_crtc);
++			if ((radeon_test_crtc->pll_id == ATOM_PPLL0) ||
++			    (radeon_test_crtc->pll_id == ATOM_PPLL1))
++				pll_in_use |= (1 << radeon_test_crtc->pll_id);
++		}
++		if (!(pll_in_use & 4))
++			return ATOM_PPLL0;
++		return ATOM_PPLL1;
++	} else if (ASIC_IS_DCE4(rdev)) {
+ 		list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+ 			if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
+ 				/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
+@@ -1475,6 +1535,8 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ 				if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
+ 					if (rdev->clock.dp_extclk)
+ 						return ATOM_PPLL_INVALID;
++					else if (ASIC_IS_DCE6(rdev))
++						return ATOM_PPLL0;
+ 					else if (ASIC_IS_DCE5(rdev))
+ 						return ATOM_DCPLL;
+ 				}
+@@ -1501,6 +1563,26 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+ 
+ }
+ 
++void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
++{
++	/* always set DCPLL */
++	if (ASIC_IS_DCE6(rdev))
++		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
++	else if (ASIC_IS_DCE4(rdev)) {
++		struct radeon_atom_ss ss;
++		bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
++								   ASIC_INTERNAL_SS_ON_DCPLL,
++								   rdev->clock.default_dispclk);
++		if (ss_enabled)
++			atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
++		/* XXX: DCE5, make sure voltage, dispclk is high enough */
++		atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
++		if (ss_enabled)
++			atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
++	}
++
++}
++
+ int atombios_crtc_mode_set(struct drm_crtc *crtc,
+ 			   struct drm_display_mode *mode,
+ 			   struct drm_display_mode *adjusted_mode,
+@@ -1522,19 +1604,6 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
+ 		}
+ 	}
+ 
+-	/* always set DCPLL */
+-	if (ASIC_IS_DCE4(rdev)) {
+-		struct radeon_atom_ss ss;
+-		bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
+-								   ASIC_INTERNAL_SS_ON_DCPLL,
+-								   rdev->clock.default_dispclk);
+-		if (ss_enabled)
+-			atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
+-		/* XXX: DCE5, make sure voltage, dispclk is high enough */
+-		atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk);
+-		if (ss_enabled)
+-			atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
+-	}
+ 	atombios_crtc_set_pll(crtc, adjusted_mode);
+ 
+ 	if (ASIC_IS_DCE4(rdev))
+@@ -1568,18 +1637,28 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
+ static void atombios_crtc_prepare(struct drm_crtc *crtc)
+ {
+ 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
++	struct drm_device *dev = crtc->dev;
++	struct radeon_device *rdev = dev->dev_private;
+ 
++	radeon_crtc->in_mode_set = true;
+ 	/* pick pll */
+ 	radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+ 
++	/* disable crtc pair power gating before programming */
++	if (ASIC_IS_DCE6(rdev))
++		atombios_powergate_crtc(crtc, ATOM_DISABLE);
++
+ 	atombios_lock_crtc(crtc, ATOM_ENABLE);
+ 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ }
+ 
+ static void atombios_crtc_commit(struct drm_crtc *crtc)
+ {
++	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
++
+ 	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ 	atombios_lock_crtc(crtc, ATOM_DISABLE);
++	radeon_crtc->in_mode_set = false;
+ }
+ 
+ static void atombios_crtc_disable(struct drm_crtc *crtc)
+@@ -1611,6 +1690,12 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
+ 		atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+ 					  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+ 		break;
++	case ATOM_PPLL0:
++		/* disable the ppll */
++		if (ASIC_IS_DCE61(rdev))
++			atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
++						  0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
++		break;
+ 	default:
+ 		break;
+ 	}
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 3254d51..886b41f 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -64,12 +64,12 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
+ 
+ 	memset(&args, 0, sizeof(args));
+ 
+-	base = (unsigned char *)rdev->mode_info.atom_context->scratch;
++	base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
+ 
+ 	memcpy(base, send, send_bytes);
+ 
+-	args.v1.lpAuxRequest = 0;
+-	args.v1.lpDataOut = 16;
++	args.v1.lpAuxRequest = 0 + 4;
++	args.v1.lpDataOut = 16 + 4;
+ 	args.v1.ucDataOutLen = 0;
+ 	args.v1.ucChannelID = chan->rec.i2c_id;
+ 	args.v1.ucDelay = delay / 10;
+@@ -406,10 +406,13 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
+ /* get bpc from the EDID */
+ static int convert_bpc_to_bpp(int bpc)
+ {
++#if 0
+ 	if (bpc == 0)
+ 		return 24;
+ 	else
+ 		return bpc * 3;
++#endif
++	return 24;
+ }
+ 
+ /* get the max pix clock supported by the link rate and lane num */
+@@ -746,7 +749,8 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
+ 
+ 	/* set the lane count on the sink */
+ 	tmp = dp_info->dp_lane_count;
+-	if (dp_info->dpcd[0] >= 0x11)
++	if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
++	    dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+ 		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ 	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
+ 
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index f0dc04b..23e3ea6 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -57,22 +57,6 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+ 	}
+ }
+ 
+-static struct drm_connector *
+-radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
+-{
+-	struct drm_device *dev = encoder->dev;
+-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+-	struct drm_connector *connector;
+-	struct radeon_connector *radeon_connector;
+-
+-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+-		radeon_connector = to_radeon_connector(connector);
+-		if (radeon_encoder->devices & radeon_connector->devices)
+-			return connector;
+-	}
+-	return NULL;
+-}
+-
+ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ 				   struct drm_display_mode *mode,
+ 				   struct drm_display_mode *adjusted_mode)
+@@ -257,7 +241,7 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action)
+ 			/* R4xx, R5xx */
+ 			args.ext_tmds.sXTmdsEncoder.ucEnable = action;
+ 
+-			if (radeon_encoder->pixel_clock > 165000)
++			if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 				args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ 
+ 			args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
+@@ -269,7 +253,7 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action)
+ 			/* DFP1, CRT1, TV1 depending on the type of port */
+ 			args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
+ 
+-			if (radeon_encoder->pixel_clock > 165000)
++			if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 				args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
+ 			break;
+ 		case 3:
+@@ -353,7 +337,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
+ 			} else {
+ 				if (dig->linkb)
+ 					args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+-				if (radeon_encoder->pixel_clock > 165000)
++				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ 				/*if (pScrn->rgbBits == 8) */
+ 				args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+@@ -392,7 +376,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
+ 			} else {
+ 				if (dig->linkb)
+ 					args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+-				if (radeon_encoder->pixel_clock > 165000)
++				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+ 			}
+ 			break;
+@@ -413,8 +397,6 @@ int
+ atombios_get_encoder_mode(struct drm_encoder *encoder)
+ {
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+-	struct drm_device *dev = encoder->dev;
+-	struct radeon_device *rdev = dev->dev_private;
+ 	struct drm_connector *connector;
+ 	struct radeon_connector *radeon_connector;
+ 	struct radeon_connector_atom_dig *dig_connector;
+@@ -438,13 +420,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
+ 	switch (connector->connector_type) {
+ 	case DRM_MODE_CONNECTOR_DVII:
+ 	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
+-		if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
+-			/* fix me */
+-			if (ASIC_IS_DCE4(rdev))
+-				return ATOM_ENCODER_MODE_DVI;
+-			else
+-				return ATOM_ENCODER_MODE_HDMI;
+-		} else if (radeon_connector->use_digital)
++		if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
++		    radeon_audio)
++			return ATOM_ENCODER_MODE_HDMI;
++		else if (radeon_connector->use_digital)
+ 			return ATOM_ENCODER_MODE_DVI;
+ 		else
+ 			return ATOM_ENCODER_MODE_CRT;
+@@ -452,13 +431,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
+ 	case DRM_MODE_CONNECTOR_DVID:
+ 	case DRM_MODE_CONNECTOR_HDMIA:
+ 	default:
+-		if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
+-			/* fix me */
+-			if (ASIC_IS_DCE4(rdev))
+-				return ATOM_ENCODER_MODE_DVI;
+-			else
+-				return ATOM_ENCODER_MODE_HDMI;
+-		} else
++		if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
++		    radeon_audio)
++			return ATOM_ENCODER_MODE_HDMI;
++		else
+ 			return ATOM_ENCODER_MODE_DVI;
+ 		break;
+ 	case DRM_MODE_CONNECTOR_LVDS:
+@@ -469,13 +445,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
+ 		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ 		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+ 			return ATOM_ENCODER_MODE_DP;
+-		else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
+-			/* fix me */
+-			if (ASIC_IS_DCE4(rdev))
+-				return ATOM_ENCODER_MODE_DVI;
+-			else
+-				return ATOM_ENCODER_MODE_HDMI;
+-		} else
++		else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
++			 radeon_audio)
++			return ATOM_ENCODER_MODE_HDMI;
++		else
+ 			return ATOM_ENCODER_MODE_DVI;
+ 		break;
+ 	case DRM_MODE_CONNECTOR_eDP:
+@@ -510,7 +483,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
+  * - 2 DIG encoder blocks.
+  * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+  *
+- * DCE 4.0/5.0
++ * DCE 4.0/5.0/6.0
+  * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+  * Supports up to 6 digital outputs
+  * - 6 DIG encoder blocks.
+@@ -526,7 +499,11 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
+  * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+  * Supports up to 6 digital outputs
+  * - 2 DIG encoder blocks.
++ * llano
+  * DIG1/2 can drive UNIPHY0/1/2 link A or link B
++ * ontario
++ * DIG1 drives UNIPHY0/1/2 link A
++ * DIG2 drives UNIPHY0/1/2 link B
+  *
+  * Routing
+  * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
+@@ -568,7 +545,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
+ 		dp_clock = dig_connector->dp_clock;
+ 		dp_lane_count = dig_connector->dp_lane_count;
+ 		hpd_id = radeon_connector->hpd.hpd;
+-		bpc = connector->display_info.bpc;
++		/* bpc = connector->display_info.bpc; */
+ 	}
+ 
+ 	/* no dig encoder assigned */
+@@ -602,7 +579,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
+ 
+ 			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+ 				args.v1.ucLaneNum = dp_lane_count;
+-			else if (radeon_encoder->pixel_clock > 165000)
++			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 				args.v1.ucLaneNum = 8;
+ 			else
+ 				args.v1.ucLaneNum = 4;
+@@ -637,7 +614,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
+ 
+ 			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+ 				args.v3.ucLaneNum = dp_lane_count;
+-			else if (radeon_encoder->pixel_clock > 165000)
++			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 				args.v3.ucLaneNum = 8;
+ 			else
+ 				args.v3.ucLaneNum = 4;
+@@ -677,7 +654,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
+ 
+ 			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+ 				args.v4.ucLaneNum = dp_lane_count;
+-			else if (radeon_encoder->pixel_clock > 165000)
++			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 				args.v4.ucLaneNum = 8;
+ 			else
+ 				args.v4.ucLaneNum = 4;
+@@ -734,6 +711,7 @@ union dig_transmitter_control {
+ 	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+ 	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
+ 	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
++	DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5;
+ };
+ 
+ void
+@@ -754,6 +732,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ 	int connector_object_id = 0;
+ 	int igp_lane_info = 0;
+ 	int dig_encoder = dig->dig_encoder;
++	int hpd_id = RADEON_HPD_NONE;
+ 
+ 	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ 		connector = radeon_get_connector_for_encoder_init(encoder);
+@@ -769,6 +748,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ 		struct radeon_connector_atom_dig *dig_connector =
+ 			radeon_connector->con_priv;
+ 
++		hpd_id = radeon_connector->hpd.hpd;
+ 		dp_clock = dig_connector->dp_clock;
+ 		dp_lane_count = dig_connector->dp_lane_count;
+ 		connector_object_id =
+@@ -821,7 +801,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ 				if (is_dp)
+ 					args.v1.usPixelClock =
+ 						cpu_to_le16(dp_clock / 10);
+-				else if (radeon_encoder->pixel_clock > 165000)
++				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 					args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ 				else
+ 					args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+@@ -836,7 +816,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ 
+ 			if ((rdev->flags & RADEON_IS_IGP) &&
+ 			    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
+-				if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
++				if (is_dp ||
++				    !radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) {
+ 					if (igp_lane_info & 0x1)
+ 						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+ 					else if (igp_lane_info & 0x2)
+@@ -863,7 +844,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ 			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ 				if (dig->coherent_mode)
+ 					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+-				if (radeon_encoder->pixel_clock > 165000)
++				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
+ 			}
+ 			break;
+@@ -878,7 +859,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ 				if (is_dp)
+ 					args.v2.usPixelClock =
+ 						cpu_to_le16(dp_clock / 10);
+-				else if (radeon_encoder->pixel_clock > 165000)
++				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 					args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ 				else
+ 					args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+@@ -906,7 +887,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ 			} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ 				if (dig->coherent_mode)
+ 					args.v2.acConfig.fCoherentMode = 1;
+-				if (radeon_encoder->pixel_clock > 165000)
++				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 					args.v2.acConfig.fDualLinkConnector = 1;
+ 			}
+ 			break;
+@@ -921,7 +902,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ 				if (is_dp)
+ 					args.v3.usPixelClock =
+ 						cpu_to_le16(dp_clock / 10);
+-				else if (radeon_encoder->pixel_clock > 165000)
++				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 					args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ 				else
+ 					args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+@@ -929,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ 
+ 			if (is_dp)
+ 				args.v3.ucLaneNum = dp_lane_count;
+-			else if (radeon_encoder->pixel_clock > 165000)
++			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 				args.v3.ucLaneNum = 8;
+ 			else
+ 				args.v3.ucLaneNum = 4;
+@@ -966,7 +947,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ 			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ 				if (dig->coherent_mode)
+ 					args.v3.acConfig.fCoherentMode = 1;
+-				if (radeon_encoder->pixel_clock > 165000)
++				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 					args.v3.acConfig.fDualLinkConnector = 1;
+ 			}
+ 			break;
+@@ -981,7 +962,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ 				if (is_dp)
+ 					args.v4.usPixelClock =
+ 						cpu_to_le16(dp_clock / 10);
+-				else if (radeon_encoder->pixel_clock > 165000)
++				else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 					args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ 				else
+ 					args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+@@ -989,7 +970,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ 
+ 			if (is_dp)
+ 				args.v4.ucLaneNum = dp_lane_count;
+-			else if (radeon_encoder->pixel_clock > 165000)
++			else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 				args.v4.ucLaneNum = 8;
+ 			else
+ 				args.v4.ucLaneNum = 4;
+@@ -1029,10 +1010,64 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
+ 			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ 				if (dig->coherent_mode)
+ 					args.v4.acConfig.fCoherentMode = 1;
+-				if (radeon_encoder->pixel_clock > 165000)
++				if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 					args.v4.acConfig.fDualLinkConnector = 1;
+ 			}
+ 			break;
++		case 5:
++			args.v5.ucAction = action;
++			if (is_dp)
++				args.v5.usSymClock = cpu_to_le16(dp_clock / 10);
++			else
++				args.v5.usSymClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
++
++			switch (radeon_encoder->encoder_id) {
++			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++				if (dig->linkb)
++					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYB;
++				else
++					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYA;
++				break;
++			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++				if (dig->linkb)
++					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYD;
++				else
++					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYC;
++				break;
++			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++				if (dig->linkb)
++					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYF;
++				else
++					args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE;
++				break;
++			}
++			if (is_dp)
++				args.v5.ucLaneNum = dp_lane_count;
++			else if (radeon_encoder->pixel_clock > 165000)
++				args.v5.ucLaneNum = 8;
++			else
++				args.v5.ucLaneNum = 4;
++			args.v5.ucConnObjId = connector_object_id;
++			args.v5.ucDigMode = atombios_get_encoder_mode(encoder);
++
++			if (is_dp && rdev->clock.dp_extclk)
++				args.v5.asConfig.ucPhyClkSrcId = ENCODER_REFCLK_SRC_EXTCLK;
++			else
++				args.v5.asConfig.ucPhyClkSrcId = pll_id;
++
++			if (is_dp)
++				args.v5.asConfig.ucCoherentMode = 1; /* DP requires coherent */
++			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
++				if (dig->coherent_mode)
++					args.v5.asConfig.ucCoherentMode = 1;
++			}
++			if (hpd_id == RADEON_HPD_NONE)
++				args.v5.asConfig.ucHPDSel = 0;
++			else
++				args.v5.asConfig.ucHPDSel = hpd_id + 1;
++			args.v5.ucDigEncoderSel = 1 << dig_encoder;
++			args.v5.ucDPLaneSet = lane_set;
++			break;
+ 		default:
+ 			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ 			break;
+@@ -1128,7 +1163,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
+ 		dp_lane_count = dig_connector->dp_lane_count;
+ 		connector_object_id =
+ 			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+-		bpc = connector->display_info.bpc;
++		/* bpc = connector->display_info.bpc; */
+ 	}
+ 
+ 	memset(&args, 0, sizeof(args));
+@@ -1152,7 +1187,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
+ 				if (dp_clock == 270000)
+ 					args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ 				args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
+-			} else if (radeon_encoder->pixel_clock > 165000)
++			} else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 				args.v1.sDigEncoder.ucLaneNum = 8;
+ 			else
+ 				args.v1.sDigEncoder.ucLaneNum = 4;
+@@ -1171,7 +1206,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
+ 				else if (dp_clock == 540000)
+ 					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
+ 				args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
+-			} else if (radeon_encoder->pixel_clock > 165000)
++			} else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
+ 				args.v3.sExtEncoder.ucLaneNum = 8;
+ 			else
+ 				args.v3.sExtEncoder.ucLaneNum = 4;
+@@ -1369,7 +1404,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ 						   ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+ 						   dig->panel_mode);
+ 			if (ext_encoder) {
+-				if (ASIC_IS_DCE41(rdev))
++				if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+ 					atombios_external_encoder_setup(encoder, ext_encoder,
+ 									EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+ 			}
+@@ -1444,7 +1479,7 @@ radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder,
+ 	switch (mode) {
+ 	case DRM_MODE_DPMS_ON:
+ 	default:
+-		if (ASIC_IS_DCE41(rdev)) {
++		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
+ 			atombios_external_encoder_setup(encoder, ext_encoder,
+ 							EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
+ 			atombios_external_encoder_setup(encoder, ext_encoder,
+@@ -1455,7 +1490,7 @@ radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder,
+ 	case DRM_MODE_DPMS_STANDBY:
+ 	case DRM_MODE_DPMS_SUSPEND:
+ 	case DRM_MODE_DPMS_OFF:
+-		if (ASIC_IS_DCE41(rdev)) {
++		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
+ 			atombios_external_encoder_setup(encoder, ext_encoder,
+ 							EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
+ 			atombios_external_encoder_setup(encoder, ext_encoder,
+@@ -1734,13 +1769,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+ 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct drm_encoder *test_encoder;
+-	struct radeon_encoder_atom_dig *dig;
++	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ 	uint32_t dig_enc_in_use = 0;
+ 
+-	/* DCE4/5 */
+-	if (ASIC_IS_DCE4(rdev)) {
+-		dig = radeon_encoder->enc_priv;
+-		if (ASIC_IS_DCE41(rdev)) {
++	if (ASIC_IS_DCE6(rdev)) {
++		/* DCE6 */
++		switch (radeon_encoder->encoder_id) {
++		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++			if (dig->linkb)
++				return 1;
++			else
++				return 0;
++			break;
++		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++			if (dig->linkb)
++				return 3;
++			else
++				return 2;
++			break;
++		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++			if (dig->linkb)
++				return 5;
++			else
++				return 4;
++			break;
++		}
++	} else if (ASIC_IS_DCE4(rdev)) {
++		/* DCE4/5 */
++		if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
+ 			/* ontario follows DCE4 */
+ 			if (rdev->family == CHIP_PALM) {
+ 				if (dig->linkb)
+@@ -1828,7 +1884,7 @@ radeon_atom_encoder_init(struct radeon_device *rdev)
+ 			break;
+ 		}
+ 
+-		if (ext_encoder && ASIC_IS_DCE41(rdev))
++		if (ext_encoder && (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)))
+ 			atombios_external_encoder_setup(encoder, ext_encoder,
+ 							EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+ 	}
+diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
+new file mode 100644
+index 0000000..44d87b6
+--- /dev/null
++++ b/drivers/gpu/drm/radeon/atombios_i2c.c
+@@ -0,0 +1,139 @@
++/*
++ * Copyright 2011 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Alex Deucher
++ *
++ */
++#include "drmP.h"
++#include "radeon_drm.h"
++#include "radeon.h"
++#include "atom.h"
++
++#define TARGET_HW_I2C_CLOCK 50
++
++/* these are a limitation of ProcessI2cChannelTransaction not the hw */
++#define ATOM_MAX_HW_I2C_WRITE 2
++#define ATOM_MAX_HW_I2C_READ  255
++
++static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
++				 u8 slave_addr, u8 flags,
++				 u8 *buf, u8 num)
++{
++	struct drm_device *dev = chan->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
++	int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
++	unsigned char *base;
++	u16 out;
++
++	memset(&args, 0, sizeof(args));
++
++	base = (unsigned char *)rdev->mode_info.atom_context->scratch;
++
++	if (flags & HW_I2C_WRITE) {
++		if (num > ATOM_MAX_HW_I2C_WRITE) {
++			DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
++			return -EINVAL;
++		}
++		memcpy(&out, buf, num);
++		args.lpI2CDataOut = cpu_to_le16(out);
++	} else {
++		if (num > ATOM_MAX_HW_I2C_READ) {
++			DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
++			return -EINVAL;
++		}
++	}
++
++	args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
++	args.ucRegIndex = 0;
++	args.ucTransBytes = num;
++	args.ucSlaveAddr = slave_addr << 1;
++	args.ucLineNumber = chan->rec.i2c_id;
++
++	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
++
++	/* error */
++	if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
++		DRM_DEBUG_KMS("hw_i2c error\n");
++		return -EIO;
++	}
++
++	if (!(flags & HW_I2C_WRITE))
++		memcpy(buf, base, num);
++
++	return 0;
++}
++
++int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
++			    struct i2c_msg *msgs, int num)
++{
++	struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
++	struct i2c_msg *p;
++	int i, remaining, current_count, buffer_offset, max_bytes, ret;
++	u8 buf = 0, flags;
++
++	/* check for bus probe */
++	p = &msgs[0];
++	if ((num == 1) && (p->len == 0)) {
++		ret = radeon_process_i2c_ch(i2c,
++					    p->addr, HW_I2C_WRITE,
++					    &buf, 1);
++		if (ret)
++			return ret;
++		else
++			return num;
++	}
++
++	for (i = 0; i < num; i++) {
++		p = &msgs[i];
++		remaining = p->len;
++		buffer_offset = 0;
++		/* max_bytes are a limitation of ProcessI2cChannelTransaction not the hw */
++		if (p->flags & I2C_M_RD) {
++			max_bytes = ATOM_MAX_HW_I2C_READ;
++			flags = HW_I2C_READ;
++		} else {
++			max_bytes = ATOM_MAX_HW_I2C_WRITE;
++			flags = HW_I2C_WRITE;
++		}
++		while (remaining) {
++			if (remaining > max_bytes)
++				current_count = max_bytes;
++			else
++				current_count = remaining;
++			ret = radeon_process_i2c_ch(i2c,
++						    p->addr, flags,
++						    &p->buf[buffer_offset], current_count);
++			if (ret)
++				return ret;
++			remaining -= current_count;
++			buffer_offset += current_count;
++		}
++	}
++
++	return num;
++}
++
++u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap)
++{
++	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
++}
++
+diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
+index 7b4eeb7..19a0114 100644
+--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.c
++++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
+@@ -24,6 +24,7 @@
+  *     Alex Deucher <alexander.deucher at amd.com>
+  */
+ 
++#include <linux/bug.h>
+ #include <linux/types.h>
+ #include <linux/kernel.h>
+ 
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index 0977849..4a1d8f3 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -50,6 +50,39 @@ static const u32 crtc_offsets[6] =
+ static void evergreen_gpu_init(struct radeon_device *rdev);
+ void evergreen_fini(struct radeon_device *rdev);
+ void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
++extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
++				     int ring, u32 cp_int_cntl);
++
++void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
++			     unsigned *bankh, unsigned *mtaspect,
++			     unsigned *tile_split)
++{
++	*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
++	*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
++	*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
++	*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
++	switch (*bankw) {
++	default:
++	case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
++	case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
++	case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
++	case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
++	}
++	switch (*bankh) {
++	default:
++	case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
++	case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
++	case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
++	case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
++	}
++	switch (*mtaspect) {
++	default:
++	case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
++	case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
++	case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
++	case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
++	}
++}
+ 
+ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
+ {
+@@ -560,7 +593,7 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
+ 	return 0;
+ }
+ 
+-static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
++u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
+ {
+ 	u32 tmp = RREG32(MC_SHARED_CHMAP);
+ 
+@@ -1109,11 +1142,20 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
+ 		crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
+ 		if (crtc_enabled) {
+ 			save->crtc_enabled[i] = true;
+-			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+-			if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
+-				dce4_wait_for_vblank(rdev, i);
+-				tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+-				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
++			if (ASIC_IS_DCE6(rdev)) {
++				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
++				if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
++					radeon_wait_for_vblank(rdev, i);
++					tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
++					WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
++				}
++			} else {
++				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
++				if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
++					radeon_wait_for_vblank(rdev, i);
++					tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
++					WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
++				}
+ 			}
+ 			/* wait for the next frame */
+ 			frame_count = radeon_get_vblank_counter(rdev, i);
+@@ -1127,7 +1169,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
+ 		}
+ 	}
+ 
+-	evergreen_mc_wait_for_idle(rdev);
++	radeon_mc_wait_for_idle(rdev);
+ 
+ 	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+ 	if ((blackout & BLACKOUT_MODE_MASK) != 1) {
+@@ -1166,10 +1208,16 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
+ 	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
+ 
+ 	for (i = 0; i < rdev->num_crtc; i++) {
+-		if (save->crtc_enabled[i]) {
+-			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+-			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+-			WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
++		if (save->crtc_enabled) {
++			if (ASIC_IS_DCE6(rdev)) {
++				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
++				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
++				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
++			} else {
++				tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
++				tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
++				WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
++			}
+ 			/* wait for the next frame */
+ 			frame_count = radeon_get_vblank_counter(rdev, i);
+ 			for (j = 0; j < rdev->usec_timeout; j++) {
+@@ -1229,7 +1277,10 @@ void evergreen_mc_program(struct radeon_device *rdev)
+ 			rdev->mc.vram_end >> 12);
+ 	}
+ 	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
+-	if (rdev->flags & RADEON_IS_IGP) {
++	/* llano/ontario only */
++	if ((rdev->family == CHIP_PALM) ||
++	    (rdev->family == CHIP_SUMO) ||
++	    (rdev->family == CHIP_SUMO2)) {
+ 		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
+ 		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
+ 		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
+@@ -1264,18 +1315,20 @@ void evergreen_mc_program(struct radeon_device *rdev)
+  */
+ void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+ {
++	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
++
+ 	/* set to DX10/11 mode */
+-	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
+-	radeon_ring_write(rdev, 1);
++	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
++	radeon_ring_write(ring, 1);
+ 	/* FIXME: implement */
+-	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
++	radeon_ring_write(ring,
+ #ifdef __BIG_ENDIAN
+ 			  (2 << 0) |
+ #endif
+ 			  (ib->gpu_addr & 0xFFFFFFFC));
+-	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
+-	radeon_ring_write(rdev, ib->length_dw);
++	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
++	radeon_ring_write(ring, ib->length_dw);
+ }
+ 
+ 
+@@ -1313,71 +1366,73 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
+ 
+ static int evergreen_cp_start(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	int r, i;
+ 	uint32_t cp_me;
+ 
+-	r = radeon_ring_lock(rdev, 7);
++	r = radeon_ring_lock(rdev, ring, 7);
+ 	if (r) {
+ 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ 		return r;
+ 	}
+-	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+-	radeon_ring_write(rdev, 0x1);
+-	radeon_ring_write(rdev, 0x0);
+-	radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
+-	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
++	radeon_ring_write(ring, 0x1);
++	radeon_ring_write(ring, 0x0);
++	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
++	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
++	radeon_ring_unlock_commit(rdev, ring);
+ 
+ 	cp_me = 0xff;
+ 	WREG32(CP_ME_CNTL, cp_me);
+ 
+-	r = radeon_ring_lock(rdev, evergreen_default_size + 19);
++	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
+ 	if (r) {
+ 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ 		return r;
+ 	}
+ 
+ 	/* setup clear context state */
+-	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+-	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
++	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
++	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ 
+ 	for (i = 0; i < evergreen_default_size; i++)
+-		radeon_ring_write(rdev, evergreen_default_state[i]);
++		radeon_ring_write(ring, evergreen_default_state[i]);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+-	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
++	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
++	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+ 
+ 	/* set clear context state */
+-	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+-	radeon_ring_write(rdev, 0);
++	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
++	radeon_ring_write(ring, 0);
+ 
+ 	/* SQ_VTX_BASE_VTX_LOC */
+-	radeon_ring_write(rdev, 0xc0026f00);
+-	radeon_ring_write(rdev, 0x00000000);
+-	radeon_ring_write(rdev, 0x00000000);
+-	radeon_ring_write(rdev, 0x00000000);
++	radeon_ring_write(ring, 0xc0026f00);
++	radeon_ring_write(ring, 0x00000000);
++	radeon_ring_write(ring, 0x00000000);
++	radeon_ring_write(ring, 0x00000000);
+ 
+ 	/* Clear consts */
+-	radeon_ring_write(rdev, 0xc0036f00);
+-	radeon_ring_write(rdev, 0x00000bc4);
+-	radeon_ring_write(rdev, 0xffffffff);
+-	radeon_ring_write(rdev, 0xffffffff);
+-	radeon_ring_write(rdev, 0xffffffff);
++	radeon_ring_write(ring, 0xc0036f00);
++	radeon_ring_write(ring, 0x00000bc4);
++	radeon_ring_write(ring, 0xffffffff);
++	radeon_ring_write(ring, 0xffffffff);
++	radeon_ring_write(ring, 0xffffffff);
+ 
+-	radeon_ring_write(rdev, 0xc0026900);
+-	radeon_ring_write(rdev, 0x00000316);
+-	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+-	radeon_ring_write(rdev, 0x00000010); /*  */
++	radeon_ring_write(ring, 0xc0026900);
++	radeon_ring_write(ring, 0x00000316);
++	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
++	radeon_ring_write(ring, 0x00000010); /*  */
+ 
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_unlock_commit(rdev, ring);
+ 
+ 	return 0;
+ }
+ 
+ int evergreen_cp_resume(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	u32 tmp;
+ 	u32 rb_bufsz;
+ 	int r;
+@@ -1395,13 +1450,14 @@ int evergreen_cp_resume(struct radeon_device *rdev)
+ 	RREG32(GRBM_SOFT_RESET);
+ 
+ 	/* Set ring buffer size */
+-	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
++	rb_bufsz = drm_order(ring->ring_size / 8);
+ 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ #ifdef __BIG_ENDIAN
+ 	tmp |= BUF_SWAP_32BIT;
+ #endif
+ 	WREG32(CP_RB_CNTL, tmp);
+-	WREG32(CP_SEM_WAIT_TIMER, 0x4);
++	WREG32(CP_SEM_WAIT_TIMER, 0x0);
++	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+ 
+ 	/* Set the write pointer delay */
+ 	WREG32(CP_RB_WPTR_DELAY, 0);
+@@ -1409,8 +1465,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
+ 	/* Initialize the ring buffer's read and write pointers */
+ 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+ 	WREG32(CP_RB_RPTR_WR, 0);
+-	rdev->cp.wptr = 0;
+-	WREG32(CP_RB_WPTR, rdev->cp.wptr);
++	ring->wptr = 0;
++	WREG32(CP_RB_WPTR, ring->wptr);
+ 
+ 	/* set the wb address wether it's enabled or not */
+ 	WREG32(CP_RB_RPTR_ADDR,
+@@ -1428,16 +1484,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
+ 	mdelay(1);
+ 	WREG32(CP_RB_CNTL, tmp);
+ 
+-	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
++	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
+ 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+ 
+-	rdev->cp.rptr = RREG32(CP_RB_RPTR);
++	ring->rptr = RREG32(CP_RB_RPTR);
+ 
+ 	evergreen_cp_start(rdev);
+-	rdev->cp.ready = true;
+-	r = radeon_ring_test(rdev);
++	ring->ready = true;
++	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+ 	if (r) {
+-		rdev->cp.ready = false;
++		ring->ready = false;
+ 		return r;
+ 	}
+ 	return 0;
+@@ -1730,7 +1786,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ 	case CHIP_SUMO:
+ 		rdev->config.evergreen.num_ses = 1;
+ 		rdev->config.evergreen.max_pipes = 4;
+-		rdev->config.evergreen.max_tile_pipes = 4;
++		rdev->config.evergreen.max_tile_pipes = 2;
+ 		if (rdev->pdev->device == 0x9648)
+ 			rdev->config.evergreen.max_simds = 3;
+ 		else if ((rdev->pdev->device == 0x9647) ||
+@@ -1819,7 +1875,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ 		break;
+ 	case CHIP_CAICOS:
+ 		rdev->config.evergreen.num_ses = 1;
+-		rdev->config.evergreen.max_pipes = 2;
++		rdev->config.evergreen.max_pipes = 4;
+ 		rdev->config.evergreen.max_tile_pipes = 2;
+ 		rdev->config.evergreen.max_simds = 2;
+ 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+@@ -1868,7 +1924,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
+ 
+ 
+ 	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+-	if (rdev->flags & RADEON_IS_IGP)
++	if ((rdev->family == CHIP_PALM) ||
++	    (rdev->family == CHIP_SUMO) ||
++	    (rdev->family == CHIP_SUMO2))
+ 		mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
+ 	else
+ 		mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+@@ -2272,7 +2330,9 @@ int evergreen_mc_init(struct radeon_device *rdev)
+ 
+ 	/* Get VRAM informations */
+ 	rdev->mc.vram_is_ddr = true;
+-	if (rdev->flags & RADEON_IS_IGP)
++	if ((rdev->family == CHIP_PALM) ||
++	    (rdev->family == CHIP_SUMO) ||
++	    (rdev->family == CHIP_SUMO2))
+ 		tmp = RREG32(FUS_MC_ARB_RAMCFG);
+ 	else
+ 		tmp = RREG32(MC_ARB_RAMCFG);
+@@ -2304,12 +2364,14 @@ int evergreen_mc_init(struct radeon_device *rdev)
+ 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
+ 	/* Setup GPU memory space */
+-	if (rdev->flags & RADEON_IS_IGP) {
++	if ((rdev->family == CHIP_PALM) ||
++	    (rdev->family == CHIP_SUMO) ||
++	    (rdev->family == CHIP_SUMO2)) {
+ 		/* size in bytes on fusion */
+ 		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+ 		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+ 	} else {
+-		/* size in MB on evergreen */
++		/* size in MB on evergreen/cayman/tn */
+ 		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ 		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ 	}
+@@ -2320,7 +2382,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
+ 	return 0;
+ }
+ 
+-bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
++bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+ 	u32 srbm_status;
+ 	u32 grbm_status;
+@@ -2333,19 +2395,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
+ 	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+ 	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
+ 	if (!(grbm_status & GUI_ACTIVE)) {
+-		r100_gpu_lockup_update(lockup, &rdev->cp);
++		r100_gpu_lockup_update(lockup, ring);
+ 		return false;
+ 	}
+ 	/* force CP activities */
+-	r = radeon_ring_lock(rdev, 2);
++	r = radeon_ring_lock(rdev, ring, 2);
+ 	if (!r) {
+ 		/* PACKET2 NOP */
+-		radeon_ring_write(rdev, 0x80000000);
+-		radeon_ring_write(rdev, 0x80000000);
+-		radeon_ring_unlock_commit(rdev);
++		radeon_ring_write(ring, 0x80000000);
++		radeon_ring_write(ring, 0x80000000);
++		radeon_ring_unlock_commit(rdev, ring);
+ 	}
+-	rdev->cp.rptr = RREG32(CP_RB_RPTR);
+-	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
++	ring->rptr = RREG32(CP_RB_RPTR);
++	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ }
+ 
+ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
+@@ -2437,7 +2499,13 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+ {
+ 	u32 tmp;
+ 
+-	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
++	if (rdev->family >= CHIP_CAYMAN) {
++		cayman_cp_int_cntl_setup(rdev, 0,
++					 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
++		cayman_cp_int_cntl_setup(rdev, 1, 0);
++		cayman_cp_int_cntl_setup(rdev, 2, 0);
++	} else
++		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ 	WREG32(GRBM_INT_CNTL, 0);
+ 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+@@ -2461,7 +2529,9 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+ 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ 	}
+ 
+-	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
++	/* only one DAC on DCE6 */
++	if (!ASIC_IS_DCE6(rdev))
++		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+ 	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
+ 
+ 	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+@@ -2482,6 +2552,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
+ int evergreen_irq_set(struct radeon_device *rdev)
+ {
+ 	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
++	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
+ 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
+ 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
+ 	u32 grbm_int_cntl = 0;
+@@ -2506,11 +2577,28 @@ int evergreen_irq_set(struct radeon_device *rdev)
+ 	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ 	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ 
+-	if (rdev->irq.sw_int) {
+-		DRM_DEBUG("evergreen_irq_set: sw int\n");
+-		cp_int_cntl |= RB_INT_ENABLE;
+-		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
++	if (rdev->family >= CHIP_CAYMAN) {
++		/* enable CP interrupts on all rings */
++		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
++			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
++			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
++		}
++		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
++			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
++			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
++		}
++		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
++			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
++			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
++		}
++	} else {
++		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
++			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
++			cp_int_cntl |= RB_INT_ENABLE;
++			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
++		}
+ 	}
++
+ 	if (rdev->irq.crtc_vblank_int[0] ||
+ 	    rdev->irq.pflip[0]) {
+ 		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
+@@ -2570,7 +2658,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
+ 		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
+ 	}
+ 
+-	WREG32(CP_INT_CNTL, cp_int_cntl);
++	if (rdev->family >= CHIP_CAYMAN) {
++		cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
++		cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
++		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
++	} else
++		WREG32(CP_INT_CNTL, cp_int_cntl);
+ 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
+ 
+ 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+@@ -2985,11 +3078,24 @@ restart_ih:
+ 		case 177: /* CP_INT in IB1 */
+ 		case 178: /* CP_INT in IB2 */
+ 			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+-			radeon_fence_process(rdev);
++			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ 			break;
+ 		case 181: /* CP EOP event */
+ 			DRM_DEBUG("IH: CP EOP\n");
+-			radeon_fence_process(rdev);
++			if (rdev->family >= CHIP_CAYMAN) {
++				switch (src_data) {
++				case 0:
++					radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
++					break;
++				case 1:
++					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
++					break;
++				case 2:
++					radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
++					break;
++				}
++			} else
++				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ 			break;
+ 		case 233: /* GUI IDLE */
+ 			DRM_DEBUG("IH: GUI idle\n");
+@@ -3019,6 +3125,7 @@ restart_ih:
+ 
+ static int evergreen_startup(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	int r;
+ 
+ 	/* enable pcie gen2 link */
+@@ -3064,7 +3171,7 @@ static int evergreen_startup(struct radeon_device *rdev)
+ 	r = evergreen_blit_init(rdev);
+ 	if (r) {
+ 		r600_blit_fini(rdev);
+-		rdev->asic->copy = NULL;
++		rdev->asic->copy.copy = NULL;
+ 		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ 	}
+ 
+@@ -3073,6 +3180,12 @@ static int evergreen_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
+ 	/* Enable IRQ */
+ 	r = r600_irq_init(rdev);
+ 	if (r) {
+@@ -3082,7 +3195,9 @@ static int evergreen_startup(struct radeon_device *rdev)
+ 	}
+ 	evergreen_irq_set(rdev);
+ 
+-	r = radeon_ring_init(rdev, rdev->cp.ring_size);
++	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
++			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
++			     0, 0xfffff, RADEON_CP_PACKET2);
+ 	if (r)
+ 		return r;
+ 	r = evergreen_cp_load_microcode(rdev);
+@@ -3092,6 +3207,23 @@ static int evergreen_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_ib_pool_start(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
++	if (r) {
++		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
++		rdev->accel_working = false;
++		return r;
++	}
++
++	r = r600_audio_init(rdev);
++	if (r) {
++		DRM_ERROR("radeon: audio init failed\n");
++		return r;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -3111,15 +3243,11 @@ int evergreen_resume(struct radeon_device *rdev)
+ 	/* post card */
+ 	atom_asic_init(rdev->mode_info.atom_context);
+ 
++	rdev->accel_working = true;
+ 	r = evergreen_startup(rdev);
+ 	if (r) {
+ 		DRM_ERROR("evergreen startup failed on resume\n");
+-		return r;
+-	}
+-
+-	r = r600_ib_test(rdev);
+-	if (r) {
+-		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
++		rdev->accel_working = false;
+ 		return r;
+ 	}
+ 
+@@ -3129,13 +3257,17 @@ int evergreen_resume(struct radeon_device *rdev)
+ 
+ int evergreen_suspend(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
++
++	r600_audio_fini(rdev);
+ 	/* FIXME: we should wait for ring to be empty */
++	radeon_ib_pool_suspend(rdev);
++	r600_blit_suspend(rdev);
+ 	r700_cp_stop(rdev);
+-	rdev->cp.ready = false;
++	ring->ready = false;
+ 	evergreen_irq_suspend(rdev);
+ 	radeon_wb_disable(rdev);
+ 	evergreen_pcie_gart_disable(rdev);
+-	r600_blit_suspend(rdev);
+ 
+ 	return 0;
+ }
+@@ -3210,8 +3342,8 @@ int evergreen_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
+-	rdev->cp.ring_obj = NULL;
+-	r600_ring_init(rdev, 1024 * 1024);
++	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
++	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ 
+ 	rdev->ih.ring_obj = NULL;
+ 	r600_ih_ring_init(rdev, 64 * 1024);
+@@ -3220,29 +3352,24 @@ int evergreen_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_ib_pool_init(rdev);
+ 	rdev->accel_working = true;
++	if (r) {
++		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
++		rdev->accel_working = false;
++	}
++
+ 	r = evergreen_startup(rdev);
+ 	if (r) {
+ 		dev_err(rdev->dev, "disabling GPU acceleration\n");
+ 		r700_cp_fini(rdev);
+ 		r600_irq_fini(rdev);
+ 		radeon_wb_fini(rdev);
++		r100_ib_fini(rdev);
+ 		radeon_irq_kms_fini(rdev);
+ 		evergreen_pcie_gart_fini(rdev);
+ 		rdev->accel_working = false;
+ 	}
+-	if (rdev->accel_working) {
+-		r = radeon_ib_pool_init(rdev);
+-		if (r) {
+-			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
+-			rdev->accel_working = false;
+-		}
+-		r = r600_ib_test(rdev);
+-		if (r) {
+-			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+-			rdev->accel_working = false;
+-		}
+-	}
+ 
+ 	/* Don't start up if the MC ucode is missing on BTC parts.
+ 	 * The default clocks and voltages before the MC ucode
+@@ -3260,15 +3387,17 @@ int evergreen_init(struct radeon_device *rdev)
+ 
+ void evergreen_fini(struct radeon_device *rdev)
+ {
++	r600_audio_fini(rdev);
+ 	r600_blit_fini(rdev);
+ 	r700_cp_fini(rdev);
+ 	r600_irq_fini(rdev);
+ 	radeon_wb_fini(rdev);
+-	radeon_ib_pool_fini(rdev);
++	r100_ib_fini(rdev);
+ 	radeon_irq_kms_fini(rdev);
+ 	evergreen_pcie_gart_fini(rdev);
+ 	r600_vram_scratch_fini(rdev);
+ 	radeon_gem_fini(rdev);
++	radeon_semaphore_driver_fini(rdev);
+ 	radeon_fence_driver_fini(rdev);
+ 	radeon_agp_fini(rdev);
+ 	radeon_bo_fini(rdev);
+diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+index 914e5af..222acd2 100644
+--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
++++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+@@ -32,23 +32,14 @@
+ #include "evergreend.h"
+ #include "evergreen_blit_shaders.h"
+ #include "cayman_blit_shaders.h"
+-
+-#define DI_PT_RECTLIST        0x11
+-#define DI_INDEX_SIZE_16_BIT  0x0
+-#define DI_SRC_SEL_AUTO_INDEX 0x2
+-
+-#define FMT_8                 0x1
+-#define FMT_5_6_5             0x8
+-#define FMT_8_8_8_8           0x1a
+-#define COLOR_8               0x1
+-#define COLOR_5_6_5           0x8
+-#define COLOR_8_8_8_8         0x1a
++#include "radeon_blit_common.h"
+ 
+ /* emits 17 */
+ static void
+ set_render_target(struct radeon_device *rdev, int format,
+ 		  int w, int h, u64 gpu_addr)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	u32 cb_color_info;
+ 	int pitch, slice;
+ 
+@@ -62,23 +53,23 @@ set_render_target(struct radeon_device *rdev, int format,
+ 	pitch = (w / 8) - 1;
+ 	slice = ((w * h) / 64) - 1;
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+-	radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+-	radeon_ring_write(rdev, gpu_addr >> 8);
+-	radeon_ring_write(rdev, pitch);
+-	radeon_ring_write(rdev, slice);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, cb_color_info);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 0);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
++	radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
++	radeon_ring_write(ring, gpu_addr >> 8);
++	radeon_ring_write(ring, pitch);
++	radeon_ring_write(ring, slice);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, cb_color_info);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
+ }
+ 
+ /* emits 5dw */
+@@ -87,6 +78,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
+ 		    u32 sync_type, u32 size,
+ 		    u64 mc_addr)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	u32 cp_coher_size;
+ 
+ 	if (size == 0xffffffff)
+@@ -99,39 +91,40 @@ cp_set_surface_sync(struct radeon_device *rdev,
+ 		 * to the RB directly. For IBs, the CP programs this as part of the
+ 		 * surface_sync packet.
+ 		 */
+-		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+-		radeon_ring_write(rdev, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+-		radeon_ring_write(rdev, 0); /* CP_COHER_CNTL2 */
++		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
++		radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
++		radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
+ 	}
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+-	radeon_ring_write(rdev, sync_type);
+-	radeon_ring_write(rdev, cp_coher_size);
+-	radeon_ring_write(rdev, mc_addr >> 8);
+-	radeon_ring_write(rdev, 10); /* poll interval */
++	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
++	radeon_ring_write(ring, sync_type);
++	radeon_ring_write(ring, cp_coher_size);
++	radeon_ring_write(ring, mc_addr >> 8);
++	radeon_ring_write(ring, 10); /* poll interval */
+ }
+ 
+ /* emits 11dw + 1 surface sync = 16dw */
+ static void
+ set_shaders(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	u64 gpu_addr;
+ 
+ 	/* VS */
+ 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+-	radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+-	radeon_ring_write(rdev, gpu_addr >> 8);
+-	radeon_ring_write(rdev, 2);
+-	radeon_ring_write(rdev, 0);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
++	radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
++	radeon_ring_write(ring, gpu_addr >> 8);
++	radeon_ring_write(ring, 2);
++	radeon_ring_write(ring, 0);
+ 
+ 	/* PS */
+ 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+-	radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+-	radeon_ring_write(rdev, gpu_addr >> 8);
+-	radeon_ring_write(rdev, 1);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 2);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
++	radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
++	radeon_ring_write(ring, gpu_addr >> 8);
++	radeon_ring_write(ring, 1);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 2);
+ 
+ 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+ 	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
+@@ -141,6 +134,7 @@ set_shaders(struct radeon_device *rdev)
+ static void
+ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
+ 
+ 	/* high addr, stride */
+@@ -155,16 +149,16 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
+ 		SQ_VTCX_SEL_Z(SQ_SEL_Z) |
+ 		SQ_VTCX_SEL_W(SQ_SEL_W);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
+-	radeon_ring_write(rdev, 0x580);
+-	radeon_ring_write(rdev, gpu_addr & 0xffffffff);
+-	radeon_ring_write(rdev, 48 - 1); /* size */
+-	radeon_ring_write(rdev, sq_vtx_constant_word2);
+-	radeon_ring_write(rdev, sq_vtx_constant_word3);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
++	radeon_ring_write(ring, 0x580);
++	radeon_ring_write(ring, gpu_addr & 0xffffffff);
++	radeon_ring_write(ring, 48 - 1); /* size */
++	radeon_ring_write(ring, sq_vtx_constant_word2);
++	radeon_ring_write(ring, sq_vtx_constant_word3);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
+ 
+ 	if ((rdev->family == CHIP_CEDAR) ||
+ 	    (rdev->family == CHIP_PALM) ||
+@@ -185,6 +179,7 @@ set_tex_resource(struct radeon_device *rdev,
+ 		 int format, int w, int h, int pitch,
+ 		 u64 gpu_addr, u32 size)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	u32 sq_tex_resource_word0, sq_tex_resource_word1;
+ 	u32 sq_tex_resource_word4, sq_tex_resource_word7;
+ 
+@@ -208,16 +203,16 @@ set_tex_resource(struct radeon_device *rdev,
+ 	cp_set_surface_sync(rdev,
+ 			    PACKET3_TC_ACTION_ENA, size, gpu_addr);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, sq_tex_resource_word0);
+-	radeon_ring_write(rdev, sq_tex_resource_word1);
+-	radeon_ring_write(rdev, gpu_addr >> 8);
+-	radeon_ring_write(rdev, gpu_addr >> 8);
+-	radeon_ring_write(rdev, sq_tex_resource_word4);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, sq_tex_resource_word7);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, sq_tex_resource_word0);
++	radeon_ring_write(ring, sq_tex_resource_word1);
++	radeon_ring_write(ring, gpu_addr >> 8);
++	radeon_ring_write(ring, gpu_addr >> 8);
++	radeon_ring_write(ring, sq_tex_resource_word4);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, sq_tex_resource_word7);
+ }
+ 
+ /* emits 12 */
+@@ -225,53 +220,55 @@ static void
+ set_scissors(struct radeon_device *rdev, int x1, int y1,
+ 	     int x2, int y2)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	/* workaround some hw bugs */
+ 	if (x2 == 0)
+ 		x1 = 1;
+ 	if (y2 == 0)
+ 		y1 = 1;
+-	if (rdev->family == CHIP_CAYMAN) {
++	if (rdev->family >= CHIP_CAYMAN) {
+ 		if ((x2 == 1) && (y2 == 1))
+ 			x2 = 2;
+ 	}
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+-	radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+-	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
+-	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
++	radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
++	radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
++	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+-	radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+-	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
+-	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
++	radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
++	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
++	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+-	radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+-	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
+-	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
++	radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
++	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
++	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+ }
+ 
+ /* emits 10 */
+ static void
+ draw_auto(struct radeon_device *rdev)
+ {
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+-	radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+-	radeon_ring_write(rdev, DI_PT_RECTLIST);
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
++	radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
++	radeon_ring_write(ring, DI_PT_RECTLIST);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
++	radeon_ring_write(ring,
+ #ifdef __BIG_ENDIAN
+ 			  (2 << 2) |
+ #endif
+ 			  DI_INDEX_SIZE_16_BIT);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
+-	radeon_ring_write(rdev, 1);
++	radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
++	radeon_ring_write(ring, 1);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+-	radeon_ring_write(rdev, 3);
+-	radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
++	radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
++	radeon_ring_write(ring, 3);
++	radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
+ 
+ }
+ 
+@@ -279,6 +276,7 @@ draw_auto(struct radeon_device *rdev)
+ static void
+ set_default_state(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
+ 	u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
+ 	u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
+@@ -292,8 +290,8 @@ set_default_state(struct radeon_device *rdev)
+ 	int dwords;
+ 
+ 	/* set clear context state */
+-	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+-	radeon_ring_write(rdev, 0);
++	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
++	radeon_ring_write(ring, 0);
+ 
+ 	if (rdev->family < CHIP_CAYMAN) {
+ 		switch (rdev->family) {
+@@ -550,60 +548,60 @@ set_default_state(struct radeon_device *rdev)
+ 					    NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
+ 
+ 		/* disable dyn gprs */
+-		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+-		radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+-		radeon_ring_write(rdev, 0);
++		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
++		radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
++		radeon_ring_write(ring, 0);
+ 
+ 		/* setup LDS */
+-		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+-		radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+-		radeon_ring_write(rdev, 0x10001000);
++		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
++		radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
++		radeon_ring_write(ring, 0x10001000);
+ 
+ 		/* SQ config */
+-		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+-		radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+-		radeon_ring_write(rdev, sq_config);
+-		radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
+-		radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
+-		radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
+-		radeon_ring_write(rdev, 0);
+-		radeon_ring_write(rdev, 0);
+-		radeon_ring_write(rdev, sq_thread_resource_mgmt);
+-		radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
+-		radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
+-		radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
+-		radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
++		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
++		radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
++		radeon_ring_write(ring, sq_config);
++		radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
++		radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
++		radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
++		radeon_ring_write(ring, 0);
++		radeon_ring_write(ring, 0);
++		radeon_ring_write(ring, sq_thread_resource_mgmt);
++		radeon_ring_write(ring, sq_thread_resource_mgmt_2);
++		radeon_ring_write(ring, sq_stack_resource_mgmt_1);
++		radeon_ring_write(ring, sq_stack_resource_mgmt_2);
++		radeon_ring_write(ring, sq_stack_resource_mgmt_3);
+ 	}
+ 
+ 	/* CONTEXT_CONTROL */
+-	radeon_ring_write(rdev, 0xc0012800);
+-	radeon_ring_write(rdev, 0x80000000);
+-	radeon_ring_write(rdev, 0x80000000);
++	radeon_ring_write(ring, 0xc0012800);
++	radeon_ring_write(ring, 0x80000000);
++	radeon_ring_write(ring, 0x80000000);
+ 
+ 	/* SQ_VTX_BASE_VTX_LOC */
+-	radeon_ring_write(rdev, 0xc0026f00);
+-	radeon_ring_write(rdev, 0x00000000);
+-	radeon_ring_write(rdev, 0x00000000);
+-	radeon_ring_write(rdev, 0x00000000);
++	radeon_ring_write(ring, 0xc0026f00);
++	radeon_ring_write(ring, 0x00000000);
++	radeon_ring_write(ring, 0x00000000);
++	radeon_ring_write(ring, 0x00000000);
+ 
+ 	/* SET_SAMPLER */
+-	radeon_ring_write(rdev, 0xc0036e00);
+-	radeon_ring_write(rdev, 0x00000000);
+-	radeon_ring_write(rdev, 0x00000012);
+-	radeon_ring_write(rdev, 0x00000000);
+-	radeon_ring_write(rdev, 0x00000000);
++	radeon_ring_write(ring, 0xc0036e00);
++	radeon_ring_write(ring, 0x00000000);
++	radeon_ring_write(ring, 0x00000012);
++	radeon_ring_write(ring, 0x00000000);
++	radeon_ring_write(ring, 0x00000000);
+ 
+ 	/* set to DX10/11 mode */
+-	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
+-	radeon_ring_write(rdev, 1);
++	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
++	radeon_ring_write(ring, 1);
+ 
+ 	/* emit an IB pointing at default state */
+ 	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
+ 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
+-	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+-	radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
+-	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
+-	radeon_ring_write(rdev, dwords);
++	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
++	radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
++	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
++	radeon_ring_write(ring, dwords);
+ 
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+index 3a10399..f85c0af 100644
+--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
++++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+@@ -24,6 +24,7 @@
+  *     Alex Deucher <alexander.deucher at amd.com>
+  */
+ 
++#include <linux/bug.h>
+ #include <linux/types.h>
+ #include <linux/kernel.h>
+ 
+diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
+index cd4590a..2cbd369 100644
+--- a/drivers/gpu/drm/radeon/evergreen_cs.c
++++ b/drivers/gpu/drm/radeon/evergreen_cs.c
+@@ -31,6 +31,9 @@
+ #include "evergreen_reg_safe.h"
+ #include "cayman_reg_safe.h"
+ 
++#define MAX(a,b)                   (((a)>(b))?(a):(b))
++#define MIN(a,b)                   (((a)<(b))?(a):(b))
++
+ static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ 					  struct radeon_cs_reloc **cs_reloc);
+ 
+@@ -40,42 +43,47 @@ struct evergreen_cs_track {
+ 	u32			npipes;
+ 	u32			row_size;
+ 	/* value we track */
+-	u32			nsamples;
+-	u32			cb_color_base_last[12];
++	u32			nsamples;		/* unused */
+ 	struct radeon_bo	*cb_color_bo[12];
+ 	u32			cb_color_bo_offset[12];
+-	struct radeon_bo	*cb_color_fmask_bo[8];
+-	struct radeon_bo	*cb_color_cmask_bo[8];
++	struct radeon_bo	*cb_color_fmask_bo[8];	/* unused */
++	struct radeon_bo	*cb_color_cmask_bo[8];	/* unused */
+ 	u32			cb_color_info[12];
+ 	u32			cb_color_view[12];
+-	u32			cb_color_pitch_idx[12];
+-	u32			cb_color_slice_idx[12];
+-	u32			cb_color_dim_idx[12];
+-	u32			cb_color_dim[12];
+ 	u32			cb_color_pitch[12];
+ 	u32			cb_color_slice[12];
+-	u32			cb_color_cmask_slice[8];
+-	u32			cb_color_fmask_slice[8];
++	u32			cb_color_slice_idx[12];
++	u32			cb_color_attrib[12];
++	u32			cb_color_cmask_slice[8];/* unused */
++	u32			cb_color_fmask_slice[8];/* unused */
+ 	u32			cb_target_mask;
+-	u32			cb_shader_mask;
++	u32			cb_shader_mask; /* unused */
+ 	u32			vgt_strmout_config;
+ 	u32			vgt_strmout_buffer_config;
++	struct radeon_bo	*vgt_strmout_bo[4];
++	u32			vgt_strmout_bo_offset[4];
++	u32			vgt_strmout_size[4];
+ 	u32			db_depth_control;
+ 	u32			db_depth_view;
++	u32			db_depth_slice;
+ 	u32			db_depth_size;
+-	u32			db_depth_size_idx;
+ 	u32			db_z_info;
+-	u32			db_z_idx;
+ 	u32			db_z_read_offset;
+ 	u32			db_z_write_offset;
+ 	struct radeon_bo	*db_z_read_bo;
+ 	struct radeon_bo	*db_z_write_bo;
+ 	u32			db_s_info;
+-	u32			db_s_idx;
+ 	u32			db_s_read_offset;
+ 	u32			db_s_write_offset;
+ 	struct radeon_bo	*db_s_read_bo;
+ 	struct radeon_bo	*db_s_write_bo;
++	bool			sx_misc_kill_all_prims;
++	bool			cb_dirty;
++	bool			db_dirty;
++	bool			streamout_dirty;
++	u32			htile_offset;
++	u32			htile_surface;
++	struct radeon_bo	*htile_bo;
+ };
+ 
+ static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+@@ -103,19 +111,6 @@ static u32 evergreen_cs_get_num_banks(u32 nbanks)
+ 	}
+ }
+ 
+-static u32 evergreen_cs_get_tile_split(u32 row_size)
+-{
+-	switch (row_size) {
+-	case 1:
+-	default:
+-		return ADDR_SURF_TILE_SPLIT_1KB;
+-	case 2:
+-		return ADDR_SURF_TILE_SPLIT_2KB;
+-	case 4:
+-		return ADDR_SURF_TILE_SPLIT_4KB;
+-	}
+-}
+-
+ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
+ {
+ 	int i;
+@@ -128,50 +123,858 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
+ 	}
+ 
+ 	for (i = 0; i < 12; i++) {
+-		track->cb_color_base_last[i] = 0;
+ 		track->cb_color_bo[i] = NULL;
+ 		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+ 		track->cb_color_info[i] = 0;
+-		track->cb_color_view[i] = 0;
+-		track->cb_color_pitch_idx[i] = 0;
+-		track->cb_color_slice_idx[i] = 0;
+-		track->cb_color_dim[i] = 0;
++		track->cb_color_view[i] = 0xFFFFFFFF;
+ 		track->cb_color_pitch[i] = 0;
+-		track->cb_color_slice[i] = 0;
+-		track->cb_color_dim[i] = 0;
++		track->cb_color_slice[i] = 0xfffffff;
++		track->cb_color_slice_idx[i] = 0;
+ 	}
+ 	track->cb_target_mask = 0xFFFFFFFF;
+ 	track->cb_shader_mask = 0xFFFFFFFF;
++	track->cb_dirty = true;
+ 
++	track->db_depth_slice = 0xffffffff;
+ 	track->db_depth_view = 0xFFFFC000;
+ 	track->db_depth_size = 0xFFFFFFFF;
+-	track->db_depth_size_idx = 0;
+ 	track->db_depth_control = 0xFFFFFFFF;
+ 	track->db_z_info = 0xFFFFFFFF;
+-	track->db_z_idx = 0xFFFFFFFF;
+ 	track->db_z_read_offset = 0xFFFFFFFF;
+ 	track->db_z_write_offset = 0xFFFFFFFF;
+ 	track->db_z_read_bo = NULL;
+ 	track->db_z_write_bo = NULL;
+ 	track->db_s_info = 0xFFFFFFFF;
+-	track->db_s_idx = 0xFFFFFFFF;
+ 	track->db_s_read_offset = 0xFFFFFFFF;
+ 	track->db_s_write_offset = 0xFFFFFFFF;
+ 	track->db_s_read_bo = NULL;
+ 	track->db_s_write_bo = NULL;
++	track->db_dirty = true;
++	track->htile_bo = NULL;
++	track->htile_offset = 0xFFFFFFFF;
++	track->htile_surface = 0;
++
++	for (i = 0; i < 4; i++) {
++		track->vgt_strmout_size[i] = 0;
++		track->vgt_strmout_bo[i] = NULL;
++		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
++	}
++	track->streamout_dirty = true;
++	track->sx_misc_kill_all_prims = false;
+ }
+ 
+-static int evergreen_cs_track_check(struct radeon_cs_parser *p)
++struct eg_surface {
++	/* value gathered from cs */
++	unsigned	nbx;
++	unsigned	nby;
++	unsigned	format;
++	unsigned	mode;
++	unsigned	nbanks;
++	unsigned	bankw;
++	unsigned	bankh;
++	unsigned	tsplit;
++	unsigned	mtilea;
++	unsigned	nsamples;
++	/* output value */
++	unsigned	bpe;
++	unsigned	layer_size;
++	unsigned	palign;
++	unsigned	halign;
++	unsigned long	base_align;
++};
++
++static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
++					  struct eg_surface *surf,
++					  const char *prefix)
++{
++	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
++	surf->base_align = surf->bpe;
++	surf->palign = 1;
++	surf->halign = 1;
++	return 0;
++}
++
++static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
++						  struct eg_surface *surf,
++						  const char *prefix)
++{
++	struct evergreen_cs_track *track = p->track;
++	unsigned palign;
++
++	palign = MAX(64, track->group_size / surf->bpe);
++	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
++	surf->base_align = track->group_size;
++	surf->palign = palign;
++	surf->halign = 1;
++	if (surf->nbx & (palign - 1)) {
++		if (prefix) {
++			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
++				 __func__, __LINE__, prefix, surf->nbx, palign);
++		}
++		return -EINVAL;
++	}
++	return 0;
++}
++
++static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
++				      struct eg_surface *surf,
++				      const char *prefix)
++{
++	struct evergreen_cs_track *track = p->track;
++	unsigned palign;
++
++	palign = track->group_size / (8 * surf->bpe * surf->nsamples);
++	palign = MAX(8, palign);
++	surf->layer_size = surf->nbx * surf->nby * surf->bpe;
++	surf->base_align = track->group_size;
++	surf->palign = palign;
++	surf->halign = 8;
++	if ((surf->nbx & (palign - 1))) {
++		if (prefix) {
++			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
++				 __func__, __LINE__, prefix, surf->nbx, palign,
++				 track->group_size, surf->bpe, surf->nsamples);
++		}
++		return -EINVAL;
++	}
++	if ((surf->nby & (8 - 1))) {
++		if (prefix) {
++			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
++				 __func__, __LINE__, prefix, surf->nby);
++		}
++		return -EINVAL;
++	}
++	return 0;
++}
++
++static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
++				      struct eg_surface *surf,
++				      const char *prefix)
++{
++	struct evergreen_cs_track *track = p->track;
++	unsigned palign, halign, tileb, slice_pt;
++	unsigned mtile_pr, mtile_ps, mtileb;
++
++	tileb = 64 * surf->bpe * surf->nsamples;
++	slice_pt = 1;
++	if (tileb > surf->tsplit) {
++		slice_pt = tileb / surf->tsplit;
++	}
++	tileb = tileb / slice_pt;
++	/* macro tile width & height */
++	palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
++	halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
++	mtileb = (palign / 8) * (halign / 8) * tileb;;
++	mtile_pr = surf->nbx / palign;
++	mtile_ps = (mtile_pr * surf->nby) / halign;
++	surf->layer_size = mtile_ps * mtileb * slice_pt;
++	surf->base_align = (palign / 8) * (halign / 8) * tileb;
++	surf->palign = palign;
++	surf->halign = halign;
++
++	if ((surf->nbx & (palign - 1))) {
++		if (prefix) {
++			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
++				 __func__, __LINE__, prefix, surf->nbx, palign);
++		}
++		return -EINVAL;
++	}
++	if ((surf->nby & (halign - 1))) {
++		if (prefix) {
++			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
++				 __func__, __LINE__, prefix, surf->nby, halign);
++		}
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++static int evergreen_surface_check(struct radeon_cs_parser *p,
++				   struct eg_surface *surf,
++				   const char *prefix)
++{
++	/* some common value computed here */
++	surf->bpe = r600_fmt_get_blocksize(surf->format);
++
++	switch (surf->mode) {
++	case ARRAY_LINEAR_GENERAL:
++		return evergreen_surface_check_linear(p, surf, prefix);
++	case ARRAY_LINEAR_ALIGNED:
++		return evergreen_surface_check_linear_aligned(p, surf, prefix);
++	case ARRAY_1D_TILED_THIN1:
++		return evergreen_surface_check_1d(p, surf, prefix);
++	case ARRAY_2D_TILED_THIN1:
++		return evergreen_surface_check_2d(p, surf, prefix);
++	default:
++		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
++				__func__, __LINE__, prefix, surf->mode);
++		return -EINVAL;
++	}
++	return -EINVAL;
++}
++
++static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
++					      struct eg_surface *surf,
++					      const char *prefix)
++{
++	switch (surf->mode) {
++	case ARRAY_2D_TILED_THIN1:
++		break;
++	case ARRAY_LINEAR_GENERAL:
++	case ARRAY_LINEAR_ALIGNED:
++	case ARRAY_1D_TILED_THIN1:
++		return 0;
++	default:
++		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
++				__func__, __LINE__, prefix, surf->mode);
++		return -EINVAL;
++	}
++
++	switch (surf->nbanks) {
++	case 0: surf->nbanks = 2; break;
++	case 1: surf->nbanks = 4; break;
++	case 2: surf->nbanks = 8; break;
++	case 3: surf->nbanks = 16; break;
++	default:
++		dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
++			 __func__, __LINE__, prefix, surf->nbanks);
++		return -EINVAL;
++	}
++	switch (surf->bankw) {
++	case 0: surf->bankw = 1; break;
++	case 1: surf->bankw = 2; break;
++	case 2: surf->bankw = 4; break;
++	case 3: surf->bankw = 8; break;
++	default:
++		dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
++			 __func__, __LINE__, prefix, surf->bankw);
++		return -EINVAL;
++	}
++	switch (surf->bankh) {
++	case 0: surf->bankh = 1; break;
++	case 1: surf->bankh = 2; break;
++	case 2: surf->bankh = 4; break;
++	case 3: surf->bankh = 8; break;
++	default:
++		dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
++			 __func__, __LINE__, prefix, surf->bankh);
++		return -EINVAL;
++	}
++	switch (surf->mtilea) {
++	case 0: surf->mtilea = 1; break;
++	case 1: surf->mtilea = 2; break;
++	case 2: surf->mtilea = 4; break;
++	case 3: surf->mtilea = 8; break;
++	default:
++		dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
++			 __func__, __LINE__, prefix, surf->mtilea);
++		return -EINVAL;
++	}
++	switch (surf->tsplit) {
++	case 0: surf->tsplit = 64; break;
++	case 1: surf->tsplit = 128; break;
++	case 2: surf->tsplit = 256; break;
++	case 3: surf->tsplit = 512; break;
++	case 4: surf->tsplit = 1024; break;
++	case 5: surf->tsplit = 2048; break;
++	case 6: surf->tsplit = 4096; break;
++	default:
++		dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
++			 __func__, __LINE__, prefix, surf->tsplit);
++		return -EINVAL;
++	}
++	return 0;
++}
++
++static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
++{
++	struct evergreen_cs_track *track = p->track;
++	struct eg_surface surf;
++	unsigned pitch, slice, mslice;
++	unsigned long offset;
++	int r;
++
++	mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
++	pitch = track->cb_color_pitch[id];
++	slice = track->cb_color_slice[id];
++	surf.nbx = (pitch + 1) * 8;
++	surf.nby = ((slice + 1) * 64) / surf.nbx;
++	surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
++	surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
++	surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
++	surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
++	surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
++	surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
++	surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
++	surf.nsamples = 1;
++
++	if (!r600_fmt_is_valid_color(surf.format)) {
++		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
++			 __func__, __LINE__, surf.format,
++			id, track->cb_color_info[id]);
++		return -EINVAL;
++	}
++
++	r = evergreen_surface_value_conv_check(p, &surf, "cb");
++	if (r) {
++		return r;
++	}
++
++	r = evergreen_surface_check(p, &surf, "cb");
++	if (r) {
++		dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
++			 __func__, __LINE__, id, track->cb_color_pitch[id],
++			 track->cb_color_slice[id], track->cb_color_attrib[id],
++			 track->cb_color_info[id]);
++		return r;
++	}
++
++	offset = track->cb_color_bo_offset[id] << 8;
++	if (offset & (surf.base_align - 1)) {
++		dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
++			 __func__, __LINE__, id, offset, surf.base_align);
++		return -EINVAL;
++	}
++
++	offset += surf.layer_size * mslice;
++	if (offset > radeon_bo_size(track->cb_color_bo[id])) {
++		/* old ddx are broken they allocate bo with w*h*bpp but
++		 * program slice with ALIGN(h, 8), catch this and patch
++		 * command stream.
++		 */
++		if (!surf.mode) {
++			volatile u32 *ib = p->ib->ptr;
++			unsigned long tmp, nby, bsize, size, min = 0;
++
++			/* find the height the ddx wants */
++			if (surf.nby > 8) {
++				min = surf.nby - 8;
++			}
++			bsize = radeon_bo_size(track->cb_color_bo[id]);
++			tmp = track->cb_color_bo_offset[id] << 8;
++			for (nby = surf.nby; nby > min; nby--) {
++				size = nby * surf.nbx * surf.bpe * surf.nsamples;
++				if ((tmp + size * mslice) <= bsize) {
++					break;
++				}
++			}
++			if (nby > min) {
++				surf.nby = nby;
++				slice = ((nby * surf.nbx) / 64) - 1;
++				if (!evergreen_surface_check(p, &surf, "cb")) {
++					/* check if this one works */
++					tmp += surf.layer_size * mslice;
++					if (tmp <= bsize) {
++						ib[track->cb_color_slice_idx[id]] = slice;
++						goto old_ddx_ok;
++					}
++				}
++			}
++		}
++		dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
++			 "offset %d, max layer %d, bo size %ld, slice %d)\n",
++			 __func__, __LINE__, id, surf.layer_size,
++			track->cb_color_bo_offset[id] << 8, mslice,
++			radeon_bo_size(track->cb_color_bo[id]), slice);
++		dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
++			 __func__, __LINE__, surf.nbx, surf.nby,
++			surf.mode, surf.bpe, surf.nsamples,
++			surf.bankw, surf.bankh,
++			surf.tsplit, surf.mtilea);
++		return -EINVAL;
++	}
++old_ddx_ok:
++
++	return 0;
++}
++
++static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
++						unsigned nbx, unsigned nby)
++{
++	struct evergreen_cs_track *track = p->track;
++	unsigned long size;
++
++	if (track->htile_bo == NULL) {
++		dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
++				__func__, __LINE__, track->db_z_info);
++		return -EINVAL;
++	}
++
++	if (G_028ABC_LINEAR(track->htile_surface)) {
++		/* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
++		nbx = round_up(nbx, 16 * 8);
++		/* height is npipes htiles aligned == npipes * 8 pixel aligned */
++		nby = round_up(nby, track->npipes * 8);
++	} else {
++		switch (track->npipes) {
++		case 8:
++			nbx = round_up(nbx, 64 * 8);
++			nby = round_up(nby, 64 * 8);
++			break;
++		case 4:
++			nbx = round_up(nbx, 64 * 8);
++			nby = round_up(nby, 32 * 8);
++			break;
++		case 2:
++			nbx = round_up(nbx, 32 * 8);
++			nby = round_up(nby, 32 * 8);
++			break;
++		case 1:
++			nbx = round_up(nbx, 32 * 8);
++			nby = round_up(nby, 16 * 8);
++			break;
++		default:
++			dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
++					__func__, __LINE__, track->npipes);
++			return -EINVAL;
++		}
++	}
++	/* compute number of htile */
++	nbx = nbx / 8;
++	nby = nby / 8;
++	size = nbx * nby * 4;
++	size += track->htile_offset;
++
++	if (size > radeon_bo_size(track->htile_bo)) {
++		dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
++				__func__, __LINE__, radeon_bo_size(track->htile_bo),
++				size, nbx, nby);
++		return -EINVAL;
++	}
++	return 0;
++}
++
++static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
++{
++	struct evergreen_cs_track *track = p->track;
++	struct eg_surface surf;
++	unsigned pitch, slice, mslice;
++	unsigned long offset;
++	int r;
++
++	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
++	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
++	slice = track->db_depth_slice;
++	surf.nbx = (pitch + 1) * 8;
++	surf.nby = ((slice + 1) * 64) / surf.nbx;
++	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
++	surf.format = G_028044_FORMAT(track->db_s_info);
++	surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
++	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
++	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
++	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
++	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
++	surf.nsamples = 1;
++
++	if (surf.format != 1) {
++		dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
++			 __func__, __LINE__, surf.format);
++		return -EINVAL;
++	}
++	/* replace by color format so we can use same code */
++	surf.format = V_028C70_COLOR_8;
++
++	r = evergreen_surface_value_conv_check(p, &surf, "stencil");
++	if (r) {
++		return r;
++	}
++
++	r = evergreen_surface_check(p, &surf, NULL);
++	if (r) {
++		/* old userspace doesn't compute proper depth/stencil alignment
++		 * check that alignment against a bigger byte per elements and
++		 * only report if that alignment is wrong too.
++		 */
++		surf.format = V_028C70_COLOR_8_8_8_8;
++		r = evergreen_surface_check(p, &surf, "stencil");
++		if (r) {
++			dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
++				 __func__, __LINE__, track->db_depth_size,
++				 track->db_depth_slice, track->db_s_info, track->db_z_info);
++		}
++		return r;
++	}
++
++	offset = track->db_s_read_offset << 8;
++	if (offset & (surf.base_align - 1)) {
++		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
++			 __func__, __LINE__, offset, surf.base_align);
++		return -EINVAL;
++	}
++	offset += surf.layer_size * mslice;
++	if (offset > radeon_bo_size(track->db_s_read_bo)) {
++		dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
++			 "offset %ld, max layer %d, bo size %ld)\n",
++			 __func__, __LINE__, surf.layer_size,
++			(unsigned long)track->db_s_read_offset << 8, mslice,
++			radeon_bo_size(track->db_s_read_bo));
++		dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
++			 __func__, __LINE__, track->db_depth_size,
++			 track->db_depth_slice, track->db_s_info, track->db_z_info);
++		return -EINVAL;
++	}
++
++	offset = track->db_s_write_offset << 8;
++	if (offset & (surf.base_align - 1)) {
++		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
++			 __func__, __LINE__, offset, surf.base_align);
++		return -EINVAL;
++	}
++	offset += surf.layer_size * mslice;
++	if (offset > radeon_bo_size(track->db_s_write_bo)) {
++		dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
++			 "offset %ld, max layer %d, bo size %ld)\n",
++			 __func__, __LINE__, surf.layer_size,
++			(unsigned long)track->db_s_write_offset << 8, mslice,
++			radeon_bo_size(track->db_s_write_bo));
++		return -EINVAL;
++	}
++
++	/* hyperz */
++	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
++		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
++		if (r) {
++			return r;
++		}
++	}
++
++	return 0;
++}
++
++static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
+ {
+ 	struct evergreen_cs_track *track = p->track;
++	struct eg_surface surf;
++	unsigned pitch, slice, mslice;
++	unsigned long offset;
++	int r;
++
++	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
++	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
++	slice = track->db_depth_slice;
++	surf.nbx = (pitch + 1) * 8;
++	surf.nby = ((slice + 1) * 64) / surf.nbx;
++	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
++	surf.format = G_028040_FORMAT(track->db_z_info);
++	surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
++	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
++	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
++	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
++	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
++	surf.nsamples = 1;
++
++	switch (surf.format) {
++	case V_028040_Z_16:
++		surf.format = V_028C70_COLOR_16;
++		break;
++	case V_028040_Z_24:
++	case V_028040_Z_32_FLOAT:
++		surf.format = V_028C70_COLOR_8_8_8_8;
++		break;
++	default:
++		dev_warn(p->dev, "%s:%d depth invalid format %d\n",
++			 __func__, __LINE__, surf.format);
++		return -EINVAL;
++	}
++
++	r = evergreen_surface_value_conv_check(p, &surf, "depth");
++	if (r) {
++		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
++			 __func__, __LINE__, track->db_depth_size,
++			 track->db_depth_slice, track->db_z_info);
++		return r;
++	}
+ 
+-	/* we don't support stream out buffer yet */
+-	if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
+-		dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
++	r = evergreen_surface_check(p, &surf, "depth");
++	if (r) {
++		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
++			 __func__, __LINE__, track->db_depth_size,
++			 track->db_depth_slice, track->db_z_info);
++		return r;
++	}
++
++	offset = track->db_z_read_offset << 8;
++	if (offset & (surf.base_align - 1)) {
++		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
++			 __func__, __LINE__, offset, surf.base_align);
++		return -EINVAL;
++	}
++	offset += surf.layer_size * mslice;
++	if (offset > radeon_bo_size(track->db_z_read_bo)) {
++		dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
++			 "offset %ld, max layer %d, bo size %ld)\n",
++			 __func__, __LINE__, surf.layer_size,
++			(unsigned long)track->db_z_read_offset << 8, mslice,
++			radeon_bo_size(track->db_z_read_bo));
++		return -EINVAL;
++	}
++
++	offset = track->db_z_write_offset << 8;
++	if (offset & (surf.base_align - 1)) {
++		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
++			 __func__, __LINE__, offset, surf.base_align);
++		return -EINVAL;
++	}
++	offset += surf.layer_size * mslice;
++	if (offset > radeon_bo_size(track->db_z_write_bo)) {
++		dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
++			 "offset %ld, max layer %d, bo size %ld)\n",
++			 __func__, __LINE__, surf.layer_size,
++			(unsigned long)track->db_z_write_offset << 8, mslice,
++			radeon_bo_size(track->db_z_write_bo));
+ 		return -EINVAL;
+ 	}
+ 
+-	/* XXX fill in */
++	/* hyperz */
++	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
++		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
++		if (r) {
++			return r;
++		}
++	}
++
++	return 0;
++}
++
++static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
++					       struct radeon_bo *texture,
++					       struct radeon_bo *mipmap,
++					       unsigned idx)
++{
++	struct eg_surface surf;
++	unsigned long toffset, moffset;
++	unsigned dim, llevel, mslice, width, height, depth, i;
++	u32 texdw[8];
++	int r;
++
++	texdw[0] = radeon_get_ib_value(p, idx + 0);
++	texdw[1] = radeon_get_ib_value(p, idx + 1);
++	texdw[2] = radeon_get_ib_value(p, idx + 2);
++	texdw[3] = radeon_get_ib_value(p, idx + 3);
++	texdw[4] = radeon_get_ib_value(p, idx + 4);
++	texdw[5] = radeon_get_ib_value(p, idx + 5);
++	texdw[6] = radeon_get_ib_value(p, idx + 6);
++	texdw[7] = radeon_get_ib_value(p, idx + 7);
++	dim = G_030000_DIM(texdw[0]);
++	llevel = G_030014_LAST_LEVEL(texdw[5]);
++	mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
++	width = G_030000_TEX_WIDTH(texdw[0]) + 1;
++	height =  G_030004_TEX_HEIGHT(texdw[1]) + 1;
++	depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
++	surf.format = G_03001C_DATA_FORMAT(texdw[7]);
++	surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
++	surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
++	surf.nby = r600_fmt_get_nblocksy(surf.format, height);
++	surf.mode = G_030004_ARRAY_MODE(texdw[1]);
++	surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
++	surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
++	surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
++	surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
++	surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
++	surf.nsamples = 1;
++	toffset = texdw[2] << 8;
++	moffset = texdw[3] << 8;
++
++	if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
++		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
++			 __func__, __LINE__, surf.format);
++		return -EINVAL;
++	}
++	switch (dim) {
++	case V_030000_SQ_TEX_DIM_1D:
++	case V_030000_SQ_TEX_DIM_2D:
++	case V_030000_SQ_TEX_DIM_CUBEMAP:
++	case V_030000_SQ_TEX_DIM_1D_ARRAY:
++	case V_030000_SQ_TEX_DIM_2D_ARRAY:
++		depth = 1;
++	case V_030000_SQ_TEX_DIM_3D:
++		break;
++	default:
++		dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
++			 __func__, __LINE__, dim);
++		return -EINVAL;
++	}
++
++	r = evergreen_surface_value_conv_check(p, &surf, "texture");
++	if (r) {
++		return r;
++	}
++
++	/* align height */
++	evergreen_surface_check(p, &surf, NULL);
++	surf.nby = ALIGN(surf.nby, surf.halign);
++
++	r = evergreen_surface_check(p, &surf, "texture");
++	if (r) {
++		dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
++			 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
++			 texdw[5], texdw[6], texdw[7]);
++		return r;
++	}
++
++	/* check texture size */
++	if (toffset & (surf.base_align - 1)) {
++		dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
++			 __func__, __LINE__, toffset, surf.base_align);
++		return -EINVAL;
++	}
++	if (moffset & (surf.base_align - 1)) {
++		dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
++			 __func__, __LINE__, moffset, surf.base_align);
++		return -EINVAL;
++	}
++	if (dim == SQ_TEX_DIM_3D) {
++		toffset += surf.layer_size * depth;
++	} else {
++		toffset += surf.layer_size * mslice;
++	}
++	if (toffset > radeon_bo_size(texture)) {
++		dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
++			 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
++			 __func__, __LINE__, surf.layer_size,
++			(unsigned long)texdw[2] << 8, mslice,
++			depth, radeon_bo_size(texture),
++			surf.nbx, surf.nby);
++		return -EINVAL;
++	}
++
++	/* check mipmap size */
++	for (i = 1; i <= llevel; i++) {
++		unsigned w, h, d;
++
++		w = r600_mip_minify(width, i);
++		h = r600_mip_minify(height, i);
++		d = r600_mip_minify(depth, i);
++		surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
++		surf.nby = r600_fmt_get_nblocksy(surf.format, h);
++
++		switch (surf.mode) {
++		case ARRAY_2D_TILED_THIN1:
++			if (surf.nbx < surf.palign || surf.nby < surf.halign) {
++				surf.mode = ARRAY_1D_TILED_THIN1;
++			}
++			/* recompute alignment */
++			evergreen_surface_check(p, &surf, NULL);
++			break;
++		case ARRAY_LINEAR_GENERAL:
++		case ARRAY_LINEAR_ALIGNED:
++		case ARRAY_1D_TILED_THIN1:
++			break;
++		default:
++			dev_warn(p->dev, "%s:%d invalid array mode %d\n",
++				 __func__, __LINE__, surf.mode);
++			return -EINVAL;
++		}
++		surf.nbx = ALIGN(surf.nbx, surf.palign);
++		surf.nby = ALIGN(surf.nby, surf.halign);
++
++		r = evergreen_surface_check(p, &surf, "mipmap");
++		if (r) {
++			return r;
++		}
++
++		if (dim == SQ_TEX_DIM_3D) {
++			moffset += surf.layer_size * d;
++		} else {
++			moffset += surf.layer_size * mslice;
++		}
++		if (moffset > radeon_bo_size(mipmap)) {
++			dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
++					"offset %ld, coffset %ld, max layer %d, depth %d, "
++					"bo size %ld) level0 (%d %d %d)\n",
++					__func__, __LINE__, i, surf.layer_size,
++					(unsigned long)texdw[3] << 8, moffset, mslice,
++					d, radeon_bo_size(mipmap),
++					width, height, depth);
++			dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
++				 __func__, __LINE__, surf.nbx, surf.nby,
++				surf.mode, surf.bpe, surf.nsamples,
++				surf.bankw, surf.bankh,
++				surf.tsplit, surf.mtilea);
++			return -EINVAL;
++		}
++	}
++
++	return 0;
++}
++
++static int evergreen_cs_track_check(struct radeon_cs_parser *p)
++{
++	struct evergreen_cs_track *track = p->track;
++	unsigned tmp, i;
++	int r;
++	unsigned buffer_mask = 0;
++
++	/* check streamout */
++	if (track->streamout_dirty && track->vgt_strmout_config) {
++		for (i = 0; i < 4; i++) {
++			if (track->vgt_strmout_config & (1 << i)) {
++				buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
++			}
++		}
++
++		for (i = 0; i < 4; i++) {
++			if (buffer_mask & (1 << i)) {
++				if (track->vgt_strmout_bo[i]) {
++					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
++							(u64)track->vgt_strmout_size[i];
++					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
++						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
++							  i, offset,
++							  radeon_bo_size(track->vgt_strmout_bo[i]));
++						return -EINVAL;
++					}
++				} else {
++					dev_warn(p->dev, "No buffer for streamout %d\n", i);
++					return -EINVAL;
++				}
++			}
++		}
++		track->streamout_dirty = false;
++	}
++
++	if (track->sx_misc_kill_all_prims)
++		return 0;
++
++	/* check that we have a cb for each enabled target
++	 */
++	if (track->cb_dirty) {
++		tmp = track->cb_target_mask;
++		for (i = 0; i < 8; i++) {
++			if ((tmp >> (i * 4)) & 0xF) {
++				/* at least one component is enabled */
++				if (track->cb_color_bo[i] == NULL) {
++					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
++						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
++					return -EINVAL;
++				}
++				/* check cb */
++				r = evergreen_cs_track_validate_cb(p, i);
++				if (r) {
++					return r;
++				}
++			}
++		}
++		track->cb_dirty = false;
++	}
++
++	if (track->db_dirty) {
++		/* Check stencil buffer */
++		if (G_028800_STENCIL_ENABLE(track->db_depth_control)) {
++			r = evergreen_cs_track_validate_stencil(p);
++			if (r)
++				return r;
++		}
++		/* Check depth buffer */
++		if (G_028800_Z_ENABLE(track->db_depth_control)) {
++			r = evergreen_cs_track_validate_depth(p);
++			if (r)
++				return r;
++		}
++		track->db_dirty = false;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -503,6 +1306,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		break;
+ 	case DB_DEPTH_CONTROL:
+ 		track->db_depth_control = radeon_get_ib_value(p, idx);
++		track->db_dirty = true;
+ 		break;
+ 	case CAYMAN_DB_EQAA:
+ 		if (p->rdev->family < CHIP_CAYMAN) {
+@@ -520,7 +1324,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		break;
+ 	case DB_Z_INFO:
+ 		track->db_z_info = radeon_get_ib_value(p, idx);
+-		if (!p->keep_tiling_flags) {
++		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ 			r = evergreen_cs_packet_next_reloc(p, &reloc);
+ 			if (r) {
+ 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+@@ -532,20 +1336,35 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 			ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ 			track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ 			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
++				unsigned bankw, bankh, mtaspect, tile_split;
++
++				evergreen_tiling_fields(reloc->lobj.tiling_flags,
++							&bankw, &bankh, &mtaspect,
++							&tile_split);
+ 				ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+-				ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
++				ib[idx] |= DB_TILE_SPLIT(tile_split) |
++						DB_BANK_WIDTH(bankw) |
++						DB_BANK_HEIGHT(bankh) |
++						DB_MACRO_TILE_ASPECT(mtaspect);
+ 			}
+ 		}
++		track->db_dirty = true;
+ 		break;
+ 	case DB_STENCIL_INFO:
+ 		track->db_s_info = radeon_get_ib_value(p, idx);
++		track->db_dirty = true;
+ 		break;
+ 	case DB_DEPTH_VIEW:
+ 		track->db_depth_view = radeon_get_ib_value(p, idx);
++		track->db_dirty = true;
+ 		break;
+ 	case DB_DEPTH_SIZE:
+ 		track->db_depth_size = radeon_get_ib_value(p, idx);
+-		track->db_depth_size_idx = idx;
++		track->db_dirty = true;
++		break;
++	case R_02805C_DB_DEPTH_SLICE:
++		track->db_depth_slice = radeon_get_ib_value(p, idx);
++		track->db_dirty = true;
+ 		break;
+ 	case DB_Z_READ_BASE:
+ 		r = evergreen_cs_packet_next_reloc(p, &reloc);
+@@ -557,6 +1376,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		track->db_z_read_offset = radeon_get_ib_value(p, idx);
+ 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ 		track->db_z_read_bo = reloc->robj;
++		track->db_dirty = true;
+ 		break;
+ 	case DB_Z_WRITE_BASE:
+ 		r = evergreen_cs_packet_next_reloc(p, &reloc);
+@@ -568,6 +1388,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		track->db_z_write_offset = radeon_get_ib_value(p, idx);
+ 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ 		track->db_z_write_bo = reloc->robj;
++		track->db_dirty = true;
+ 		break;
+ 	case DB_STENCIL_READ_BASE:
+ 		r = evergreen_cs_packet_next_reloc(p, &reloc);
+@@ -579,6 +1400,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		track->db_s_read_offset = radeon_get_ib_value(p, idx);
+ 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ 		track->db_s_read_bo = reloc->robj;
++		track->db_dirty = true;
+ 		break;
+ 	case DB_STENCIL_WRITE_BASE:
+ 		r = evergreen_cs_packet_next_reloc(p, &reloc);
+@@ -590,18 +1412,56 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		track->db_s_write_offset = radeon_get_ib_value(p, idx);
+ 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ 		track->db_s_write_bo = reloc->robj;
++		track->db_dirty = true;
+ 		break;
+ 	case VGT_STRMOUT_CONFIG:
+ 		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
++		track->streamout_dirty = true;
+ 		break;
+ 	case VGT_STRMOUT_BUFFER_CONFIG:
+ 		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
++		track->streamout_dirty = true;
+ 		break;
++	case VGT_STRMOUT_BUFFER_BASE_0:
++	case VGT_STRMOUT_BUFFER_BASE_1:
++	case VGT_STRMOUT_BUFFER_BASE_2:
++	case VGT_STRMOUT_BUFFER_BASE_3:
++		r = evergreen_cs_packet_next_reloc(p, &reloc);
++		if (r) {
++			dev_warn(p->dev, "bad SET_CONTEXT_REG "
++					"0x%04X\n", reg);
++			return -EINVAL;
++		}
++		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
++		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
++		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
++		track->vgt_strmout_bo[tmp] = reloc->robj;
++		track->streamout_dirty = true;
++		break;
++	case VGT_STRMOUT_BUFFER_SIZE_0:
++	case VGT_STRMOUT_BUFFER_SIZE_1:
++	case VGT_STRMOUT_BUFFER_SIZE_2:
++	case VGT_STRMOUT_BUFFER_SIZE_3:
++		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
++		/* size in register is DWs, convert to bytes */
++		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
++		track->streamout_dirty = true;
++		break;
++	case CP_COHER_BASE:
++		r = evergreen_cs_packet_next_reloc(p, &reloc);
++		if (r) {
++			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
++					"0x%04X\n", reg);
++			return -EINVAL;
++		}
++		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ 	case CB_TARGET_MASK:
+ 		track->cb_target_mask = radeon_get_ib_value(p, idx);
++		track->cb_dirty = true;
+ 		break;
+ 	case CB_SHADER_MASK:
+ 		track->cb_shader_mask = radeon_get_ib_value(p, idx);
++		track->cb_dirty = true;
+ 		break;
+ 	case PA_SC_AA_CONFIG:
+ 		if (p->rdev->family >= CHIP_CAYMAN) {
+@@ -631,6 +1491,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 	case CB_COLOR7_VIEW:
+ 		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
+ 		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
++		track->cb_dirty = true;
+ 		break;
+ 	case CB_COLOR8_VIEW:
+ 	case CB_COLOR9_VIEW:
+@@ -638,6 +1499,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 	case CB_COLOR11_VIEW:
+ 		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
+ 		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
++		track->cb_dirty = true;
+ 		break;
+ 	case CB_COLOR0_INFO:
+ 	case CB_COLOR1_INFO:
+@@ -649,7 +1511,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 	case CB_COLOR7_INFO:
+ 		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
+ 		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+-		if (!p->keep_tiling_flags) {
++		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ 			r = evergreen_cs_packet_next_reloc(p, &reloc);
+ 			if (r) {
+ 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+@@ -659,6 +1521,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ 			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ 		}
++		track->cb_dirty = true;
+ 		break;
+ 	case CB_COLOR8_INFO:
+ 	case CB_COLOR9_INFO:
+@@ -666,7 +1529,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 	case CB_COLOR11_INFO:
+ 		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
+ 		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+-		if (!p->keep_tiling_flags) {
++		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ 			r = evergreen_cs_packet_next_reloc(p, &reloc);
+ 			if (r) {
+ 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+@@ -676,6 +1539,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ 			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ 		}
++		track->cb_dirty = true;
+ 		break;
+ 	case CB_COLOR0_PITCH:
+ 	case CB_COLOR1_PITCH:
+@@ -687,7 +1551,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 	case CB_COLOR7_PITCH:
+ 		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
+ 		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+-		track->cb_color_pitch_idx[tmp] = idx;
++		track->cb_dirty = true;
+ 		break;
+ 	case CB_COLOR8_PITCH:
+ 	case CB_COLOR9_PITCH:
+@@ -695,7 +1559,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 	case CB_COLOR11_PITCH:
+ 		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
+ 		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
+-		track->cb_color_pitch_idx[tmp] = idx;
++		track->cb_dirty = true;
+ 		break;
+ 	case CB_COLOR0_SLICE:
+ 	case CB_COLOR1_SLICE:
+@@ -708,6 +1572,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
+ 		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ 		track->cb_color_slice_idx[tmp] = idx;
++		track->cb_dirty = true;
+ 		break;
+ 	case CB_COLOR8_SLICE:
+ 	case CB_COLOR9_SLICE:
+@@ -716,6 +1581,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
+ 		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ 		track->cb_color_slice_idx[tmp] = idx;
++		track->cb_dirty = true;
+ 		break;
+ 	case CB_COLOR0_ATTRIB:
+ 	case CB_COLOR1_ATTRIB:
+@@ -725,6 +1591,30 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 	case CB_COLOR5_ATTRIB:
+ 	case CB_COLOR6_ATTRIB:
+ 	case CB_COLOR7_ATTRIB:
++		r = evergreen_cs_packet_next_reloc(p, &reloc);
++		if (r) {
++			dev_warn(p->dev, "bad SET_CONTEXT_REG "
++					"0x%04X\n", reg);
++			return -EINVAL;
++		}
++		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
++			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
++				unsigned bankw, bankh, mtaspect, tile_split;
++
++				evergreen_tiling_fields(reloc->lobj.tiling_flags,
++							&bankw, &bankh, &mtaspect,
++							&tile_split);
++				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
++				ib[idx] |= CB_TILE_SPLIT(tile_split) |
++					   CB_BANK_WIDTH(bankw) |
++					   CB_BANK_HEIGHT(bankh) |
++					   CB_MACRO_TILE_ASPECT(mtaspect);
++			}
++		}
++		tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
++		track->cb_color_attrib[tmp] = ib[idx];
++		track->cb_dirty = true;
++		break;
+ 	case CB_COLOR8_ATTRIB:
+ 	case CB_COLOR9_ATTRIB:
+ 	case CB_COLOR10_ATTRIB:
+@@ -735,30 +1625,23 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 					"0x%04X\n", reg);
+ 			return -EINVAL;
+ 		}
+-		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+-			ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+-			ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
++		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
++			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
++				unsigned bankw, bankh, mtaspect, tile_split;
++
++				evergreen_tiling_fields(reloc->lobj.tiling_flags,
++							&bankw, &bankh, &mtaspect,
++							&tile_split);
++				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
++				ib[idx] |= CB_TILE_SPLIT(tile_split) |
++					   CB_BANK_WIDTH(bankw) |
++					   CB_BANK_HEIGHT(bankh) |
++					   CB_MACRO_TILE_ASPECT(mtaspect);
++			}
+ 		}
+-		break;
+-	case CB_COLOR0_DIM:
+-	case CB_COLOR1_DIM:
+-	case CB_COLOR2_DIM:
+-	case CB_COLOR3_DIM:
+-	case CB_COLOR4_DIM:
+-	case CB_COLOR5_DIM:
+-	case CB_COLOR6_DIM:
+-	case CB_COLOR7_DIM:
+-		tmp = (reg - CB_COLOR0_DIM) / 0x3c;
+-		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
+-		track->cb_color_dim_idx[tmp] = idx;
+-		break;
+-	case CB_COLOR8_DIM:
+-	case CB_COLOR9_DIM:
+-	case CB_COLOR10_DIM:
+-	case CB_COLOR11_DIM:
+-		tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
+-		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
+-		track->cb_color_dim_idx[tmp] = idx;
++		tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
++		track->cb_color_attrib[tmp] = ib[idx];
++		track->cb_dirty = true;
+ 		break;
+ 	case CB_COLOR0_FMASK:
+ 	case CB_COLOR1_FMASK:
+@@ -833,8 +1716,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
+ 		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+-		track->cb_color_base_last[tmp] = ib[idx];
+ 		track->cb_color_bo[tmp] = reloc->robj;
++		track->cb_dirty = true;
+ 		break;
+ 	case CB_COLOR8_BASE:
+ 	case CB_COLOR9_BASE:
+@@ -849,8 +1732,25 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
+ 		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+-		track->cb_color_base_last[tmp] = ib[idx];
+ 		track->cb_color_bo[tmp] = reloc->robj;
++		track->cb_dirty = true;
++		break;
++	case DB_HTILE_DATA_BASE:
++		r = evergreen_cs_packet_next_reloc(p, &reloc);
++		if (r) {
++			dev_warn(p->dev, "bad SET_CONTEXT_REG "
++					"0x%04X\n", reg);
++			return -EINVAL;
++		}
++		track->htile_offset = radeon_get_ib_value(p, idx);
++		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
++		track->htile_bo = reloc->robj;
++		track->db_dirty = true;
++		break;
++	case DB_HTILE_SURFACE:
++		/* 8x8 only */
++		track->htile_surface = radeon_get_ib_value(p, idx);
++		track->db_dirty = true;
+ 		break;
+ 	case CB_IMMED0_BASE:
+ 	case CB_IMMED1_BASE:
+@@ -864,7 +1764,6 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 	case CB_IMMED9_BASE:
+ 	case CB_IMMED10_BASE:
+ 	case CB_IMMED11_BASE:
+-	case DB_HTILE_DATA_BASE:
+ 	case SQ_PGM_START_FS:
+ 	case SQ_PGM_START_ES:
+ 	case SQ_PGM_START_VS:
+@@ -989,6 +1888,9 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		}
+ 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ 		break;
++	case SX_MISC:
++		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
++		break;
+ 	default:
+ 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ 		return -EINVAL;
+@@ -996,22 +1898,30 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 	return 0;
+ }
+ 
+-/**
+- * evergreen_check_texture_resource() - check if register is authorized or not
+- * @p: parser structure holding parsing context
+- * @idx: index into the cs buffer
+- * @texture: texture's bo structure
+- * @mipmap: mipmap's bo structure
+- *
+- * This function will check that the resource has valid field and that
+- * the texture and mipmap bo object are big enough to cover this resource.
+- */
+-static int evergreen_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
+-						   struct radeon_bo *texture,
+-						   struct radeon_bo *mipmap)
++static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ {
+-	/* XXX fill in */
+-	return 0;
++	u32 last_reg, m, i;
++
++	if (p->rdev->family >= CHIP_CAYMAN)
++		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
++	else
++		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
++
++	i = (reg >> 7);
++	if (i >= last_reg) {
++		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
++		return false;
++	}
++	m = 1 << ((reg >> 2) & 31);
++	if (p->rdev->family >= CHIP_CAYMAN) {
++		if (!(cayman_reg_safe_bm[i] & m))
++			return true;
++	} else {
++		if (!(evergreen_reg_safe_bm[i] & m))
++			return true;
++	}
++	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
++	return false;
+ }
+ 
+ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+@@ -1036,6 +1946,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ 	{
+ 		int pred_op;
+ 		int tmp;
++		uint64_t offset;
++
+ 		if (pkt->count != 1) {
+ 			DRM_ERROR("bad SET PREDICATION\n");
+ 			return -EINVAL;
+@@ -1059,8 +1971,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ 			return -EINVAL;
+ 		}
+ 
+-		ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+-		ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
++		offset = reloc->lobj.gpu_offset +
++		         (idx_value & 0xfffffff0) +
++		         ((u64)(tmp & 0xff) << 32);
++
++		ib[idx + 0] = offset;
++		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ 	}
+ 	break;
+ 	case PACKET3_CONTEXT_CONTROL:
+@@ -1088,6 +2004,9 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ 		}
+ 		break;
+ 	case PACKET3_INDEX_BASE:
++	{
++		uint64_t offset;
++
+ 		if (pkt->count != 1) {
+ 			DRM_ERROR("bad INDEX_BASE\n");
+ 			return -EINVAL;
+@@ -1097,15 +2016,24 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ 			DRM_ERROR("bad INDEX_BASE\n");
+ 			return -EINVAL;
+ 		}
+-		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+-		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
++
++		offset = reloc->lobj.gpu_offset +
++		         idx_value +
++		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
++
++		ib[idx+0] = offset;
++		ib[idx+1] = upper_32_bits(offset) & 0xff;
++
+ 		r = evergreen_cs_track_check(p);
+ 		if (r) {
+ 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ 			return r;
+ 		}
+ 		break;
++	}
+ 	case PACKET3_DRAW_INDEX:
++	{
++		uint64_t offset;
+ 		if (pkt->count != 3) {
+ 			DRM_ERROR("bad DRAW_INDEX\n");
+ 			return -EINVAL;
+@@ -1115,15 +2043,25 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ 			DRM_ERROR("bad DRAW_INDEX\n");
+ 			return -EINVAL;
+ 		}
+-		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+-		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
++
++		offset = reloc->lobj.gpu_offset +
++		         idx_value +
++		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
++
++		ib[idx+0] = offset;
++		ib[idx+1] = upper_32_bits(offset) & 0xff;
++
+ 		r = evergreen_cs_track_check(p);
+ 		if (r) {
+ 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ 			return r;
+ 		}
+ 		break;
++	}
+ 	case PACKET3_DRAW_INDEX_2:
++	{
++		uint64_t offset;
++
+ 		if (pkt->count != 4) {
+ 			DRM_ERROR("bad DRAW_INDEX_2\n");
+ 			return -EINVAL;
+@@ -1133,14 +2071,21 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ 			DRM_ERROR("bad DRAW_INDEX_2\n");
+ 			return -EINVAL;
+ 		}
+-		ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+-		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
++
++		offset = reloc->lobj.gpu_offset +
++		         radeon_get_ib_value(p, idx+1) +
++		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
++
++		ib[idx+1] = offset;
++		ib[idx+2] = upper_32_bits(offset) & 0xff;
++
+ 		r = evergreen_cs_track_check(p);
+ 		if (r) {
+ 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ 			return r;
+ 		}
+ 		break;
++	}
+ 	case PACKET3_DRAW_INDEX_AUTO:
+ 		if (pkt->count != 1) {
+ 			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+@@ -1231,13 +2176,20 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ 		}
+ 		/* bit 4 is reg (0) or mem (1) */
+ 		if (idx_value & 0x10) {
++			uint64_t offset;
++
+ 			r = evergreen_cs_packet_next_reloc(p, &reloc);
+ 			if (r) {
+ 				DRM_ERROR("bad WAIT_REG_MEM\n");
+ 				return -EINVAL;
+ 			}
+-			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+-			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
++
++			offset = reloc->lobj.gpu_offset +
++			         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
++			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
++
++			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
++			ib[idx+2] = upper_32_bits(offset) & 0xff;
+ 		}
+ 		break;
+ 	case PACKET3_SURFACE_SYNC:
+@@ -1262,16 +2214,25 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ 			return -EINVAL;
+ 		}
+ 		if (pkt->count) {
++			uint64_t offset;
++
+ 			r = evergreen_cs_packet_next_reloc(p, &reloc);
+ 			if (r) {
+ 				DRM_ERROR("bad EVENT_WRITE\n");
+ 				return -EINVAL;
+ 			}
+-			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+-			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
++			offset = reloc->lobj.gpu_offset +
++			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
++			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
++
++			ib[idx+1] = offset & 0xfffffff8;
++			ib[idx+2] = upper_32_bits(offset) & 0xff;
+ 		}
+ 		break;
+ 	case PACKET3_EVENT_WRITE_EOP:
++	{
++		uint64_t offset;
++
+ 		if (pkt->count != 4) {
+ 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ 			return -EINVAL;
+@@ -1281,10 +2242,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ 			return -EINVAL;
+ 		}
+-		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+-		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
++
++		offset = reloc->lobj.gpu_offset +
++		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
++		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
++
++		ib[idx+1] = offset & 0xfffffffc;
++		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ 		break;
++	}
+ 	case PACKET3_EVENT_WRITE_EOS:
++	{
++		uint64_t offset;
++
+ 		if (pkt->count != 3) {
+ 			DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ 			return -EINVAL;
+@@ -1294,9 +2264,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ 			DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ 			return -EINVAL;
+ 		}
+-		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+-		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
++
++		offset = reloc->lobj.gpu_offset +
++		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
++		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
++
++		ib[idx+1] = offset & 0xfffffffc;
++		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ 		break;
++	}
+ 	case PACKET3_SET_CONFIG_REG:
+ 		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+ 		end_reg = 4 * pkt->count + start_reg - 4;
+@@ -1344,6 +2320,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ 		}
+ 		for (i = 0; i < (pkt->count / 8); i++) {
+ 			struct radeon_bo *texture, *mipmap;
++			u32 toffset, moffset;
+ 			u32 size, offset;
+ 
+ 			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
+@@ -1354,32 +2331,42 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ 					DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ 					return -EINVAL;
+ 				}
+-				ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+-				if (!p->keep_tiling_flags) {
++				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ 					ib[idx+1+(i*8)+1] |=
+ 						TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ 					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+-						ib[idx+1+(i*8)+6] |=
+-							TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
++						unsigned bankw, bankh, mtaspect, tile_split;
++
++						evergreen_tiling_fields(reloc->lobj.tiling_flags,
++									&bankw, &bankh, &mtaspect,
++									&tile_split);
++						ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
+ 						ib[idx+1+(i*8)+7] |=
++							TEX_BANK_WIDTH(bankw) |
++							TEX_BANK_HEIGHT(bankh) |
++							MACRO_TILE_ASPECT(mtaspect) |
+ 							TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ 					}
+ 				}
+ 				texture = reloc->robj;
++				toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ 				/* tex mip base */
+ 				r = evergreen_cs_packet_next_reloc(p, &reloc);
+ 				if (r) {
+ 					DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ 					return -EINVAL;
+ 				}
+-				ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
++				moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ 				mipmap = reloc->robj;
+-				r = evergreen_check_texture_resource(p,  idx+1+(i*8),
+-						texture, mipmap);
++				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
+ 				if (r)
+ 					return r;
++				ib[idx+1+(i*8)+2] += toffset;
++				ib[idx+1+(i*8)+3] += moffset;
+ 				break;
+ 			case SQ_TEX_VTX_VALID_BUFFER:
++			{
++				uint64_t offset64;
+ 				/* vtx base */
+ 				r = evergreen_cs_packet_next_reloc(p, &reloc);
+ 				if (r) {
+@@ -1391,11 +2378,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ 				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+ 					/* force size to size of the buffer */
+ 					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+-					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
++					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
+ 				}
+-				ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
+-				ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
++
++				offset64 = reloc->lobj.gpu_offset + offset;
++				ib[idx+1+(i*8)+0] = offset64;
++				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
++						    (upper_32_bits(offset64) & 0xff);
+ 				break;
++			}
+ 			case SQ_TEX_VTX_INVALID_TEXTURE:
+ 			case SQ_TEX_VTX_INVALID_BUFFER:
+ 			default:
+@@ -1451,6 +2442,104 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
+ 			return -EINVAL;
+ 		}
+ 		break;
++	case PACKET3_STRMOUT_BUFFER_UPDATE:
++		if (pkt->count != 4) {
++			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
++			return -EINVAL;
++		}
++		/* Updating memory at DST_ADDRESS. */
++		if (idx_value & 0x1) {
++			u64 offset;
++			r = evergreen_cs_packet_next_reloc(p, &reloc);
++			if (r) {
++				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
++				return -EINVAL;
++			}
++			offset = radeon_get_ib_value(p, idx+1);
++			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
++			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
++				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
++					  offset + 4, radeon_bo_size(reloc->robj));
++				return -EINVAL;
++			}
++			offset += reloc->lobj.gpu_offset;
++			ib[idx+1] = offset;
++			ib[idx+2] = upper_32_bits(offset) & 0xff;
++		}
++		/* Reading data from SRC_ADDRESS. */
++		if (((idx_value >> 1) & 0x3) == 2) {
++			u64 offset;
++			r = evergreen_cs_packet_next_reloc(p, &reloc);
++			if (r) {
++				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
++				return -EINVAL;
++			}
++			offset = radeon_get_ib_value(p, idx+3);
++			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
++			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
++				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
++					  offset + 4, radeon_bo_size(reloc->robj));
++				return -EINVAL;
++			}
++			offset += reloc->lobj.gpu_offset;
++			ib[idx+3] = offset;
++			ib[idx+4] = upper_32_bits(offset) & 0xff;
++		}
++		break;
++	case PACKET3_COPY_DW:
++		if (pkt->count != 4) {
++			DRM_ERROR("bad COPY_DW (invalid count)\n");
++			return -EINVAL;
++		}
++		if (idx_value & 0x1) {
++			u64 offset;
++			/* SRC is memory. */
++			r = evergreen_cs_packet_next_reloc(p, &reloc);
++			if (r) {
++				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
++				return -EINVAL;
++			}
++			offset = radeon_get_ib_value(p, idx+1);
++			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
++			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
++				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
++					  offset + 4, radeon_bo_size(reloc->robj));
++				return -EINVAL;
++			}
++			offset += reloc->lobj.gpu_offset;
++			ib[idx+1] = offset;
++			ib[idx+2] = upper_32_bits(offset) & 0xff;
++		} else {
++			/* SRC is a reg. */
++			reg = radeon_get_ib_value(p, idx+1) << 2;
++			if (!evergreen_is_safe_reg(p, reg, idx+1))
++				return -EINVAL;
++		}
++		if (idx_value & 0x2) {
++			u64 offset;
++			/* DST is memory. */
++			r = evergreen_cs_packet_next_reloc(p, &reloc);
++			if (r) {
++				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
++				return -EINVAL;
++			}
++			offset = radeon_get_ib_value(p, idx+3);
++			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
++			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
++				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
++					  offset + 4, radeon_bo_size(reloc->robj));
++				return -EINVAL;
++			}
++			offset += reloc->lobj.gpu_offset;
++			ib[idx+3] = offset;
++			ib[idx+4] = upper_32_bits(offset) & 0xff;
++		} else {
++			/* DST is a reg. */
++			reg = radeon_get_ib_value(p, idx+3) << 2;
++			if (!evergreen_is_safe_reg(p, reg, idx+3))
++				return -EINVAL;
++		}
++		break;
+ 	case PACKET3_NOP:
+ 		break;
+ 	default:
+@@ -1572,3 +2661,247 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
+ 	return 0;
+ }
+ 
++/* vm parser */
++static bool evergreen_vm_reg_valid(u32 reg)
++{
++	/* context regs are fine */
++	if (reg >= 0x28000)
++		return true;
++
++	/* check config regs */
++	switch (reg) {
++	case WAIT_UNTIL:
++	case GRBM_GFX_INDEX:
++	case CP_STRMOUT_CNTL:
++	case CP_COHER_CNTL:
++	case CP_COHER_SIZE:
++	case VGT_VTX_VECT_EJECT_REG:
++	case VGT_CACHE_INVALIDATION:
++	case VGT_GS_VERTEX_REUSE:
++	case VGT_PRIMITIVE_TYPE:
++	case VGT_INDEX_TYPE:
++	case VGT_NUM_INDICES:
++	case VGT_NUM_INSTANCES:
++	case VGT_COMPUTE_DIM_X:
++	case VGT_COMPUTE_DIM_Y:
++	case VGT_COMPUTE_DIM_Z:
++	case VGT_COMPUTE_START_X:
++	case VGT_COMPUTE_START_Y:
++	case VGT_COMPUTE_START_Z:
++	case VGT_COMPUTE_INDEX:
++	case VGT_COMPUTE_THREAD_GROUP_SIZE:
++	case VGT_HS_OFFCHIP_PARAM:
++	case PA_CL_ENHANCE:
++	case PA_SU_LINE_STIPPLE_VALUE:
++	case PA_SC_LINE_STIPPLE_STATE:
++	case PA_SC_ENHANCE:
++	case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
++	case SQ_DYN_GPR_SIMD_LOCK_EN:
++	case SQ_CONFIG:
++	case SQ_GPR_RESOURCE_MGMT_1:
++	case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
++	case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
++	case SQ_CONST_MEM_BASE:
++	case SQ_STATIC_THREAD_MGMT_1:
++	case SQ_STATIC_THREAD_MGMT_2:
++	case SQ_STATIC_THREAD_MGMT_3:
++	case SPI_CONFIG_CNTL:
++	case SPI_CONFIG_CNTL_1:
++	case TA_CNTL_AUX:
++	case DB_DEBUG:
++	case DB_DEBUG2:
++	case DB_DEBUG3:
++	case DB_DEBUG4:
++	case DB_WATERMARKS:
++	case TD_PS_BORDER_COLOR_INDEX:
++	case TD_PS_BORDER_COLOR_RED:
++	case TD_PS_BORDER_COLOR_GREEN:
++	case TD_PS_BORDER_COLOR_BLUE:
++	case TD_PS_BORDER_COLOR_ALPHA:
++	case TD_VS_BORDER_COLOR_INDEX:
++	case TD_VS_BORDER_COLOR_RED:
++	case TD_VS_BORDER_COLOR_GREEN:
++	case TD_VS_BORDER_COLOR_BLUE:
++	case TD_VS_BORDER_COLOR_ALPHA:
++	case TD_GS_BORDER_COLOR_INDEX:
++	case TD_GS_BORDER_COLOR_RED:
++	case TD_GS_BORDER_COLOR_GREEN:
++	case TD_GS_BORDER_COLOR_BLUE:
++	case TD_GS_BORDER_COLOR_ALPHA:
++	case TD_HS_BORDER_COLOR_INDEX:
++	case TD_HS_BORDER_COLOR_RED:
++	case TD_HS_BORDER_COLOR_GREEN:
++	case TD_HS_BORDER_COLOR_BLUE:
++	case TD_HS_BORDER_COLOR_ALPHA:
++	case TD_LS_BORDER_COLOR_INDEX:
++	case TD_LS_BORDER_COLOR_RED:
++	case TD_LS_BORDER_COLOR_GREEN:
++	case TD_LS_BORDER_COLOR_BLUE:
++	case TD_LS_BORDER_COLOR_ALPHA:
++	case TD_CS_BORDER_COLOR_INDEX:
++	case TD_CS_BORDER_COLOR_RED:
++	case TD_CS_BORDER_COLOR_GREEN:
++	case TD_CS_BORDER_COLOR_BLUE:
++	case TD_CS_BORDER_COLOR_ALPHA:
++	case SQ_ESGS_RING_SIZE:
++	case SQ_GSVS_RING_SIZE:
++	case SQ_ESTMP_RING_SIZE:
++	case SQ_GSTMP_RING_SIZE:
++	case SQ_HSTMP_RING_SIZE:
++	case SQ_LSTMP_RING_SIZE:
++	case SQ_PSTMP_RING_SIZE:
++	case SQ_VSTMP_RING_SIZE:
++	case SQ_ESGS_RING_ITEMSIZE:
++	case SQ_ESTMP_RING_ITEMSIZE:
++	case SQ_GSTMP_RING_ITEMSIZE:
++	case SQ_GSVS_RING_ITEMSIZE:
++	case SQ_GS_VERT_ITEMSIZE:
++	case SQ_GS_VERT_ITEMSIZE_1:
++	case SQ_GS_VERT_ITEMSIZE_2:
++	case SQ_GS_VERT_ITEMSIZE_3:
++	case SQ_GSVS_RING_OFFSET_1:
++	case SQ_GSVS_RING_OFFSET_2:
++	case SQ_GSVS_RING_OFFSET_3:
++	case SQ_HSTMP_RING_ITEMSIZE:
++	case SQ_LSTMP_RING_ITEMSIZE:
++	case SQ_PSTMP_RING_ITEMSIZE:
++	case SQ_VSTMP_RING_ITEMSIZE:
++	case VGT_TF_RING_SIZE:
++	case SQ_ESGS_RING_BASE:
++	case SQ_GSVS_RING_BASE:
++	case SQ_ESTMP_RING_BASE:
++	case SQ_GSTMP_RING_BASE:
++	case SQ_HSTMP_RING_BASE:
++	case SQ_LSTMP_RING_BASE:
++	case SQ_PSTMP_RING_BASE:
++	case SQ_VSTMP_RING_BASE:
++	case CAYMAN_VGT_OFFCHIP_LDS_BASE:
++	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
++		return true;
++	default:
++		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
++		return false;
++	}
++}
++
++static int evergreen_vm_packet3_check(struct radeon_device *rdev,
++				      u32 *ib, struct radeon_cs_packet *pkt)
++{
++	u32 idx = pkt->idx + 1;
++	u32 idx_value = ib[idx];
++	u32 start_reg, end_reg, reg, i;
++
++	switch (pkt->opcode) {
++	case PACKET3_NOP:
++	case PACKET3_SET_BASE:
++	case PACKET3_CLEAR_STATE:
++	case PACKET3_INDEX_BUFFER_SIZE:
++	case PACKET3_DISPATCH_DIRECT:
++	case PACKET3_DISPATCH_INDIRECT:
++	case PACKET3_MODE_CONTROL:
++	case PACKET3_SET_PREDICATION:
++	case PACKET3_COND_EXEC:
++	case PACKET3_PRED_EXEC:
++	case PACKET3_DRAW_INDIRECT:
++	case PACKET3_DRAW_INDEX_INDIRECT:
++	case PACKET3_INDEX_BASE:
++	case PACKET3_DRAW_INDEX_2:
++	case PACKET3_CONTEXT_CONTROL:
++	case PACKET3_DRAW_INDEX_OFFSET:
++	case PACKET3_INDEX_TYPE:
++	case PACKET3_DRAW_INDEX:
++	case PACKET3_DRAW_INDEX_AUTO:
++	case PACKET3_DRAW_INDEX_IMMD:
++	case PACKET3_NUM_INSTANCES:
++	case PACKET3_DRAW_INDEX_MULTI_AUTO:
++	case PACKET3_STRMOUT_BUFFER_UPDATE:
++	case PACKET3_DRAW_INDEX_OFFSET_2:
++	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
++	case PACKET3_MPEG_INDEX:
++	case PACKET3_WAIT_REG_MEM:
++	case PACKET3_MEM_WRITE:
++	case PACKET3_SURFACE_SYNC:
++	case PACKET3_EVENT_WRITE:
++	case PACKET3_EVENT_WRITE_EOP:
++	case PACKET3_EVENT_WRITE_EOS:
++	case PACKET3_SET_CONTEXT_REG:
++	case PACKET3_SET_BOOL_CONST:
++	case PACKET3_SET_LOOP_CONST:
++	case PACKET3_SET_RESOURCE:
++	case PACKET3_SET_SAMPLER:
++	case PACKET3_SET_CTL_CONST:
++	case PACKET3_SET_RESOURCE_OFFSET:
++	case PACKET3_SET_CONTEXT_REG_INDIRECT:
++	case PACKET3_SET_RESOURCE_INDIRECT:
++	case CAYMAN_PACKET3_DEALLOC_STATE:
++		break;
++	case PACKET3_COND_WRITE:
++		if (idx_value & 0x100) {
++			reg = ib[idx + 5] * 4;
++			if (!evergreen_vm_reg_valid(reg))
++				return -EINVAL;
++		}
++		break;
++	case PACKET3_COPY_DW:
++		if (idx_value & 0x2) {
++			reg = ib[idx + 3] * 4;
++			if (!evergreen_vm_reg_valid(reg))
++				return -EINVAL;
++		}
++		break;
++	case PACKET3_SET_CONFIG_REG:
++		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
++		end_reg = 4 * pkt->count + start_reg - 4;
++		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
++		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
++		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
++			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
++			return -EINVAL;
++		}
++		for (i = 0; i < pkt->count; i++) {
++			reg = start_reg + (4 * i);
++			if (!evergreen_vm_reg_valid(reg))
++				return -EINVAL;
++		}
++		break;
++	default:
++		return -EINVAL;
++	}
++	return 0;
++}
++
++int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
++{
++	int ret = 0;
++	u32 idx = 0;
++	struct radeon_cs_packet pkt;
++
++	do {
++		pkt.idx = idx;
++		pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
++		pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
++		pkt.one_reg_wr = 0;
++		switch (pkt.type) {
++		case PACKET_TYPE0:
++			dev_err(rdev->dev, "Packet0 not allowed!\n");
++			ret = -EINVAL;
++			break;
++		case PACKET_TYPE2:
++			idx += 1;
++			break;
++		case PACKET_TYPE3:
++			pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
++			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
++			idx += pkt.count + 2;
++			break;
++		default:
++			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
++			ret = -EINVAL;
++			break;
++		}
++		if (ret)
++			break;
++	} while (idx < ib->length_dw);
++
++	return ret;
++}
+diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
+index e022776..34a0e85 100644
+--- a/drivers/gpu/drm/radeon/evergreen_reg.h
++++ b/drivers/gpu/drm/radeon/evergreen_reg.h
+@@ -35,6 +35,14 @@
+ #define EVERGREEN_P1PLL_SS_CNTL                         0x414
+ #define EVERGREEN_P2PLL_SS_CNTL                         0x454
+ #       define EVERGREEN_PxPLL_SS_EN                    (1 << 12)
++
++#define EVERGREEN_AUDIO_PLL1_MUL			0x5b0
++#define EVERGREEN_AUDIO_PLL1_DIV			0x5b4
++#define EVERGREEN_AUDIO_PLL1_UNK			0x5bc
++
++#define EVERGREEN_AUDIO_ENABLE				0x5e78
++#define EVERGREEN_AUDIO_VENDOR_ID			0x5ec0
++
+ /* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
+ #define EVERGREEN_GRPH_ENABLE                           0x6800
+ #define EVERGREEN_GRPH_CONTROL                          0x6804
+@@ -223,4 +231,9 @@
+ #define EVERGREEN_DC_GPIO_HPD_EN                        0x64b8
+ #define EVERGREEN_DC_GPIO_HPD_Y                         0x64bc
+ 
++/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
++#define EVERGREEN_HDMI_BASE				0x7030
++
++#define EVERGREEN_HDMI_CONFIG_OFFSET			0xf0
++
+ #endif
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index fe44a95..81e744f 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -81,6 +81,11 @@
+ #define		FB_READ_EN					(1 << 0)
+ #define		FB_WRITE_EN					(1 << 1)
+ 
++#define	CP_STRMOUT_CNTL					0x84FC
++
++#define	CP_COHER_CNTL					0x85F0
++#define	CP_COHER_SIZE					0x85F4
++#define	CP_COHER_BASE					0x85F8
+ #define CP_ME_CNTL					0x86D8
+ #define		CP_ME_HALT					(1 << 28)
+ #define		CP_PFP_HALT					(1 << 26)
+@@ -112,6 +117,7 @@
+ #define	CP_RB_WPTR_ADDR_HI				0xC11C
+ #define	CP_RB_WPTR_DELAY				0x8704
+ #define	CP_SEM_WAIT_TIMER				0x85BC
++#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x85C8
+ #define	CP_DEBUG					0xC1FC
+ 
+ 
+@@ -250,6 +256,7 @@
+ #define	PA_CL_ENHANCE					0x8A14
+ #define		CLIP_VTX_REORDER_ENA				(1 << 0)
+ #define		NUM_CLIP_SEQ(x)					((x) << 1)
++#define	PA_SC_ENHANCE					0x8BF0
+ #define PA_SC_AA_CONFIG					0x28C04
+ #define         MSAA_NUM_SAMPLES_SHIFT                  0
+ #define         MSAA_NUM_SAMPLES_MASK                   0x3
+@@ -328,6 +335,8 @@
+ #define	SQ_GPR_RESOURCE_MGMT_3				0x8C0C
+ #define		NUM_HS_GPRS(x)					((x) << 0)
+ #define		NUM_LS_GPRS(x)					((x) << 16)
++#define	SQ_GLOBAL_GPR_RESOURCE_MGMT_1			0x8C10
++#define	SQ_GLOBAL_GPR_RESOURCE_MGMT_2			0x8C14
+ #define	SQ_THREAD_RESOURCE_MGMT				0x8C18
+ #define		NUM_PS_THREADS(x)				((x) << 0)
+ #define		NUM_VS_THREADS(x)				((x) << 8)
+@@ -346,6 +355,10 @@
+ #define		NUM_HS_STACK_ENTRIES(x)				((x) << 0)
+ #define		NUM_LS_STACK_ENTRIES(x)				((x) << 16)
+ #define	SQ_DYN_GPR_CNTL_PS_FLUSH_REQ    		0x8D8C
++#define	SQ_DYN_GPR_SIMD_LOCK_EN    			0x8D94
++#define	SQ_STATIC_THREAD_MGMT_1    			0x8E20
++#define	SQ_STATIC_THREAD_MGMT_2    			0x8E24
++#define	SQ_STATIC_THREAD_MGMT_3    			0x8E28
+ #define	SQ_LDS_RESOURCE_MGMT    			0x8E2C
+ 
+ #define	SQ_MS_FIFO_SIZES				0x8CF0
+@@ -700,6 +713,7 @@
+ #define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
+ #define	PACKET3_MEM_SEMAPHORE				0x39
+ #define	PACKET3_MPEG_INDEX				0x3A
++#define	PACKET3_COPY_DW					0x3B
+ #define	PACKET3_WAIT_REG_MEM				0x3C
+ #define	PACKET3_MEM_WRITE				0x3D
+ #define	PACKET3_INDIRECT_BUFFER				0x32
+@@ -777,6 +791,8 @@
+ #define			SQ_TEX_VTX_VALID_TEXTURE			0x2
+ #define			SQ_TEX_VTX_VALID_BUFFER				0x3
+ 
++#define VGT_VTX_VECT_EJECT_REG				0x88b0
++
+ #define SQ_CONST_MEM_BASE				0x8df8
+ 
+ #define SQ_ESGS_RING_BASE				0x8c40
+@@ -901,23 +917,160 @@
+ #define PA_SC_SCREEN_SCISSOR_TL                         0x28030
+ #define PA_SC_GENERIC_SCISSOR_TL                        0x28240
+ #define PA_SC_WINDOW_SCISSOR_TL                         0x28204
+-#define VGT_PRIMITIVE_TYPE                              0x8958
+ 
++#define VGT_PRIMITIVE_TYPE                              0x8958
++#define VGT_INDEX_TYPE                                  0x895C
++
++#define VGT_NUM_INDICES                                 0x8970
++
++#define VGT_COMPUTE_DIM_X                               0x8990
++#define VGT_COMPUTE_DIM_Y                               0x8994
++#define VGT_COMPUTE_DIM_Z                               0x8998
++#define VGT_COMPUTE_START_X                             0x899C
++#define VGT_COMPUTE_START_Y                             0x89A0
++#define VGT_COMPUTE_START_Z                             0x89A4
++#define VGT_COMPUTE_INDEX                               0x89A8
++#define VGT_COMPUTE_THREAD_GROUP_SIZE                   0x89AC
++#define VGT_HS_OFFCHIP_PARAM                            0x89B0
++
++#define DB_DEBUG					0x9830
++#define DB_DEBUG2					0x9834
++#define DB_DEBUG3					0x9838
++#define DB_DEBUG4					0x983C
++#define DB_WATERMARKS					0x9854
+ #define DB_DEPTH_CONTROL				0x28800
++#define R_028800_DB_DEPTH_CONTROL                    0x028800
++#define   S_028800_STENCIL_ENABLE(x)                   (((x) & 0x1) << 0)
++#define   G_028800_STENCIL_ENABLE(x)                   (((x) >> 0) & 0x1)
++#define   C_028800_STENCIL_ENABLE                      0xFFFFFFFE
++#define   S_028800_Z_ENABLE(x)                         (((x) & 0x1) << 1)
++#define   G_028800_Z_ENABLE(x)                         (((x) >> 1) & 0x1)
++#define   C_028800_Z_ENABLE                            0xFFFFFFFD
++#define   S_028800_Z_WRITE_ENABLE(x)                   (((x) & 0x1) << 2)
++#define   G_028800_Z_WRITE_ENABLE(x)                   (((x) >> 2) & 0x1)
++#define   C_028800_Z_WRITE_ENABLE                      0xFFFFFFFB
++#define   S_028800_ZFUNC(x)                            (((x) & 0x7) << 4)
++#define   G_028800_ZFUNC(x)                            (((x) >> 4) & 0x7)
++#define   C_028800_ZFUNC                               0xFFFFFF8F
++#define   S_028800_BACKFACE_ENABLE(x)                  (((x) & 0x1) << 7)
++#define   G_028800_BACKFACE_ENABLE(x)                  (((x) >> 7) & 0x1)
++#define   C_028800_BACKFACE_ENABLE                     0xFFFFFF7F
++#define   S_028800_STENCILFUNC(x)                      (((x) & 0x7) << 8)
++#define   G_028800_STENCILFUNC(x)                      (((x) >> 8) & 0x7)
++#define   C_028800_STENCILFUNC                         0xFFFFF8FF
++#define     V_028800_STENCILFUNC_NEVER                 0x00000000
++#define     V_028800_STENCILFUNC_LESS                  0x00000001
++#define     V_028800_STENCILFUNC_EQUAL                 0x00000002
++#define     V_028800_STENCILFUNC_LEQUAL                0x00000003
++#define     V_028800_STENCILFUNC_GREATER               0x00000004
++#define     V_028800_STENCILFUNC_NOTEQUAL              0x00000005
++#define     V_028800_STENCILFUNC_GEQUAL                0x00000006
++#define     V_028800_STENCILFUNC_ALWAYS                0x00000007
++#define   S_028800_STENCILFAIL(x)                      (((x) & 0x7) << 11)
++#define   G_028800_STENCILFAIL(x)                      (((x) >> 11) & 0x7)
++#define   C_028800_STENCILFAIL                         0xFFFFC7FF
++#define     V_028800_STENCIL_KEEP                      0x00000000
++#define     V_028800_STENCIL_ZERO                      0x00000001
++#define     V_028800_STENCIL_REPLACE                   0x00000002
++#define     V_028800_STENCIL_INCR                      0x00000003
++#define     V_028800_STENCIL_DECR                      0x00000004
++#define     V_028800_STENCIL_INVERT                    0x00000005
++#define     V_028800_STENCIL_INCR_WRAP                 0x00000006
++#define     V_028800_STENCIL_DECR_WRAP                 0x00000007
++#define   S_028800_STENCILZPASS(x)                     (((x) & 0x7) << 14)
++#define   G_028800_STENCILZPASS(x)                     (((x) >> 14) & 0x7)
++#define   C_028800_STENCILZPASS                        0xFFFE3FFF
++#define   S_028800_STENCILZFAIL(x)                     (((x) & 0x7) << 17)
++#define   G_028800_STENCILZFAIL(x)                     (((x) >> 17) & 0x7)
++#define   C_028800_STENCILZFAIL                        0xFFF1FFFF
++#define   S_028800_STENCILFUNC_BF(x)                   (((x) & 0x7) << 20)
++#define   G_028800_STENCILFUNC_BF(x)                   (((x) >> 20) & 0x7)
++#define   C_028800_STENCILFUNC_BF                      0xFF8FFFFF
++#define   S_028800_STENCILFAIL_BF(x)                   (((x) & 0x7) << 23)
++#define   G_028800_STENCILFAIL_BF(x)                   (((x) >> 23) & 0x7)
++#define   C_028800_STENCILFAIL_BF                      0xFC7FFFFF
++#define   S_028800_STENCILZPASS_BF(x)                  (((x) & 0x7) << 26)
++#define   G_028800_STENCILZPASS_BF(x)                  (((x) >> 26) & 0x7)
++#define   C_028800_STENCILZPASS_BF                     0xE3FFFFFF
++#define   S_028800_STENCILZFAIL_BF(x)                  (((x) & 0x7) << 29)
++#define   G_028800_STENCILZFAIL_BF(x)                  (((x) >> 29) & 0x7)
++#define   C_028800_STENCILZFAIL_BF                     0x1FFFFFFF
+ #define DB_DEPTH_VIEW					0x28008
++#define R_028008_DB_DEPTH_VIEW                       0x00028008
++#define   S_028008_SLICE_START(x)                      (((x) & 0x7FF) << 0)
++#define   G_028008_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
++#define   C_028008_SLICE_START                         0xFFFFF800
++#define   S_028008_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
++#define   G_028008_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
++#define   C_028008_SLICE_MAX                           0xFF001FFF
+ #define DB_HTILE_DATA_BASE				0x28014
++#define DB_HTILE_SURFACE				0x28abc
++#define   S_028ABC_HTILE_WIDTH(x)                      (((x) & 0x1) << 0)
++#define   G_028ABC_HTILE_WIDTH(x)                      (((x) >> 0) & 0x1)
++#define   C_028ABC_HTILE_WIDTH                         0xFFFFFFFE
++#define   S_028ABC_HTILE_HEIGHT(x)                      (((x) & 0x1) << 1)
++#define   G_028ABC_HTILE_HEIGHT(x)                      (((x) >> 1) & 0x1)
++#define   C_028ABC_HTILE_HEIGHT                         0xFFFFFFFD
++#define   G_028ABC_LINEAR(x)                           (((x) >> 2) & 0x1)
+ #define DB_Z_INFO					0x28040
+ #       define Z_ARRAY_MODE(x)                          ((x) << 4)
+ #       define DB_TILE_SPLIT(x)                         (((x) & 0x7) << 8)
+ #       define DB_NUM_BANKS(x)                          (((x) & 0x3) << 12)
+ #       define DB_BANK_WIDTH(x)                         (((x) & 0x3) << 16)
+ #       define DB_BANK_HEIGHT(x)                        (((x) & 0x3) << 20)
++#       define DB_MACRO_TILE_ASPECT(x)                  (((x) & 0x3) << 24)
++#define R_028040_DB_Z_INFO                       0x028040
++#define   S_028040_FORMAT(x)                           (((x) & 0x3) << 0)
++#define   G_028040_FORMAT(x)                           (((x) >> 0) & 0x3)
++#define   C_028040_FORMAT                              0xFFFFFFFC
++#define     V_028040_Z_INVALID                     0x00000000
++#define     V_028040_Z_16                          0x00000001
++#define     V_028040_Z_24                          0x00000002
++#define     V_028040_Z_32_FLOAT                    0x00000003
++#define   S_028040_ARRAY_MODE(x)                       (((x) & 0xF) << 4)
++#define   G_028040_ARRAY_MODE(x)                       (((x) >> 4) & 0xF)
++#define   C_028040_ARRAY_MODE                          0xFFFFFF0F
++#define   S_028040_READ_SIZE(x)                        (((x) & 0x1) << 28)
++#define   G_028040_READ_SIZE(x)                        (((x) >> 28) & 0x1)
++#define   C_028040_READ_SIZE                           0xEFFFFFFF
++#define   S_028040_TILE_SURFACE_ENABLE(x)              (((x) & 0x1) << 29)
++#define   G_028040_TILE_SURFACE_ENABLE(x)              (((x) >> 29) & 0x1)
++#define   C_028040_TILE_SURFACE_ENABLE                 0xDFFFFFFF
++#define   S_028040_ZRANGE_PRECISION(x)                 (((x) & 0x1) << 31)
++#define   G_028040_ZRANGE_PRECISION(x)                 (((x) >> 31) & 0x1)
++#define   C_028040_ZRANGE_PRECISION                    0x7FFFFFFF
++#define   S_028040_TILE_SPLIT(x)                       (((x) & 0x7) << 8)
++#define   G_028040_TILE_SPLIT(x)                       (((x) >> 8) & 0x7)
++#define   S_028040_NUM_BANKS(x)                        (((x) & 0x3) << 12)
++#define   G_028040_NUM_BANKS(x)                        (((x) >> 12) & 0x3)
++#define   S_028040_BANK_WIDTH(x)                       (((x) & 0x3) << 16)
++#define   G_028040_BANK_WIDTH(x)                       (((x) >> 16) & 0x3)
++#define   S_028040_BANK_HEIGHT(x)                      (((x) & 0x3) << 20)
++#define   G_028040_BANK_HEIGHT(x)                      (((x) >> 20) & 0x3)
++#define   S_028040_MACRO_TILE_ASPECT(x)                (((x) & 0x3) << 24)
++#define   G_028040_MACRO_TILE_ASPECT(x)                (((x) >> 24) & 0x3)
+ #define DB_STENCIL_INFO					0x28044
++#define R_028044_DB_STENCIL_INFO                     0x028044
++#define   S_028044_FORMAT(x)                           (((x) & 0x1) << 0)
++#define   G_028044_FORMAT(x)                           (((x) >> 0) & 0x1)
++#define   C_028044_FORMAT                              0xFFFFFFFE
++#define   G_028044_TILE_SPLIT(x)                       (((x) >> 8) & 0x7)
+ #define DB_Z_READ_BASE					0x28048
+ #define DB_STENCIL_READ_BASE				0x2804c
+ #define DB_Z_WRITE_BASE					0x28050
+ #define DB_STENCIL_WRITE_BASE				0x28054
+ #define DB_DEPTH_SIZE					0x28058
++#define R_028058_DB_DEPTH_SIZE                       0x028058
++#define   S_028058_PITCH_TILE_MAX(x)                   (((x) & 0x7FF) << 0)
++#define   G_028058_PITCH_TILE_MAX(x)                   (((x) >> 0) & 0x7FF)
++#define   C_028058_PITCH_TILE_MAX                      0xFFFFF800
++#define   S_028058_HEIGHT_TILE_MAX(x)                   (((x) & 0x7FF) << 11)
++#define   G_028058_HEIGHT_TILE_MAX(x)                   (((x) >> 11) & 0x7FF)
++#define   C_028058_HEIGHT_TILE_MAX                      0xFFC007FF
++#define R_02805C_DB_DEPTH_SLICE                      0x02805C
++#define   S_02805C_SLICE_TILE_MAX(x)                   (((x) & 0x3FFFFF) << 0)
++#define   G_02805C_SLICE_TILE_MAX(x)                   (((x) >> 0) & 0x3FFFFF)
++#define   C_02805C_SLICE_TILE_MAX                      0xFFC00000
+ 
+ #define SQ_PGM_START_PS					0x28840
+ #define SQ_PGM_START_VS					0x2885c
+@@ -927,6 +1080,14 @@
+ #define SQ_PGM_START_HS					0x288b8
+ #define SQ_PGM_START_LS					0x288d0
+ 
++#define	VGT_STRMOUT_BUFFER_BASE_0			0x28AD8
++#define	VGT_STRMOUT_BUFFER_BASE_1			0x28AE8
++#define	VGT_STRMOUT_BUFFER_BASE_2			0x28AF8
++#define	VGT_STRMOUT_BUFFER_BASE_3			0x28B08
++#define VGT_STRMOUT_BUFFER_SIZE_0			0x28AD0
++#define VGT_STRMOUT_BUFFER_SIZE_1			0x28AE0
++#define VGT_STRMOUT_BUFFER_SIZE_2			0x28AF0
++#define VGT_STRMOUT_BUFFER_SIZE_3			0x28B00
+ #define VGT_STRMOUT_CONFIG				0x28b94
+ #define VGT_STRMOUT_BUFFER_CONFIG			0x28b98
+ 
+@@ -953,6 +1114,114 @@
+ #define	CB_COLOR0_PITCH					0x28c64
+ #define	CB_COLOR0_SLICE					0x28c68
+ #define	CB_COLOR0_VIEW					0x28c6c
++#define R_028C6C_CB_COLOR0_VIEW                      0x00028C6C
++#define   S_028C6C_SLICE_START(x)                      (((x) & 0x7FF) << 0)
++#define   G_028C6C_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
++#define   C_028C6C_SLICE_START                         0xFFFFF800
++#define   S_028C6C_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
++#define   G_028C6C_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
++#define   C_028C6C_SLICE_MAX                           0xFF001FFF
++#define R_028C70_CB_COLOR0_INFO                      0x028C70
++#define   S_028C70_ENDIAN(x)                           (((x) & 0x3) << 0)
++#define   G_028C70_ENDIAN(x)                           (((x) >> 0) & 0x3)
++#define   C_028C70_ENDIAN                              0xFFFFFFFC
++#define   S_028C70_FORMAT(x)                           (((x) & 0x3F) << 2)
++#define   G_028C70_FORMAT(x)                           (((x) >> 2) & 0x3F)
++#define   C_028C70_FORMAT                              0xFFFFFF03
++#define     V_028C70_COLOR_INVALID                     0x00000000
++#define     V_028C70_COLOR_8                           0x00000001
++#define     V_028C70_COLOR_4_4                         0x00000002
++#define     V_028C70_COLOR_3_3_2                       0x00000003
++#define     V_028C70_COLOR_16                          0x00000005
++#define     V_028C70_COLOR_16_FLOAT                    0x00000006
++#define     V_028C70_COLOR_8_8                         0x00000007
++#define     V_028C70_COLOR_5_6_5                       0x00000008
++#define     V_028C70_COLOR_6_5_5                       0x00000009
++#define     V_028C70_COLOR_1_5_5_5                     0x0000000A
++#define     V_028C70_COLOR_4_4_4_4                     0x0000000B
++#define     V_028C70_COLOR_5_5_5_1                     0x0000000C
++#define     V_028C70_COLOR_32                          0x0000000D
++#define     V_028C70_COLOR_32_FLOAT                    0x0000000E
++#define     V_028C70_COLOR_16_16                       0x0000000F
++#define     V_028C70_COLOR_16_16_FLOAT                 0x00000010
++#define     V_028C70_COLOR_8_24                        0x00000011
++#define     V_028C70_COLOR_8_24_FLOAT                  0x00000012
++#define     V_028C70_COLOR_24_8                        0x00000013
++#define     V_028C70_COLOR_24_8_FLOAT                  0x00000014
++#define     V_028C70_COLOR_10_11_11                    0x00000015
++#define     V_028C70_COLOR_10_11_11_FLOAT              0x00000016
++#define     V_028C70_COLOR_11_11_10                    0x00000017
++#define     V_028C70_COLOR_11_11_10_FLOAT              0x00000018
++#define     V_028C70_COLOR_2_10_10_10                  0x00000019
++#define     V_028C70_COLOR_8_8_8_8                     0x0000001A
++#define     V_028C70_COLOR_10_10_10_2                  0x0000001B
++#define     V_028C70_COLOR_X24_8_32_FLOAT              0x0000001C
++#define     V_028C70_COLOR_32_32                       0x0000001D
++#define     V_028C70_COLOR_32_32_FLOAT                 0x0000001E
++#define     V_028C70_COLOR_16_16_16_16                 0x0000001F
++#define     V_028C70_COLOR_16_16_16_16_FLOAT           0x00000020
++#define     V_028C70_COLOR_32_32_32_32                 0x00000022
++#define     V_028C70_COLOR_32_32_32_32_FLOAT           0x00000023
++#define     V_028C70_COLOR_32_32_32_FLOAT              0x00000030
++#define   S_028C70_ARRAY_MODE(x)                       (((x) & 0xF) << 8)
++#define   G_028C70_ARRAY_MODE(x)                       (((x) >> 8) & 0xF)
++#define   C_028C70_ARRAY_MODE                          0xFFFFF0FF
++#define     V_028C70_ARRAY_LINEAR_GENERAL              0x00000000
++#define     V_028C70_ARRAY_LINEAR_ALIGNED              0x00000001
++#define     V_028C70_ARRAY_1D_TILED_THIN1              0x00000002
++#define     V_028C70_ARRAY_2D_TILED_THIN1              0x00000004
++#define   S_028C70_NUMBER_TYPE(x)                      (((x) & 0x7) << 12)
++#define   G_028C70_NUMBER_TYPE(x)                      (((x) >> 12) & 0x7)
++#define   C_028C70_NUMBER_TYPE                         0xFFFF8FFF
++#define     V_028C70_NUMBER_UNORM                      0x00000000
++#define     V_028C70_NUMBER_SNORM                      0x00000001
++#define     V_028C70_NUMBER_USCALED                    0x00000002
++#define     V_028C70_NUMBER_SSCALED                    0x00000003
++#define     V_028C70_NUMBER_UINT                       0x00000004
++#define     V_028C70_NUMBER_SINT                       0x00000005
++#define     V_028C70_NUMBER_SRGB                       0x00000006
++#define     V_028C70_NUMBER_FLOAT                      0x00000007
++#define   S_028C70_COMP_SWAP(x)                        (((x) & 0x3) << 15)
++#define   G_028C70_COMP_SWAP(x)                        (((x) >> 15) & 0x3)
++#define   C_028C70_COMP_SWAP                           0xFFFE7FFF
++#define     V_028C70_SWAP_STD                          0x00000000
++#define     V_028C70_SWAP_ALT                          0x00000001
++#define     V_028C70_SWAP_STD_REV                      0x00000002
++#define     V_028C70_SWAP_ALT_REV                      0x00000003
++#define   S_028C70_FAST_CLEAR(x)                       (((x) & 0x1) << 17)
++#define   G_028C70_FAST_CLEAR(x)                       (((x) >> 17) & 0x1)
++#define   C_028C70_FAST_CLEAR                          0xFFFDFFFF
++#define   S_028C70_COMPRESSION(x)                      (((x) & 0x3) << 18)
++#define   G_028C70_COMPRESSION(x)                      (((x) >> 18) & 0x3)
++#define   C_028C70_COMPRESSION                         0xFFF3FFFF
++#define   S_028C70_BLEND_CLAMP(x)                      (((x) & 0x1) << 19)
++#define   G_028C70_BLEND_CLAMP(x)                      (((x) >> 19) & 0x1)
++#define   C_028C70_BLEND_CLAMP                         0xFFF7FFFF
++#define   S_028C70_BLEND_BYPASS(x)                     (((x) & 0x1) << 20)
++#define   G_028C70_BLEND_BYPASS(x)                     (((x) >> 20) & 0x1)
++#define   C_028C70_BLEND_BYPASS                        0xFFEFFFFF
++#define   S_028C70_SIMPLE_FLOAT(x)                     (((x) & 0x1) << 21)
++#define   G_028C70_SIMPLE_FLOAT(x)                     (((x) >> 21) & 0x1)
++#define   C_028C70_SIMPLE_FLOAT                        0xFFDFFFFF
++#define   S_028C70_ROUND_MODE(x)                       (((x) & 0x1) << 22)
++#define   G_028C70_ROUND_MODE(x)                       (((x) >> 22) & 0x1)
++#define   C_028C70_ROUND_MODE                          0xFFBFFFFF
++#define   S_028C70_TILE_COMPACT(x)                     (((x) & 0x1) << 23)
++#define   G_028C70_TILE_COMPACT(x)                     (((x) >> 23) & 0x1)
++#define   C_028C70_TILE_COMPACT                        0xFF7FFFFF
++#define   S_028C70_SOURCE_FORMAT(x)                    (((x) & 0x3) << 24)
++#define   G_028C70_SOURCE_FORMAT(x)                    (((x) >> 24) & 0x3)
++#define   C_028C70_SOURCE_FORMAT                       0xFCFFFFFF
++#define     V_028C70_EXPORT_4C_32BPC                   0x0
++#define     V_028C70_EXPORT_4C_16BPC                   0x1
++#define     V_028C70_EXPORT_2C_32BPC                   0x2 /* Do not use */
++#define   S_028C70_RAT(x)                              (((x) & 0x1) << 26)
++#define   G_028C70_RAT(x)                              (((x) >> 26) & 0x1)
++#define   C_028C70_RAT                                 0xFBFFFFFF
++#define   S_028C70_RESOURCE_TYPE(x)                    (((x) & 0x7) << 27)
++#define   G_028C70_RESOURCE_TYPE(x)                    (((x) >> 27) & 0x7)
++#define   C_028C70_RESOURCE_TYPE                       0xC7FFFFFF
++
+ #define	CB_COLOR0_INFO					0x28c70
+ #	define CB_FORMAT(x)				((x) << 2)
+ #       define CB_ARRAY_MODE(x)                         ((x) << 8)
+@@ -963,6 +1232,20 @@
+ #	define CB_SOURCE_FORMAT(x)			((x) << 24)
+ #	define CB_SF_EXPORT_FULL			0
+ #	define CB_SF_EXPORT_NORM			1
++#define R_028C74_CB_COLOR0_ATTRIB                      0x028C74
++#define   S_028C74_NON_DISP_TILING_ORDER(x)            (((x) & 0x1) << 4)
++#define   G_028C74_NON_DISP_TILING_ORDER(x)            (((x) >> 4) & 0x1)
++#define   C_028C74_NON_DISP_TILING_ORDER               0xFFFFFFEF
++#define   S_028C74_TILE_SPLIT(x)                       (((x) & 0xf) << 5)
++#define   G_028C74_TILE_SPLIT(x)                       (((x) >> 5) & 0xf)
++#define   S_028C74_NUM_BANKS(x)                        (((x) & 0x3) << 10)
++#define   G_028C74_NUM_BANKS(x)                        (((x) >> 10) & 0x3)
++#define   S_028C74_BANK_WIDTH(x)                       (((x) & 0x3) << 13)
++#define   G_028C74_BANK_WIDTH(x)                       (((x) >> 13) & 0x3)
++#define   S_028C74_BANK_HEIGHT(x)                      (((x) & 0x3) << 16)
++#define   G_028C74_BANK_HEIGHT(x)                      (((x) >> 16) & 0x3)
++#define   S_028C74_MACRO_TILE_ASPECT(x)                (((x) & 0x3) << 19)
++#define   G_028C74_MACRO_TILE_ASPECT(x)                (((x) >> 19) & 0x3)
+ #define	CB_COLOR0_ATTRIB				0x28c74
+ #       define CB_TILE_SPLIT(x)                         (((x) & 0x7) << 5)
+ #       define ADDR_SURF_TILE_SPLIT_64B                 0
+@@ -987,6 +1270,7 @@
+ #       define ADDR_SURF_BANK_HEIGHT_2                  1
+ #       define ADDR_SURF_BANK_HEIGHT_4                  2
+ #       define ADDR_SURF_BANK_HEIGHT_8                  3
++#       define CB_MACRO_TILE_ASPECT(x)                  (((x) & 0x3) << 19)
+ #define	CB_COLOR0_DIM					0x28c78
+ /* only CB0-7 blocks have these regs */
+ #define	CB_COLOR0_CMASK					0x28c7c
+@@ -1175,9 +1459,144 @@
+ #define SQ_TEX_RESOURCE_WORD6_0                         0x30018
+ #       define TEX_TILE_SPLIT(x)                        (((x) & 0x7) << 29)
+ #define SQ_TEX_RESOURCE_WORD7_0                         0x3001c
++#       define MACRO_TILE_ASPECT(x)                     (((x) & 0x3) << 6)
+ #       define TEX_BANK_WIDTH(x)                        (((x) & 0x3) << 8)
+ #       define TEX_BANK_HEIGHT(x)                       (((x) & 0x3) << 10)
+ #       define TEX_NUM_BANKS(x)                         (((x) & 0x3) << 16)
++#define R_030000_SQ_TEX_RESOURCE_WORD0_0             0x030000
++#define   S_030000_DIM(x)                              (((x) & 0x7) << 0)
++#define   G_030000_DIM(x)                              (((x) >> 0) & 0x7)
++#define   C_030000_DIM                                 0xFFFFFFF8
++#define     V_030000_SQ_TEX_DIM_1D                     0x00000000
++#define     V_030000_SQ_TEX_DIM_2D                     0x00000001
++#define     V_030000_SQ_TEX_DIM_3D                     0x00000002
++#define     V_030000_SQ_TEX_DIM_CUBEMAP                0x00000003
++#define     V_030000_SQ_TEX_DIM_1D_ARRAY               0x00000004
++#define     V_030000_SQ_TEX_DIM_2D_ARRAY               0x00000005
++#define     V_030000_SQ_TEX_DIM_2D_MSAA                0x00000006
++#define     V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA          0x00000007
++#define   S_030000_NON_DISP_TILING_ORDER(x)            (((x) & 0x1) << 5)
++#define   G_030000_NON_DISP_TILING_ORDER(x)            (((x) >> 5) & 0x1)
++#define   C_030000_NON_DISP_TILING_ORDER               0xFFFFFFDF
++#define   S_030000_PITCH(x)                            (((x) & 0xFFF) << 6)
++#define   G_030000_PITCH(x)                            (((x) >> 6) & 0xFFF)
++#define   C_030000_PITCH                               0xFFFC003F
++#define   S_030000_TEX_WIDTH(x)                        (((x) & 0x3FFF) << 18)
++#define   G_030000_TEX_WIDTH(x)                        (((x) >> 18) & 0x3FFF)
++#define   C_030000_TEX_WIDTH                           0x0003FFFF
++#define R_030004_SQ_TEX_RESOURCE_WORD1_0             0x030004
++#define   S_030004_TEX_HEIGHT(x)                       (((x) & 0x3FFF) << 0)
++#define   G_030004_TEX_HEIGHT(x)                       (((x) >> 0) & 0x3FFF)
++#define   C_030004_TEX_HEIGHT                          0xFFFFC000
++#define   S_030004_TEX_DEPTH(x)                        (((x) & 0x1FFF) << 14)
++#define   G_030004_TEX_DEPTH(x)                        (((x) >> 14) & 0x1FFF)
++#define   C_030004_TEX_DEPTH                           0xF8003FFF
++#define   S_030004_ARRAY_MODE(x)                       (((x) & 0xF) << 28)
++#define   G_030004_ARRAY_MODE(x)                       (((x) >> 28) & 0xF)
++#define   C_030004_ARRAY_MODE                          0x0FFFFFFF
++#define R_030008_SQ_TEX_RESOURCE_WORD2_0             0x030008
++#define   S_030008_BASE_ADDRESS(x)                     (((x) & 0xFFFFFFFF) << 0)
++#define   G_030008_BASE_ADDRESS(x)                     (((x) >> 0) & 0xFFFFFFFF)
++#define   C_030008_BASE_ADDRESS                        0x00000000
++#define R_03000C_SQ_TEX_RESOURCE_WORD3_0             0x03000C
++#define   S_03000C_MIP_ADDRESS(x)                      (((x) & 0xFFFFFFFF) << 0)
++#define   G_03000C_MIP_ADDRESS(x)                      (((x) >> 0) & 0xFFFFFFFF)
++#define   C_03000C_MIP_ADDRESS                         0x00000000
++#define R_030010_SQ_TEX_RESOURCE_WORD4_0             0x030010
++#define   S_030010_FORMAT_COMP_X(x)                    (((x) & 0x3) << 0)
++#define   G_030010_FORMAT_COMP_X(x)                    (((x) >> 0) & 0x3)
++#define   C_030010_FORMAT_COMP_X                       0xFFFFFFFC
++#define     V_030010_SQ_FORMAT_COMP_UNSIGNED           0x00000000
++#define     V_030010_SQ_FORMAT_COMP_SIGNED             0x00000001
++#define     V_030010_SQ_FORMAT_COMP_UNSIGNED_BIASED    0x00000002
++#define   S_030010_FORMAT_COMP_Y(x)                    (((x) & 0x3) << 2)
++#define   G_030010_FORMAT_COMP_Y(x)                    (((x) >> 2) & 0x3)
++#define   C_030010_FORMAT_COMP_Y                       0xFFFFFFF3
++#define   S_030010_FORMAT_COMP_Z(x)                    (((x) & 0x3) << 4)
++#define   G_030010_FORMAT_COMP_Z(x)                    (((x) >> 4) & 0x3)
++#define   C_030010_FORMAT_COMP_Z                       0xFFFFFFCF
++#define   S_030010_FORMAT_COMP_W(x)                    (((x) & 0x3) << 6)
++#define   G_030010_FORMAT_COMP_W(x)                    (((x) >> 6) & 0x3)
++#define   C_030010_FORMAT_COMP_W                       0xFFFFFF3F
++#define   S_030010_NUM_FORMAT_ALL(x)                   (((x) & 0x3) << 8)
++#define   G_030010_NUM_FORMAT_ALL(x)                   (((x) >> 8) & 0x3)
++#define   C_030010_NUM_FORMAT_ALL                      0xFFFFFCFF
++#define     V_030010_SQ_NUM_FORMAT_NORM                0x00000000
++#define     V_030010_SQ_NUM_FORMAT_INT                 0x00000001
++#define     V_030010_SQ_NUM_FORMAT_SCALED              0x00000002
++#define   S_030010_SRF_MODE_ALL(x)                     (((x) & 0x1) << 10)
++#define   G_030010_SRF_MODE_ALL(x)                     (((x) >> 10) & 0x1)
++#define   C_030010_SRF_MODE_ALL                        0xFFFFFBFF
++#define     V_030010_SRF_MODE_ZERO_CLAMP_MINUS_ONE     0x00000000
++#define     V_030010_SRF_MODE_NO_ZERO                  0x00000001
++#define   S_030010_FORCE_DEGAMMA(x)                    (((x) & 0x1) << 11)
++#define   G_030010_FORCE_DEGAMMA(x)                    (((x) >> 11) & 0x1)
++#define   C_030010_FORCE_DEGAMMA                       0xFFFFF7FF
++#define   S_030010_ENDIAN_SWAP(x)                      (((x) & 0x3) << 12)
++#define   G_030010_ENDIAN_SWAP(x)                      (((x) >> 12) & 0x3)
++#define   C_030010_ENDIAN_SWAP                         0xFFFFCFFF
++#define   S_030010_DST_SEL_X(x)                        (((x) & 0x7) << 16)
++#define   G_030010_DST_SEL_X(x)                        (((x) >> 16) & 0x7)
++#define   C_030010_DST_SEL_X                           0xFFF8FFFF
++#define     V_030010_SQ_SEL_X                          0x00000000
++#define     V_030010_SQ_SEL_Y                          0x00000001
++#define     V_030010_SQ_SEL_Z                          0x00000002
++#define     V_030010_SQ_SEL_W                          0x00000003
++#define     V_030010_SQ_SEL_0                          0x00000004
++#define     V_030010_SQ_SEL_1                          0x00000005
++#define   S_030010_DST_SEL_Y(x)                        (((x) & 0x7) << 19)
++#define   G_030010_DST_SEL_Y(x)                        (((x) >> 19) & 0x7)
++#define   C_030010_DST_SEL_Y                           0xFFC7FFFF
++#define   S_030010_DST_SEL_Z(x)                        (((x) & 0x7) << 22)
++#define   G_030010_DST_SEL_Z(x)                        (((x) >> 22) & 0x7)
++#define   C_030010_DST_SEL_Z                           0xFE3FFFFF
++#define   S_030010_DST_SEL_W(x)                        (((x) & 0x7) << 25)
++#define   G_030010_DST_SEL_W(x)                        (((x) >> 25) & 0x7)
++#define   C_030010_DST_SEL_W                           0xF1FFFFFF
++#define   S_030010_BASE_LEVEL(x)                       (((x) & 0xF) << 28)
++#define   G_030010_BASE_LEVEL(x)                       (((x) >> 28) & 0xF)
++#define   C_030010_BASE_LEVEL                          0x0FFFFFFF
++#define R_030014_SQ_TEX_RESOURCE_WORD5_0             0x030014
++#define   S_030014_LAST_LEVEL(x)                       (((x) & 0xF) << 0)
++#define   G_030014_LAST_LEVEL(x)                       (((x) >> 0) & 0xF)
++#define   C_030014_LAST_LEVEL                          0xFFFFFFF0
++#define   S_030014_BASE_ARRAY(x)                       (((x) & 0x1FFF) << 4)
++#define   G_030014_BASE_ARRAY(x)                       (((x) >> 4) & 0x1FFF)
++#define   C_030014_BASE_ARRAY                          0xFFFE000F
++#define   S_030014_LAST_ARRAY(x)                       (((x) & 0x1FFF) << 17)
++#define   G_030014_LAST_ARRAY(x)                       (((x) >> 17) & 0x1FFF)
++#define   C_030014_LAST_ARRAY                          0xC001FFFF
++#define R_030018_SQ_TEX_RESOURCE_WORD6_0             0x030018
++#define   S_030018_MAX_ANISO(x)                        (((x) & 0x7) << 0)
++#define   G_030018_MAX_ANISO(x)                        (((x) >> 0) & 0x7)
++#define   C_030018_MAX_ANISO                           0xFFFFFFF8
++#define   S_030018_PERF_MODULATION(x)                  (((x) & 0x7) << 3)
++#define   G_030018_PERF_MODULATION(x)                  (((x) >> 3) & 0x7)
++#define   C_030018_PERF_MODULATION                     0xFFFFFFC7
++#define   S_030018_INTERLACED(x)                       (((x) & 0x1) << 6)
++#define   G_030018_INTERLACED(x)                       (((x) >> 6) & 0x1)
++#define   C_030018_INTERLACED                          0xFFFFFFBF
++#define   S_030018_TILE_SPLIT(x)                       (((x) & 0x7) << 29)
++#define   G_030018_TILE_SPLIT(x)                       (((x) >> 29) & 0x7)
++#define R_03001C_SQ_TEX_RESOURCE_WORD7_0             0x03001C
++#define   S_03001C_MACRO_TILE_ASPECT(x)                (((x) & 0x3) << 6)
++#define   G_03001C_MACRO_TILE_ASPECT(x)                (((x) >> 6) & 0x3)
++#define   S_03001C_BANK_WIDTH(x)                       (((x) & 0x3) << 8)
++#define   G_03001C_BANK_WIDTH(x)                       (((x) >> 8) & 0x3)
++#define   S_03001C_BANK_HEIGHT(x)                      (((x) & 0x3) << 10)
++#define   G_03001C_BANK_HEIGHT(x)                      (((x) >> 10) & 0x3)
++#define   S_03001C_NUM_BANKS(x)                        (((x) & 0x3) << 16)
++#define   G_03001C_NUM_BANKS(x)                        (((x) >> 16) & 0x3)
++#define   S_03001C_TYPE(x)                             (((x) & 0x3) << 30)
++#define   G_03001C_TYPE(x)                             (((x) >> 30) & 0x3)
++#define   C_03001C_TYPE                                0x3FFFFFFF
++#define     V_03001C_SQ_TEX_VTX_INVALID_TEXTURE        0x00000000
++#define     V_03001C_SQ_TEX_VTX_INVALID_BUFFER         0x00000001
++#define     V_03001C_SQ_TEX_VTX_VALID_TEXTURE          0x00000002
++#define     V_03001C_SQ_TEX_VTX_VALID_BUFFER           0x00000003
++#define   S_03001C_DATA_FORMAT(x)                      (((x) & 0x3F) << 0)
++#define   G_03001C_DATA_FORMAT(x)                      (((x) >> 0) & 0x3F)
++#define   C_03001C_DATA_FORMAT                         0xFFFFFFC0
+ 
+ #define SQ_VTX_CONSTANT_WORD0_0				0x30000
+ #define SQ_VTX_CONSTANT_WORD1_0				0x30004
+@@ -1198,8 +1617,40 @@
+ #define SQ_VTX_CONSTANT_WORD6_0                         0x30018
+ #define SQ_VTX_CONSTANT_WORD7_0                         0x3001c
+ 
++#define TD_PS_BORDER_COLOR_INDEX                        0xA400
++#define TD_PS_BORDER_COLOR_RED                          0xA404
++#define TD_PS_BORDER_COLOR_GREEN                        0xA408
++#define TD_PS_BORDER_COLOR_BLUE                         0xA40C
++#define TD_PS_BORDER_COLOR_ALPHA                        0xA410
++#define TD_VS_BORDER_COLOR_INDEX                        0xA414
++#define TD_VS_BORDER_COLOR_RED                          0xA418
++#define TD_VS_BORDER_COLOR_GREEN                        0xA41C
++#define TD_VS_BORDER_COLOR_BLUE                         0xA420
++#define TD_VS_BORDER_COLOR_ALPHA                        0xA424
++#define TD_GS_BORDER_COLOR_INDEX                        0xA428
++#define TD_GS_BORDER_COLOR_RED                          0xA42C
++#define TD_GS_BORDER_COLOR_GREEN                        0xA430
++#define TD_GS_BORDER_COLOR_BLUE                         0xA434
++#define TD_GS_BORDER_COLOR_ALPHA                        0xA438
++#define TD_HS_BORDER_COLOR_INDEX                        0xA43C
++#define TD_HS_BORDER_COLOR_RED                          0xA440
++#define TD_HS_BORDER_COLOR_GREEN                        0xA444
++#define TD_HS_BORDER_COLOR_BLUE                         0xA448
++#define TD_HS_BORDER_COLOR_ALPHA                        0xA44C
++#define TD_LS_BORDER_COLOR_INDEX                        0xA450
++#define TD_LS_BORDER_COLOR_RED                          0xA454
++#define TD_LS_BORDER_COLOR_GREEN                        0xA458
++#define TD_LS_BORDER_COLOR_BLUE                         0xA45C
++#define TD_LS_BORDER_COLOR_ALPHA                        0xA460
++#define TD_CS_BORDER_COLOR_INDEX                        0xA464
++#define TD_CS_BORDER_COLOR_RED                          0xA468
++#define TD_CS_BORDER_COLOR_GREEN                        0xA46C
++#define TD_CS_BORDER_COLOR_BLUE                         0xA470
++#define TD_CS_BORDER_COLOR_ALPHA                        0xA474
++
+ /* cayman 3D regs */
+-#define CAYMAN_VGT_OFFCHIP_LDS_BASE			0x89B0
++#define CAYMAN_VGT_OFFCHIP_LDS_BASE			0x89B4
++#define CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS			0x8E48
+ #define CAYMAN_DB_EQAA					0x28804
+ #define CAYMAN_DB_DEPTH_INFO				0x2803C
+ #define CAYMAN_PA_SC_AA_CONFIG				0x28BE0
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 636255b..9934c9d 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -42,6 +42,8 @@ extern void evergreen_irq_suspend(struct radeon_device *rdev);
+ extern int evergreen_mc_init(struct radeon_device *rdev);
+ extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
+ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
++extern void si_rlc_fini(struct radeon_device *rdev);
++extern int si_rlc_init(struct radeon_device *rdev);
+ 
+ #define EVERGREEN_PFP_UCODE_SIZE 1120
+ #define EVERGREEN_PM4_UCODE_SIZE 1376
+@@ -53,6 +55,8 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+ #define CAYMAN_RLC_UCODE_SIZE 1024
+ #define CAYMAN_MC_UCODE_SIZE 6037
+ 
++#define ARUBA_RLC_UCODE_SIZE 1536
++
+ /* Firmware Names */
+ MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
+ MODULE_FIRMWARE("radeon/BARTS_me.bin");
+@@ -68,6 +72,9 @@ MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
+ MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
+ MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
+ MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
++MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
++MODULE_FIRMWARE("radeon/ARUBA_me.bin");
++MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
+ 
+ #define BTC_IO_MC_REGS_SIZE 29
+ 
+@@ -326,6 +333,15 @@ int ni_init_microcode(struct radeon_device *rdev)
+ 		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
+ 		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
+ 		break;
++	case CHIP_ARUBA:
++		chip_name = "ARUBA";
++		rlc_chip_name = "ARUBA";
++		/* pfp/me same size as CAYMAN */
++		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
++		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
++		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
++		mc_req_size = 0;
++		break;
+ 	default: BUG();
+ 	}
+ 
+@@ -365,15 +381,18 @@ int ni_init_microcode(struct radeon_device *rdev)
+ 		err = -EINVAL;
+ 	}
+ 
+-	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+-	err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
+-	if (err)
+-		goto out;
+-	if (rdev->mc_fw->size != mc_req_size) {
+-		printk(KERN_ERR
+-		       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
+-		       rdev->mc_fw->size, fw_name);
+-		err = -EINVAL;
++	/* no MC ucode on TN */
++	if (!(rdev->flags & RADEON_IS_IGP)) {
++		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
++		err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
++		if (err)
++			goto out;
++		if (rdev->mc_fw->size != mc_req_size) {
++			printk(KERN_ERR
++			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
++			       rdev->mc_fw->size, fw_name);
++			err = -EINVAL;
++		}
+ 	}
+ out:
+ 	platform_device_unregister(pdev);
+@@ -478,6 +497,7 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ 	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
+ 	switch (rdev->family) {
+ 	case CHIP_CAYMAN:
++	case CHIP_ARUBA:
+ 		force_no_swizzle = true;
+ 		break;
+ 	default:
+@@ -610,7 +630,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ 
+ 	switch (rdev->family) {
+ 	case CHIP_CAYMAN:
+-	default:
+ 		rdev->config.cayman.max_shader_engines = 2;
+ 		rdev->config.cayman.max_pipes_per_simd = 4;
+ 		rdev->config.cayman.max_tile_pipes = 8;
+@@ -632,6 +651,56 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ 		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
+ 		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+ 		break;
++	case CHIP_ARUBA:
++	default:
++		rdev->config.cayman.max_shader_engines = 1;
++		rdev->config.cayman.max_pipes_per_simd = 4;
++		rdev->config.cayman.max_tile_pipes = 2;
++		if ((rdev->pdev->device == 0x9900) ||
++		    (rdev->pdev->device == 0x9901) ||
++		    (rdev->pdev->device == 0x9905) ||
++		    (rdev->pdev->device == 0x9906) ||
++		    (rdev->pdev->device == 0x9907) ||
++		    (rdev->pdev->device == 0x9908) ||
++		    (rdev->pdev->device == 0x9909) ||
++		    (rdev->pdev->device == 0x9910) ||
++		    (rdev->pdev->device == 0x9917)) {
++			rdev->config.cayman.max_simds_per_se = 6;
++			rdev->config.cayman.max_backends_per_se = 2;
++		} else if ((rdev->pdev->device == 0x9903) ||
++			   (rdev->pdev->device == 0x9904) ||
++			   (rdev->pdev->device == 0x990A) ||
++			   (rdev->pdev->device == 0x9913) ||
++			   (rdev->pdev->device == 0x9918)) {
++			rdev->config.cayman.max_simds_per_se = 4;
++			rdev->config.cayman.max_backends_per_se = 2;
++		} else if ((rdev->pdev->device == 0x9919) ||
++			   (rdev->pdev->device == 0x9990) ||
++			   (rdev->pdev->device == 0x9991) ||
++			   (rdev->pdev->device == 0x9994) ||
++			   (rdev->pdev->device == 0x99A0)) {
++			rdev->config.cayman.max_simds_per_se = 3;
++			rdev->config.cayman.max_backends_per_se = 1;
++		} else {
++			rdev->config.cayman.max_simds_per_se = 2;
++			rdev->config.cayman.max_backends_per_se = 1;
++		}
++		rdev->config.cayman.max_texture_channel_caches = 2;
++		rdev->config.cayman.max_gprs = 256;
++		rdev->config.cayman.max_threads = 256;
++		rdev->config.cayman.max_gs_threads = 32;
++		rdev->config.cayman.max_stack_entries = 512;
++		rdev->config.cayman.sx_num_of_sets = 8;
++		rdev->config.cayman.sx_max_export_size = 256;
++		rdev->config.cayman.sx_max_export_pos_size = 64;
++		rdev->config.cayman.sx_max_export_smx_size = 192;
++		rdev->config.cayman.max_hw_contexts = 8;
++		rdev->config.cayman.sq_num_cf_insts = 2;
++
++		rdev->config.cayman.sc_prim_fifo_size = 0x40;
++		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
++		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
++		break;
+ 	}
+ 
+ 	/* Initialize HDP */
+@@ -652,7 +721,9 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ 
+ 	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
+ 	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
+-	cgts_tcc_disable = 0xff000000;
++	cgts_tcc_disable = 0xffff0000;
++	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
++		cgts_tcc_disable &= ~(1 << (16 + i));
+ 	gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
+ 	gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
+ 	cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
+@@ -804,17 +875,23 @@ static void cayman_gpu_init(struct radeon_device *rdev)
+ 		rdev->config.cayman.tile_config |= (3 << 0);
+ 		break;
+ 	}
+-	switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+-	case 0: /* four banks */
+-		rdev->config.cayman.tile_config |= 0 << 4;
+-		break;
+-	case 1: /* eight banks */
++
++	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
++	if (rdev->flags & RADEON_IS_IGP)
+ 		rdev->config.cayman.tile_config |= 1 << 4;
+-		break;
+-	case 2: /* sixteen banks */
+-	default:
+-		rdev->config.cayman.tile_config |= 2 << 4;
+-		break;
++	else {
++		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
++		case 0: /* four banks */
++			rdev->config.cayman.tile_config |= 0 << 4;
++			break;
++		case 1: /* eight banks */
++			rdev->config.cayman.tile_config |= 1 << 4;
++			break;
++		case 2: /* sixteen banks */
++		default:
++			rdev->config.cayman.tile_config |= 2 << 4;
++			break;
++		}
+ 	}
+ 	rdev->config.cayman.tile_config |=
+ 		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+@@ -944,7 +1021,7 @@ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
+ 
+ int cayman_pcie_gart_enable(struct radeon_device *rdev)
+ {
+-	int r;
++	int i, r;
+ 
+ 	if (rdev->gart.robj == NULL) {
+ 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+@@ -955,9 +1032,12 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
+ 		return r;
+ 	radeon_gart_restore(rdev);
+ 	/* Setup TLB control */
+-	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
++	WREG32(MC_VM_MX_L1_TLB_CNTL,
++	       (0xA << 7) |
++	       ENABLE_L1_TLB |
+ 	       ENABLE_L1_FRAGMENT_PROCESSING |
+ 	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
++	       ENABLE_ADVANCED_DRIVER_MODEL |
+ 	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ 	/* Setup L2 cache */
+ 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+@@ -977,9 +1057,26 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
+ 	WREG32(VM_CONTEXT0_CNTL2, 0);
+ 	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+-	/* disable context1-7 */
++
++	WREG32(0x15D4, 0);
++	WREG32(0x15D8, 0);
++	WREG32(0x15DC, 0);
++
++	/* empty context1-7 */
++	for (i = 1; i < 8; i++) {
++		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
++		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
++		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
++			rdev->gart.table_addr >> 12);
++	}
++
++	/* enable context1-7 */
++	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
++	       (u32)(rdev->dummy_page.addr >> 12));
+ 	WREG32(VM_CONTEXT1_CNTL2, 0);
+ 	WREG32(VM_CONTEXT1_CNTL, 0);
++	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
++				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ 
+ 	cayman_pcie_gart_tlb_flush(rdev);
+ 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+@@ -1016,9 +1113,69 @@ void cayman_pcie_gart_fini(struct radeon_device *rdev)
+ 	radeon_gart_fini(rdev);
+ }
+ 
++void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
++			      int ring, u32 cp_int_cntl)
++{
++	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
++
++	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
++	WREG32(CP_INT_CNTL, cp_int_cntl);
++}
++
+ /*
+  * CP.
+  */
++void cayman_fence_ring_emit(struct radeon_device *rdev,
++			    struct radeon_fence *fence)
++{
++	struct radeon_ring *ring = &rdev->ring[fence->ring];
++	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
++
++	/* flush read cache over gart for this vmid */
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
++	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
++	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
++	radeon_ring_write(ring, 0xFFFFFFFF);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 10); /* poll interval */
++	/* EVENT_WRITE_EOP - flush caches, send int */
++	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
++	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
++	radeon_ring_write(ring, addr & 0xffffffff);
++	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
++	radeon_ring_write(ring, fence->seq);
++	radeon_ring_write(ring, 0);
++}
++
++void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
++{
++	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
++
++	/* set to DX10/11 mode */
++	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
++	radeon_ring_write(ring, 1);
++	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
++	radeon_ring_write(ring,
++#ifdef __BIG_ENDIAN
++			  (2 << 0) |
++#endif
++			  (ib->gpu_addr & 0xFFFFFFFC));
++	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
++	radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
++
++	/* flush read cache over gart for this vmid */
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
++	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
++	radeon_ring_write(ring, ib->vm_id);
++	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
++	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
++	radeon_ring_write(ring, 0xFFFFFFFF);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 10); /* poll interval */
++}
++
+ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
+ {
+ 	if (enable)
+@@ -1059,63 +1216,64 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
+ 
+ static int cayman_cp_start(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	int r, i;
+ 
+-	r = radeon_ring_lock(rdev, 7);
++	r = radeon_ring_lock(rdev, ring, 7);
+ 	if (r) {
+ 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ 		return r;
+ 	}
+-	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+-	radeon_ring_write(rdev, 0x1);
+-	radeon_ring_write(rdev, 0x0);
+-	radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
+-	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
++	radeon_ring_write(ring, 0x1);
++	radeon_ring_write(ring, 0x0);
++	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
++	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
++	radeon_ring_unlock_commit(rdev, ring);
+ 
+ 	cayman_cp_enable(rdev, true);
+ 
+-	r = radeon_ring_lock(rdev, cayman_default_size + 19);
++	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
+ 	if (r) {
+ 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ 		return r;
+ 	}
+ 
+ 	/* setup clear context state */
+-	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+-	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
++	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
++	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ 
+ 	for (i = 0; i < cayman_default_size; i++)
+-		radeon_ring_write(rdev, cayman_default_state[i]);
++		radeon_ring_write(ring, cayman_default_state[i]);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+-	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
++	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
++	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
+ 
+ 	/* set clear context state */
+-	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+-	radeon_ring_write(rdev, 0);
++	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
++	radeon_ring_write(ring, 0);
+ 
+ 	/* SQ_VTX_BASE_VTX_LOC */
+-	radeon_ring_write(rdev, 0xc0026f00);
+-	radeon_ring_write(rdev, 0x00000000);
+-	radeon_ring_write(rdev, 0x00000000);
+-	radeon_ring_write(rdev, 0x00000000);
++	radeon_ring_write(ring, 0xc0026f00);
++	radeon_ring_write(ring, 0x00000000);
++	radeon_ring_write(ring, 0x00000000);
++	radeon_ring_write(ring, 0x00000000);
+ 
+ 	/* Clear consts */
+-	radeon_ring_write(rdev, 0xc0036f00);
+-	radeon_ring_write(rdev, 0x00000bc4);
+-	radeon_ring_write(rdev, 0xffffffff);
+-	radeon_ring_write(rdev, 0xffffffff);
+-	radeon_ring_write(rdev, 0xffffffff);
++	radeon_ring_write(ring, 0xc0036f00);
++	radeon_ring_write(ring, 0x00000bc4);
++	radeon_ring_write(ring, 0xffffffff);
++	radeon_ring_write(ring, 0xffffffff);
++	radeon_ring_write(ring, 0xffffffff);
+ 
+-	radeon_ring_write(rdev, 0xc0026900);
+-	radeon_ring_write(rdev, 0x00000316);
+-	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+-	radeon_ring_write(rdev, 0x00000010); /*  */
++	radeon_ring_write(ring, 0xc0026900);
++	radeon_ring_write(ring, 0x00000316);
++	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
++	radeon_ring_write(ring, 0x00000010); /*  */
+ 
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_unlock_commit(rdev, ring);
+ 
+ 	/* XXX init other rings */
+ 
+@@ -1125,11 +1283,12 @@ static int cayman_cp_start(struct radeon_device *rdev)
+ static void cayman_cp_fini(struct radeon_device *rdev)
+ {
+ 	cayman_cp_enable(rdev, false);
+-	radeon_ring_fini(rdev);
++	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ }
+ 
+ int cayman_cp_resume(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring;
+ 	u32 tmp;
+ 	u32 rb_bufsz;
+ 	int r;
+@@ -1146,7 +1305,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
+ 	WREG32(GRBM_SOFT_RESET, 0);
+ 	RREG32(GRBM_SOFT_RESET);
+ 
+-	WREG32(CP_SEM_WAIT_TIMER, 0x4);
++	WREG32(CP_SEM_WAIT_TIMER, 0x0);
++	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
+ 
+ 	/* Set the write pointer delay */
+ 	WREG32(CP_RB_WPTR_DELAY, 0);
+@@ -1155,7 +1315,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
+ 
+ 	/* ring 0 - compute and gfx */
+ 	/* Set ring buffer size */
+-	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
++	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
++	rb_bufsz = drm_order(ring->ring_size / 8);
+ 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ #ifdef __BIG_ENDIAN
+ 	tmp |= BUF_SWAP_32BIT;
+@@ -1164,8 +1325,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
+ 
+ 	/* Initialize the ring buffer's read and write pointers */
+ 	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+-	rdev->cp.wptr = 0;
+-	WREG32(CP_RB0_WPTR, rdev->cp.wptr);
++	ring->wptr = 0;
++	WREG32(CP_RB0_WPTR, ring->wptr);
+ 
+ 	/* set the wb address wether it's enabled or not */
+ 	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+@@ -1182,13 +1343,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
+ 	mdelay(1);
+ 	WREG32(CP_RB0_CNTL, tmp);
+ 
+-	WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
++	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+ 
+-	rdev->cp.rptr = RREG32(CP_RB0_RPTR);
++	ring->rptr = RREG32(CP_RB0_RPTR);
+ 
+ 	/* ring1  - compute only */
+ 	/* Set ring buffer size */
+-	rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
++	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
++	rb_bufsz = drm_order(ring->ring_size / 8);
+ 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ #ifdef __BIG_ENDIAN
+ 	tmp |= BUF_SWAP_32BIT;
+@@ -1197,8 +1359,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
+ 
+ 	/* Initialize the ring buffer's read and write pointers */
+ 	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+-	rdev->cp1.wptr = 0;
+-	WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
++	ring->wptr = 0;
++	WREG32(CP_RB1_WPTR, ring->wptr);
+ 
+ 	/* set the wb address wether it's enabled or not */
+ 	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
+@@ -1207,13 +1369,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
+ 	mdelay(1);
+ 	WREG32(CP_RB1_CNTL, tmp);
+ 
+-	WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
++	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+ 
+-	rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
++	ring->rptr = RREG32(CP_RB1_RPTR);
+ 
+ 	/* ring2 - compute only */
+ 	/* Set ring buffer size */
+-	rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
++	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
++	rb_bufsz = drm_order(ring->ring_size / 8);
+ 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ #ifdef __BIG_ENDIAN
+ 	tmp |= BUF_SWAP_32BIT;
+@@ -1222,8 +1385,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
+ 
+ 	/* Initialize the ring buffer's read and write pointers */
+ 	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+-	rdev->cp2.wptr = 0;
+-	WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
++	ring->wptr = 0;
++	WREG32(CP_RB2_WPTR, ring->wptr);
+ 
+ 	/* set the wb address wether it's enabled or not */
+ 	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
+@@ -1232,28 +1395,28 @@ int cayman_cp_resume(struct radeon_device *rdev)
+ 	mdelay(1);
+ 	WREG32(CP_RB2_CNTL, tmp);
+ 
+-	WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
++	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+ 
+-	rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
++	ring->rptr = RREG32(CP_RB2_RPTR);
+ 
+ 	/* start the rings */
+ 	cayman_cp_start(rdev);
+-	rdev->cp.ready = true;
+-	rdev->cp1.ready = true;
+-	rdev->cp2.ready = true;
++	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
++	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
++	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ 	/* this only test cp0 */
+-	r = radeon_ring_test(rdev);
++	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ 	if (r) {
+-		rdev->cp.ready = false;
+-		rdev->cp1.ready = false;
+-		rdev->cp2.ready = false;
++		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
++		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
++		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ 		return r;
+ 	}
+ 
+ 	return 0;
+ }
+ 
+-bool cayman_gpu_is_lockup(struct radeon_device *rdev)
++bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+ 	u32 srbm_status;
+ 	u32 grbm_status;
+@@ -1266,20 +1429,20 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev)
+ 	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+ 	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
+ 	if (!(grbm_status & GUI_ACTIVE)) {
+-		r100_gpu_lockup_update(lockup, &rdev->cp);
++		r100_gpu_lockup_update(lockup, ring);
+ 		return false;
+ 	}
+ 	/* force CP activities */
+-	r = radeon_ring_lock(rdev, 2);
++	r = radeon_ring_lock(rdev, ring, 2);
+ 	if (!r) {
+ 		/* PACKET2 NOP */
+-		radeon_ring_write(rdev, 0x80000000);
+-		radeon_ring_write(rdev, 0x80000000);
+-		radeon_ring_unlock_commit(rdev);
++		radeon_ring_write(ring, 0x80000000);
++		radeon_ring_write(ring, 0x80000000);
++		radeon_ring_unlock_commit(rdev, ring);
+ 	}
+ 	/* XXX deal with CP0,1,2 */
+-	rdev->cp.rptr = RREG32(CP_RB0_RPTR);
+-	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
++	ring->rptr = RREG32(ring->rptr_reg);
++	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ }
+ 
+ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
+@@ -1299,6 +1462,15 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
+ 		RREG32(GRBM_STATUS_SE1));
+ 	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+ 		RREG32(SRBM_STATUS));
++	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
++		 RREG32(0x14F8));
++	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
++		 RREG32(0x14D8));
++	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
++		 RREG32(0x14FC));
++	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
++		 RREG32(0x14DC));
++
+ 	evergreen_mc_stop(rdev, &save);
+ 	if (evergreen_mc_wait_for_idle(rdev)) {
+ 		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+@@ -1329,6 +1501,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
+ 	(void)RREG32(GRBM_SOFT_RESET);
+ 	/* Wait a little for things to settle down */
+ 	udelay(50);
++
+ 	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+ 		RREG32(GRBM_STATUS));
+ 	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+@@ -1348,23 +1521,35 @@ int cayman_asic_reset(struct radeon_device *rdev)
+ 
+ static int cayman_startup(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	int r;
+ 
+ 	/* enable pcie gen2 link */
+ 	evergreen_pcie_gen2_enable(rdev);
+ 
+-	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+-		r = ni_init_microcode(rdev);
++	if (rdev->flags & RADEON_IS_IGP) {
++		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
++			r = ni_init_microcode(rdev);
++			if (r) {
++				DRM_ERROR("Failed to load firmware!\n");
++				return r;
++			}
++		}
++	} else {
++		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
++			r = ni_init_microcode(rdev);
++			if (r) {
++				DRM_ERROR("Failed to load firmware!\n");
++				return r;
++			}
++		}
++
++		r = ni_mc_load_microcode(rdev);
+ 		if (r) {
+-			DRM_ERROR("Failed to load firmware!\n");
++			DRM_ERROR("Failed to load MC firmware!\n");
+ 			return r;
+ 		}
+ 	}
+-	r = ni_mc_load_microcode(rdev);
+-	if (r) {
+-		DRM_ERROR("Failed to load MC firmware!\n");
+-		return r;
+-	}
+ 
+ 	r = r600_vram_scratch_init(rdev);
+ 	if (r)
+@@ -1379,15 +1564,42 @@ static int cayman_startup(struct radeon_device *rdev)
+ 	r = evergreen_blit_init(rdev);
+ 	if (r) {
+ 		r600_blit_fini(rdev);
+-		rdev->asic->copy = NULL;
++		rdev->asic->copy.copy = NULL;
+ 		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ 	}
+ 
++	/* allocate rlc buffers */
++	if (rdev->flags & RADEON_IS_IGP) {
++		r = si_rlc_init(rdev);
++		if (r) {
++			DRM_ERROR("Failed to init rlc BOs!\n");
++			return r;
++		}
++	}
++
+ 	/* allocate wb buffer */
+ 	r = radeon_wb_init(rdev);
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
++	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
++	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
+ 	/* Enable IRQ */
+ 	r = r600_irq_init(rdev);
+ 	if (r) {
+@@ -1397,7 +1609,9 @@ static int cayman_startup(struct radeon_device *rdev)
+ 	}
+ 	evergreen_irq_set(rdev);
+ 
+-	r = radeon_ring_init(rdev, rdev->cp.ring_size);
++	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
++			     CP_RB0_RPTR, CP_RB0_WPTR,
++			     0, 0xfffff, RADEON_CP_PACKET2);
+ 	if (r)
+ 		return r;
+ 	r = cayman_cp_load_microcode(rdev);
+@@ -1407,6 +1621,21 @@ static int cayman_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_ib_pool_start(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
++	if (r) {
++		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
++		rdev->accel_working = false;
++		return r;
++	}
++
++	r = radeon_vm_manager_start(rdev);
++	if (r)
++		return r;
++
+ 	return 0;
+ }
+ 
+@@ -1421,32 +1650,27 @@ int cayman_resume(struct radeon_device *rdev)
+ 	/* post card */
+ 	atom_asic_init(rdev->mode_info.atom_context);
+ 
++	rdev->accel_working = true;
+ 	r = cayman_startup(rdev);
+ 	if (r) {
+ 		DRM_ERROR("cayman startup failed on resume\n");
++		rdev->accel_working = false;
+ 		return r;
+ 	}
+-
+-	r = r600_ib_test(rdev);
+-	if (r) {
+-		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+-		return r;
+-	}
+-
+ 	return r;
+-
+ }
+ 
+ int cayman_suspend(struct radeon_device *rdev)
+ {
+ 	/* FIXME: we should wait for ring to be empty */
++	radeon_ib_pool_suspend(rdev);
++	radeon_vm_manager_suspend(rdev);
++	r600_blit_suspend(rdev);
+ 	cayman_cp_enable(rdev, false);
+-	rdev->cp.ready = false;
++	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ 	evergreen_irq_suspend(rdev);
+ 	radeon_wb_disable(rdev);
+ 	cayman_pcie_gart_disable(rdev);
+-	r600_blit_suspend(rdev);
+-
+ 	return 0;
+ }
+ 
+@@ -1458,6 +1682,7 @@ int cayman_suspend(struct radeon_device *rdev)
+  */
+ int cayman_init(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	int r;
+ 
+ 	/* This don't do much */
+@@ -1510,8 +1735,8 @@ int cayman_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
+-	rdev->cp.ring_obj = NULL;
+-	r600_ring_init(rdev, 1024 * 1024);
++	ring->ring_obj = NULL;
++	r600_ring_init(rdev, ring, 1024 * 1024);
+ 
+ 	rdev->ih.ring_obj = NULL;
+ 	r600_ih_ring_init(rdev, 64 * 1024);
+@@ -1520,35 +1745,40 @@ int cayman_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_ib_pool_init(rdev);
+ 	rdev->accel_working = true;
++	if (r) {
++		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
++		rdev->accel_working = false;
++	}
++	r = radeon_vm_manager_init(rdev);
++	if (r) {
++		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
++	}
++
+ 	r = cayman_startup(rdev);
+ 	if (r) {
+ 		dev_err(rdev->dev, "disabling GPU acceleration\n");
+ 		cayman_cp_fini(rdev);
+ 		r600_irq_fini(rdev);
++		if (rdev->flags & RADEON_IS_IGP)
++			si_rlc_fini(rdev);
+ 		radeon_wb_fini(rdev);
++		r100_ib_fini(rdev);
++		radeon_vm_manager_fini(rdev);
+ 		radeon_irq_kms_fini(rdev);
+ 		cayman_pcie_gart_fini(rdev);
+ 		rdev->accel_working = false;
+ 	}
+-	if (rdev->accel_working) {
+-		r = radeon_ib_pool_init(rdev);
+-		if (r) {
+-			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
+-			rdev->accel_working = false;
+-		}
+-		r = r600_ib_test(rdev);
+-		if (r) {
+-			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+-			rdev->accel_working = false;
+-		}
+-	}
+ 
+ 	/* Don't start up if the MC ucode is missing.
+ 	 * The default clocks and voltages before the MC ucode
+ 	 * is loaded are not suffient for advanced operations.
++	 *
++	 * We can skip this check for TN, because there is no MC
++	 * ucode.
+ 	 */
+-	if (!rdev->mc_fw) {
++	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+ 		DRM_ERROR("radeon: MC ucode required for NI+.\n");
+ 		return -EINVAL;
+ 	}
+@@ -1561,12 +1791,16 @@ void cayman_fini(struct radeon_device *rdev)
+ 	r600_blit_fini(rdev);
+ 	cayman_cp_fini(rdev);
+ 	r600_irq_fini(rdev);
++	if (rdev->flags & RADEON_IS_IGP)
++		si_rlc_fini(rdev);
+ 	radeon_wb_fini(rdev);
+-	radeon_ib_pool_fini(rdev);
++	radeon_vm_manager_fini(rdev);
++	r100_ib_fini(rdev);
+ 	radeon_irq_kms_fini(rdev);
+ 	cayman_pcie_gart_fini(rdev);
+ 	r600_vram_scratch_fini(rdev);
+ 	radeon_gem_fini(rdev);
++	radeon_semaphore_driver_fini(rdev);
+ 	radeon_fence_driver_fini(rdev);
+ 	radeon_bo_fini(rdev);
+ 	radeon_atombios_fini(rdev);
+@@ -1574,3 +1808,89 @@ void cayman_fini(struct radeon_device *rdev)
+ 	rdev->bios = NULL;
+ }
+ 
++/*
++ * vm
++ */
++int cayman_vm_init(struct radeon_device *rdev)
++{
++	/* number of VMs */
++	rdev->vm_manager.nvm = 8;
++	/* base offset of vram pages */
++	if (rdev->flags & RADEON_IS_IGP) {
++		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
++		tmp <<= 22;
++		rdev->vm_manager.vram_base_offset = tmp;
++	} else
++		rdev->vm_manager.vram_base_offset = 0;
++	return 0;
++}
++
++void cayman_vm_fini(struct radeon_device *rdev)
++{
++}
++
++int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
++{
++	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
++	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
++	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
++	/* flush hdp cache */
++	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
++	/* bits 0-7 are the VM contexts0-7 */
++	WREG32(VM_INVALIDATE_REQUEST, 1 << id);
++	return 0;
++}
++
++void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
++{
++	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
++	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
++	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
++	/* flush hdp cache */
++	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
++	/* bits 0-7 are the VM contexts0-7 */
++	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
++}
++
++void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
++{
++	if (vm->id == -1)
++		return;
++
++	/* flush hdp cache */
++	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
++	/* bits 0-7 are the VM contexts0-7 */
++	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
++}
++
++#define R600_PTE_VALID     (1 << 0)
++#define R600_PTE_SYSTEM    (1 << 1)
++#define R600_PTE_SNOOPED   (1 << 2)
++#define R600_PTE_READABLE  (1 << 5)
++#define R600_PTE_WRITEABLE (1 << 6)
++
++uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
++			      struct radeon_vm *vm,
++			      uint32_t flags)
++{
++	uint32_t r600_flags = 0;
++
++	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
++	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
++	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
++	if (flags & RADEON_VM_PAGE_SYSTEM) {
++		r600_flags |= R600_PTE_SYSTEM;
++		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
++	}
++	return r600_flags;
++}
++
++void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
++			unsigned pfn, uint64_t addr, uint32_t flags)
++{
++	void __iomem *ptr = (void *)vm->pt;
++
++	addr = addr & 0xFFFFFFFFFFFFF000ULL;
++	addr |= flags;
++	writeq(addr, ptr + (pfn * 8));
++}
+diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
+index 4672869..2aa7046 100644
+--- a/drivers/gpu/drm/radeon/nid.h
++++ b/drivers/gpu/drm/radeon/nid.h
+@@ -42,6 +42,9 @@
+ #define CAYMAN_MAX_TCC_MASK          0xFF
+ 
+ #define DMIF_ADDR_CONFIG  				0xBD4
++#define	SRBM_GFX_CNTL				        0x0E44
++#define		RINGID(x)					(((x) & 0x3) << 0)
++#define		VMID(x)						(((x) & 0x7) << 0)
+ #define	SRBM_STATUS				        0x0E50
+ 
+ #define VM_CONTEXT0_REQUEST_RESPONSE			0x1470
+@@ -103,6 +106,7 @@
+ #define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
+ #define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
+ #define		ENABLE_ADVANCED_DRIVER_MODEL			(1 << 6)
++#define	FUS_MC_VM_FB_OFFSET				0x2068
+ 
+ #define MC_SHARED_BLACKOUT_CNTL           		0x20ac
+ #define	MC_ARB_RAMCFG					0x2760
+@@ -219,6 +223,8 @@
+ #define	SCRATCH_UMSK					0x8540
+ #define	SCRATCH_ADDR					0x8544
+ #define	CP_SEM_WAIT_TIMER				0x85BC
++#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x85C8
++#define	CP_COHER_CNTL2					0x85E8
+ #define CP_ME_CNTL					0x86D8
+ #define		CP_ME_HALT					(1 << 28)
+ #define		CP_PFP_HALT					(1 << 26)
+@@ -394,6 +400,12 @@
+ #define	CP_RB0_RPTR_ADDR				0xC10C
+ #define	CP_RB0_RPTR_ADDR_HI				0xC110
+ #define	CP_RB0_WPTR					0xC114
++
++#define CP_INT_CNTL                                     0xC124
++#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
++#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
++#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
++
+ #define	CP_RB1_BASE					0xC180
+ #define	CP_RB1_CNTL					0xC184
+ #define	CP_RB1_RPTR_ADDR				0xC188
+@@ -411,6 +423,10 @@
+ #define	CP_ME_RAM_DATA					0xC160
+ #define	CP_DEBUG					0xC1FC
+ 
++#define VGT_EVENT_INITIATOR                             0x28a90
++#       define CACHE_FLUSH_AND_INV_EVENT_TS                     (0x14 << 0)
++#       define CACHE_FLUSH_AND_INV_EVENT                        (0x16 << 0)
++
+ /*
+  * PM4
+  */
+@@ -445,6 +461,7 @@
+ #define	PACKET3_DISPATCH_DIRECT				0x15
+ #define	PACKET3_DISPATCH_INDIRECT			0x16
+ #define	PACKET3_INDIRECT_BUFFER_END			0x17
++#define	PACKET3_MODE_CONTROL				0x18
+ #define	PACKET3_SET_PREDICATION				0x20
+ #define	PACKET3_REG_RMW					0x21
+ #define	PACKET3_COND_EXEC				0x22
+@@ -494,7 +511,27 @@
+ #define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+ #define	PACKET3_COND_WRITE				0x45
+ #define	PACKET3_EVENT_WRITE				0x46
++#define		EVENT_TYPE(x)                           ((x) << 0)
++#define		EVENT_INDEX(x)                          ((x) << 8)
++                /* 0 - any non-TS event
++		 * 1 - ZPASS_DONE
++		 * 2 - SAMPLE_PIPELINESTAT
++		 * 3 - SAMPLE_STREAMOUTSTAT*
++		 * 4 - *S_PARTIAL_FLUSH
++		 * 5 - TS events
++		 */
+ #define	PACKET3_EVENT_WRITE_EOP				0x47
++#define		DATA_SEL(x)                             ((x) << 29)
++                /* 0 - discard
++		 * 1 - send low 32bit data
++		 * 2 - send 64bit data
++		 * 3 - send 64bit counter value
++		 */
++#define		INT_SEL(x)                              ((x) << 24)
++                /* 0 - none
++		 * 1 - interrupt only (DATA_SEL = 0)
++		 * 2 - interrupt when data write is confirmed
++		 */
+ #define	PACKET3_EVENT_WRITE_EOS				0x48
+ #define	PACKET3_PREAMBLE_CNTL				0x4A
+ #              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index fad7cd1..fe33d35 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -65,6 +65,40 @@ MODULE_FIRMWARE(FIRMWARE_R520);
+ 
+ #include "r100_track.h"
+ 
++void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
++{
++	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
++	int i;
++
++	if (radeon_crtc->crtc_id == 0) {
++		if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
++			for (i = 0; i < rdev->usec_timeout; i++) {
++				if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
++					break;
++				udelay(1);
++			}
++			for (i = 0; i < rdev->usec_timeout; i++) {
++				if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
++					break;
++				udelay(1);
++			}
++		}
++	} else {
++		if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
++			for (i = 0; i < rdev->usec_timeout; i++) {
++				if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
++					break;
++				udelay(1);
++			}
++			for (i = 0; i < rdev->usec_timeout; i++) {
++				if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
++					break;
++				udelay(1);
++			}
++		}
++	}
++}
++
+ /* This files gather functions specifics to:
+  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+  */
+@@ -87,23 +121,27 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
+ 		r100_cs_dump_packet(p, pkt);
+ 		return r;
+ 	}
++
+ 	value = radeon_get_ib_value(p, idx);
+ 	tmp = value & 0x003fffff;
+ 	tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+ 
+-	if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+-		tile_flags |= RADEON_DST_TILE_MACRO;
+-	if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+-		if (reg == RADEON_SRC_PITCH_OFFSET) {
+-			DRM_ERROR("Cannot src blit from microtiled surface\n");
+-			r100_cs_dump_packet(p, pkt);
+-			return -EINVAL;
++	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
++		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
++			tile_flags |= RADEON_DST_TILE_MACRO;
++		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
++			if (reg == RADEON_SRC_PITCH_OFFSET) {
++				DRM_ERROR("Cannot src blit from microtiled surface\n");
++				r100_cs_dump_packet(p, pkt);
++				return -EINVAL;
++			}
++			tile_flags |= RADEON_DST_TILE_MICRO;
+ 		}
+-		tile_flags |= RADEON_DST_TILE_MICRO;
+-	}
+ 
+-	tmp |= tile_flags;
+-	p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
++		tmp |= tile_flags;
++		p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
++	} else
++		p->ib->ptr[idx] = (value & 0xffc00000) | tmp;
+ 	return 0;
+ }
+ 
+@@ -412,7 +450,7 @@ void r100_pm_misc(struct radeon_device *rdev)
+ 	/* set pcie lanes */
+ 	if ((rdev->flags & RADEON_IS_PCIE) &&
+ 	    !(rdev->flags & RADEON_IS_IGP) &&
+-	    rdev->asic->set_pcie_lanes &&
++	    rdev->asic->pm.set_pcie_lanes &&
+ 	    (ps->pcie_lanes !=
+ 	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+ 		radeon_set_pcie_lanes(rdev,
+@@ -592,8 +630,8 @@ int r100_pci_gart_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+-	rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+-	rdev->asic->gart_set_page = &r100_pci_gart_set_page;
++	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
++	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
+ 	return radeon_gart_table_ram_alloc(rdev);
+ }
+ 
+@@ -667,7 +705,7 @@ int r100_irq_set(struct radeon_device *rdev)
+ 		WREG32(R_000040_GEN_INT_CNTL, 0);
+ 		return -EINVAL;
+ 	}
+-	if (rdev->irq.sw_int) {
++	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ 		tmp |= RADEON_SW_INT_ENABLE;
+ 	}
+ 	if (rdev->irq.gui_idle) {
+@@ -739,7 +777,7 @@ int r100_irq_process(struct radeon_device *rdev)
+ 	while (status) {
+ 		/* SW interrupt */
+ 		if (status & RADEON_SW_INT_TEST) {
+-			radeon_fence_process(rdev);
++			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ 		}
+ 		/* gui idle interrupt */
+ 		if (status & RADEON_GUI_IDLE_STAT) {
+@@ -809,25 +847,36 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
+ void r100_fence_ring_emit(struct radeon_device *rdev,
+ 			  struct radeon_fence *fence)
+ {
++	struct radeon_ring *ring = &rdev->ring[fence->ring];
++
+ 	/* We have to make sure that caches are flushed before
+ 	 * CPU might read something from VRAM. */
+-	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+-	radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
+-	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+-	radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
++	radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
++	radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
++	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
++	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
+ 	/* Wait until IDLE & CLEAN */
+-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+-	radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
+-	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+-	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
++	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
++	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
++	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
++	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+ 				RADEON_HDP_READ_BUFFER_INVALIDATE);
+-	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+-	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
++	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
++	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+ 	/* Emit fence sequence & fire IRQ */
+-	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
+-	radeon_ring_write(rdev, fence->seq);
+-	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
+-	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
++	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
++	radeon_ring_write(ring, fence->seq);
++	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
++	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
++}
++
++void r100_semaphore_ring_emit(struct radeon_device *rdev,
++			      struct radeon_ring *ring,
++			      struct radeon_semaphore *semaphore,
++			      bool emit_wait)
++{
++	/* Unused on older asics, since we don't have semaphores or multiple rings */
++	BUG();
+ }
+ 
+ int r100_copy_blit(struct radeon_device *rdev,
+@@ -836,6 +885,7 @@ int r100_copy_blit(struct radeon_device *rdev,
+ 		   unsigned num_gpu_pages,
+ 		   struct radeon_fence *fence)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	uint32_t cur_pages;
+ 	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
+ 	uint32_t pitch;
+@@ -853,7 +903,7 @@ int r100_copy_blit(struct radeon_device *rdev,
+ 
+ 	/* Ask for enough room for blit + flush + fence */
+ 	ndw = 64 + (10 * num_loops);
+-	r = radeon_ring_lock(rdev, ndw);
++	r = radeon_ring_lock(rdev, ring, ndw);
+ 	if (r) {
+ 		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
+ 		return -EINVAL;
+@@ -867,8 +917,8 @@ int r100_copy_blit(struct radeon_device *rdev,
+ 
+ 		/* pages are in Y direction - height
+ 		   page width in X direction - width */
+-		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
+-		radeon_ring_write(rdev,
++		radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
++		radeon_ring_write(ring,
+ 				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+ 				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ 				  RADEON_GMC_SRC_CLIPPING |
+@@ -880,26 +930,26 @@ int r100_copy_blit(struct radeon_device *rdev,
+ 				  RADEON_DP_SRC_SOURCE_MEMORY |
+ 				  RADEON_GMC_CLR_CMP_CNTL_DIS |
+ 				  RADEON_GMC_WR_MSK_DIS);
+-		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
+-		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
+-		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
+-		radeon_ring_write(rdev, 0);
+-		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
+-		radeon_ring_write(rdev, num_gpu_pages);
+-		radeon_ring_write(rdev, num_gpu_pages);
+-		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
+-	}
+-	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+-	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
+-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+-	radeon_ring_write(rdev,
++		radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
++		radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
++		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
++		radeon_ring_write(ring, 0);
++		radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
++		radeon_ring_write(ring, num_gpu_pages);
++		radeon_ring_write(ring, num_gpu_pages);
++		radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
++	}
++	radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
++	radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
++	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
++	radeon_ring_write(ring,
+ 			  RADEON_WAIT_2D_IDLECLEAN |
+ 			  RADEON_WAIT_HOST_IDLECLEAN |
+ 			  RADEON_WAIT_DMA_GUI_IDLE);
+ 	if (fence) {
+ 		r = radeon_fence_emit(rdev, fence);
+ 	}
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_unlock_commit(rdev, ring);
+ 	return r;
+ }
+ 
+@@ -918,21 +968,21 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)
+ 	return -1;
+ }
+ 
+-void r100_ring_start(struct radeon_device *rdev)
++void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+ 	int r;
+ 
+-	r = radeon_ring_lock(rdev, 2);
++	r = radeon_ring_lock(rdev, ring, 2);
+ 	if (r) {
+ 		return;
+ 	}
+-	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
++	radeon_ring_write(ring,
+ 			  RADEON_ISYNC_ANY2D_IDLE3D |
+ 			  RADEON_ISYNC_ANY3D_IDLE2D |
+ 			  RADEON_ISYNC_WAIT_IDLEGUI |
+ 			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_unlock_commit(rdev, ring);
+ }
+ 
+ 
+@@ -1033,6 +1083,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
+ 
+ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	unsigned rb_bufsz;
+ 	unsigned rb_blksz;
+ 	unsigned max_fetch;
+@@ -1058,7 +1109,9 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
+ 	rb_bufsz = drm_order(ring_size / 8);
+ 	ring_size = (1 << (rb_bufsz + 1)) * 4;
+ 	r100_cp_load_microcode(rdev);
+-	r = radeon_ring_init(rdev, ring_size);
++	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
++			     RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
++			     0, 0x7fffff, RADEON_CP_PACKET2);
+ 	if (r) {
+ 		return r;
+ 	}
+@@ -1067,7 +1120,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
+ 	rb_blksz = 9;
+ 	/* cp will read 128bytes at a time (4 dwords) */
+ 	max_fetch = 1;
+-	rdev->cp.align_mask = 16 - 1;
++	ring->align_mask = 16 - 1;
+ 	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
+ 	pre_write_timer = 64;
+ 	/* Force CP_RB_WPTR write if written more than one time before the
+@@ -1097,13 +1150,13 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
+ 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
+ 
+ 	/* Set ring address */
+-	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
+-	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
++	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
++	WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
+ 	/* Force read & write ptr to 0 */
+ 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
+ 	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+-	rdev->cp.wptr = 0;
+-	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
++	ring->wptr = 0;
++	WREG32(RADEON_CP_RB_WPTR, ring->wptr);
+ 
+ 	/* set the wb address whether it's enabled or not */
+ 	WREG32(R_00070C_CP_RB_RPTR_ADDR,
+@@ -1119,7 +1172,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
+ 
+ 	WREG32(RADEON_CP_RB_CNTL, tmp);
+ 	udelay(10);
+-	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
++	ring->rptr = RREG32(RADEON_CP_RB_RPTR);
+ 	/* Set cp mode to bus mastering & enable cp*/
+ 	WREG32(RADEON_CP_CSQ_MODE,
+ 	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
+@@ -1127,13 +1180,13 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
+ 	WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
+ 	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
+ 	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
+-	radeon_ring_start(rdev);
+-	r = radeon_ring_test(rdev);
++	radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
++	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+ 	if (r) {
+ 		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
+ 		return r;
+ 	}
+-	rdev->cp.ready = true;
++	ring->ready = true;
+ 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+ 	return 0;
+ }
+@@ -1145,7 +1198,7 @@ void r100_cp_fini(struct radeon_device *rdev)
+ 	}
+ 	/* Disable ring */
+ 	r100_cp_disable(rdev);
+-	radeon_ring_fini(rdev);
++	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ 	DRM_INFO("radeon: cp finalized\n");
+ }
+ 
+@@ -1153,7 +1206,7 @@ void r100_cp_disable(struct radeon_device *rdev)
+ {
+ 	/* Disable ring */
+ 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+-	rdev->cp.ready = false;
++	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ 	WREG32(RADEON_CP_CSQ_MODE, 0);
+ 	WREG32(RADEON_CP_CSQ_CNTL, 0);
+ 	WREG32(R_000770_SCRATCH_UMSK, 0);
+@@ -1163,13 +1216,6 @@ void r100_cp_disable(struct radeon_device *rdev)
+ 	}
+ }
+ 
+-void r100_cp_commit(struct radeon_device *rdev)
+-{
+-	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
+-	(void)RREG32(RADEON_CP_RB_WPTR);
+-}
+-
+-
+ /*
+  * CS functions
+  */
+@@ -1543,7 +1589,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
+ 			r100_cs_dump_packet(p, pkt);
+ 			return r;
+ 		}
+-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
++		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
++			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
++				tile_flags |= RADEON_TXO_MACRO_TILE;
++			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
++				tile_flags |= RADEON_TXO_MICRO_TILE_X2;
++
++			tmp = idx_value & ~(0x7 << 2);
++			tmp |= tile_flags;
++			ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
++		} else
++			ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ 		track->textures[i].robj = reloc->robj;
+ 		track->tex_dirty = true;
+ 		break;
+@@ -1614,15 +1670,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
+ 			r100_cs_dump_packet(p, pkt);
+ 			return r;
+ 		}
+-
+-		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+-			tile_flags |= RADEON_COLOR_TILE_ENABLE;
+-		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+-			tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+-
+-		tmp = idx_value & ~(0x7 << 16);
+-		tmp |= tile_flags;
+-		ib[idx] = tmp;
++		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
++			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
++				tile_flags |= RADEON_COLOR_TILE_ENABLE;
++			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
++				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
++
++			tmp = idx_value & ~(0x7 << 16);
++			tmp |= tile_flags;
++			ib[idx] = tmp;
++		} else
++			ib[idx] = idx_value;
+ 
+ 		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+ 		track->cb_dirty = true;
+@@ -2097,9 +2155,9 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
+ 	return -1;
+ }
+ 
+-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
++void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
+ {
+-	lockup->last_cp_rptr = cp->rptr;
++	lockup->last_cp_rptr = ring->rptr;
+ 	lockup->last_jiffies = jiffies;
+ }
+ 
+@@ -2124,20 +2182,20 @@ void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp
+  * false positive when CP is just gived nothing to do.
+  *
+  **/
+-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
++bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
+ {
+ 	unsigned long cjiffies, elapsed;
+ 
+ 	cjiffies = jiffies;
+ 	if (!time_after(cjiffies, lockup->last_jiffies)) {
+ 		/* likely a wrap around */
+-		lockup->last_cp_rptr = cp->rptr;
++		lockup->last_cp_rptr = ring->rptr;
+ 		lockup->last_jiffies = jiffies;
+ 		return false;
+ 	}
+-	if (cp->rptr != lockup->last_cp_rptr) {
++	if (ring->rptr != lockup->last_cp_rptr) {
+ 		/* CP is still working no lockup */
+-		lockup->last_cp_rptr = cp->rptr;
++		lockup->last_cp_rptr = ring->rptr;
+ 		lockup->last_jiffies = jiffies;
+ 		return false;
+ 	}
+@@ -2150,32 +2208,31 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
+ 	return false;
+ }
+ 
+-bool r100_gpu_is_lockup(struct radeon_device *rdev)
++bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+ 	u32 rbbm_status;
+ 	int r;
+ 
+ 	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+ 	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+-		r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
++		r100_gpu_lockup_update(&rdev->config.r100.lockup, ring);
+ 		return false;
+ 	}
+ 	/* force CP activities */
+-	r = radeon_ring_lock(rdev, 2);
++	r = radeon_ring_lock(rdev, ring, 2);
+ 	if (!r) {
+ 		/* PACKET2 NOP */
+-		radeon_ring_write(rdev, 0x80000000);
+-		radeon_ring_write(rdev, 0x80000000);
+-		radeon_ring_unlock_commit(rdev);
++		radeon_ring_write(ring, 0x80000000);
++		radeon_ring_write(ring, 0x80000000);
++		radeon_ring_unlock_commit(rdev, ring);
+ 	}
+-	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+-	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
++	ring->rptr = RREG32(ring->rptr_reg);
++	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
+ }
+ 
+ void r100_bm_disable(struct radeon_device *rdev)
+ {
+ 	u32 tmp;
+-	u16 tmp16;
+ 
+ 	/* disable bus mastering */
+ 	tmp = RREG32(R_000030_BUS_CNTL);
+@@ -2186,8 +2243,7 @@ void r100_bm_disable(struct radeon_device *rdev)
+ 	WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
+ 	tmp = RREG32(RADEON_BUS_CNTL);
+ 	mdelay(1);
+-	pci_read_config_word(rdev->pdev, 0x4, &tmp16);
+-	pci_write_config_word(rdev->pdev, 0x4, tmp16 & 0xFFFB);
++	pci_clear_master(rdev->pdev);
+ 	mdelay(1);
+ }
+ 
+@@ -2497,7 +2553,7 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev)
+ 	 * or the chip could hang on a subsequent access
+ 	 */
+ 	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
+-		udelay(5000);
++		mdelay(5);
+ 	}
+ 
+ 	/* This function is required to workaround a hardware bug in some (all?)
+@@ -2578,21 +2634,22 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+ 	struct drm_device *dev = node->minor->dev;
+ 	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	uint32_t rdp, wdp;
+ 	unsigned count, i, j;
+ 
+-	radeon_ring_free_size(rdev);
++	radeon_ring_free_size(rdev, ring);
+ 	rdp = RREG32(RADEON_CP_RB_RPTR);
+ 	wdp = RREG32(RADEON_CP_RB_WPTR);
+-	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
++	count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
+ 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
+ 	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
+ 	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
+-	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
++	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+ 	seq_printf(m, "%u dwords in ring\n", count);
+ 	for (j = 0; j <= count; j++) {
+-		i = (rdp + j) & rdev->cp.ptr_mask;
+-		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
++		i = (rdp + j) & ring->ptr_mask;
++		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+ 	}
+ 	return 0;
+ }
+@@ -3634,7 +3691,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
+ 	}
+ }
+ 
+-int r100_ring_test(struct radeon_device *rdev)
++int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+ 	uint32_t scratch;
+ 	uint32_t tmp = 0;
+@@ -3647,15 +3704,15 @@ int r100_ring_test(struct radeon_device *rdev)
+ 		return r;
+ 	}
+ 	WREG32(scratch, 0xCAFEDEAD);
+-	r = radeon_ring_lock(rdev, 2);
++	r = radeon_ring_lock(rdev, ring, 2);
+ 	if (r) {
+ 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ 		radeon_scratch_free(rdev, scratch);
+ 		return r;
+ 	}
+-	radeon_ring_write(rdev, PACKET0(scratch, 0));
+-	radeon_ring_write(rdev, 0xDEADBEEF);
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_write(ring, PACKET0(scratch, 0));
++	radeon_ring_write(ring, 0xDEADBEEF);
++	radeon_ring_unlock_commit(rdev, ring);
+ 	for (i = 0; i < rdev->usec_timeout; i++) {
+ 		tmp = RREG32(scratch);
+ 		if (tmp == 0xDEADBEEF) {
+@@ -3676,12 +3733,14 @@ int r100_ring_test(struct radeon_device *rdev)
+ 
+ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+ {
+-	radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
+-	radeon_ring_write(rdev, ib->gpu_addr);
+-	radeon_ring_write(rdev, ib->length_dw);
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
++
++	radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
++	radeon_ring_write(ring, ib->gpu_addr);
++	radeon_ring_write(ring, ib->length_dw);
+ }
+ 
+-int r100_ib_test(struct radeon_device *rdev)
++int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+ 	struct radeon_ib *ib;
+ 	uint32_t scratch;
+@@ -3695,7 +3754,7 @@ int r100_ib_test(struct radeon_device *rdev)
+ 		return r;
+ 	}
+ 	WREG32(scratch, 0xCAFEDEAD);
+-	r = radeon_ib_get(rdev, &ib);
++	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
+ 	if (r) {
+ 		return r;
+ 	}
+@@ -3739,34 +3798,16 @@ int r100_ib_test(struct radeon_device *rdev)
+ 
+ void r100_ib_fini(struct radeon_device *rdev)
+ {
++	radeon_ib_pool_suspend(rdev);
+ 	radeon_ib_pool_fini(rdev);
+ }
+ 
+-int r100_ib_init(struct radeon_device *rdev)
+-{
+-	int r;
+-
+-	r = radeon_ib_pool_init(rdev);
+-	if (r) {
+-		dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
+-		r100_ib_fini(rdev);
+-		return r;
+-	}
+-	r = r100_ib_test(rdev);
+-	if (r) {
+-		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+-		r100_ib_fini(rdev);
+-		return r;
+-	}
+-	return 0;
+-}
+-
+ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
+ {
+ 	/* Shutdown CP we shouldn't need to do that but better be safe than
+ 	 * sorry
+ 	 */
+-	rdev->cp.ready = false;
++	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ 	WREG32(R_000740_CP_CSQ_CNTL, 0);
+ 
+ 	/* Save few CRTC registers */
+@@ -3904,6 +3945,12 @@ static int r100_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
+ 	/* Enable IRQ */
+ 	r100_irq_set(rdev);
+ 	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+@@ -3913,16 +3960,25 @@ static int r100_startup(struct radeon_device *rdev)
+ 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ 		return r;
+ 	}
+-	r = r100_ib_init(rdev);
++
++	r = radeon_ib_pool_start(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ 	if (r) {
+-		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
++		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
++		rdev->accel_working = false;
+ 		return r;
+ 	}
++
+ 	return 0;
+ }
+ 
+ int r100_resume(struct radeon_device *rdev)
+ {
++	int r;
++
+ 	/* Make sur GART are not working */
+ 	if (rdev->flags & RADEON_IS_PCI)
+ 		r100_pci_gart_disable(rdev);
+@@ -3940,11 +3996,18 @@ int r100_resume(struct radeon_device *rdev)
+ 	r100_clock_startup(rdev);
+ 	/* Initialize surface registers */
+ 	radeon_surface_init(rdev);
+-	return r100_startup(rdev);
++
++	rdev->accel_working = true;
++	r = r100_startup(rdev);
++	if (r) {
++		rdev->accel_working = false;
++	}
++	return r;
+ }
+ 
+ int r100_suspend(struct radeon_device *rdev)
+ {
++	radeon_ib_pool_suspend(rdev);
+ 	r100_cp_disable(rdev);
+ 	radeon_wb_disable(rdev);
+ 	r100_irq_disable(rdev);
+@@ -4063,7 +4126,14 @@ int r100_init(struct radeon_device *rdev)
+ 			return r;
+ 	}
+ 	r100_set_safe_registers(rdev);
++
++	r = radeon_ib_pool_init(rdev);
+ 	rdev->accel_working = true;
++	if (r) {
++		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
++		rdev->accel_working = false;
++	}
++
+ 	r = r100_startup(rdev);
+ 	if (r) {
+ 		/* Somethings want wront with the accel init stop accel */
+diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
+index a1f3ba0..a59cc47 100644
+--- a/drivers/gpu/drm/radeon/r200.c
++++ b/drivers/gpu/drm/radeon/r200.c
+@@ -87,6 +87,7 @@ int r200_copy_dma(struct radeon_device *rdev,
+ 		  unsigned num_gpu_pages,
+ 		  struct radeon_fence *fence)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	uint32_t size;
+ 	uint32_t cur_size;
+ 	int i, num_loops;
+@@ -95,33 +96,33 @@ int r200_copy_dma(struct radeon_device *rdev,
+ 	/* radeon pitch is /64 */
+ 	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
+ 	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
+-	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
++	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
+ 	if (r) {
+ 		DRM_ERROR("radeon: moving bo (%d).\n", r);
+ 		return r;
+ 	}
+ 	/* Must wait for 2D idle & clean before DMA or hangs might happen */
+-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+-	radeon_ring_write(rdev, (1 << 16));
++	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
++	radeon_ring_write(ring, (1 << 16));
+ 	for (i = 0; i < num_loops; i++) {
+ 		cur_size = size;
+ 		if (cur_size > 0x1FFFFF) {
+ 			cur_size = 0x1FFFFF;
+ 		}
+ 		size -= cur_size;
+-		radeon_ring_write(rdev, PACKET0(0x720, 2));
+-		radeon_ring_write(rdev, src_offset);
+-		radeon_ring_write(rdev, dst_offset);
+-		radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
++		radeon_ring_write(ring, PACKET0(0x720, 2));
++		radeon_ring_write(ring, src_offset);
++		radeon_ring_write(ring, dst_offset);
++		radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
+ 		src_offset += cur_size;
+ 		dst_offset += cur_size;
+ 	}
+-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+-	radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
++	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
++	radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
+ 	if (fence) {
+ 		r = radeon_fence_emit(rdev, fence);
+ 	}
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_unlock_commit(rdev, ring);
+ 	return r;
+ }
+ 
+@@ -214,7 +215,17 @@ int r200_packet0_check(struct radeon_cs_parser *p,
+ 			r100_cs_dump_packet(p, pkt);
+ 			return r;
+ 		}
+-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
++		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
++			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
++				tile_flags |= R200_TXO_MACRO_TILE;
++			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
++				tile_flags |= R200_TXO_MICRO_TILE;
++
++			tmp = idx_value & ~(0x7 << 2);
++			tmp |= tile_flags;
++			ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
++		} else
++			ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ 		track->textures[i].robj = reloc->robj;
+ 		track->tex_dirty = true;
+ 		break;
+@@ -276,14 +287,17 @@ int r200_packet0_check(struct radeon_cs_parser *p,
+ 			return r;
+ 		}
+ 
+-		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+-			tile_flags |= RADEON_COLOR_TILE_ENABLE;
+-		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+-			tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
++		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
++			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
++				tile_flags |= RADEON_COLOR_TILE_ENABLE;
++			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
++				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+ 
+-		tmp = idx_value & ~(0x7 << 16);
+-		tmp |= tile_flags;
+-		ib[idx] = tmp;
++			tmp = idx_value & ~(0x7 << 16);
++			tmp |= tile_flags;
++			ib[idx] = tmp;
++		} else
++			ib[idx] = idx_value;
+ 
+ 		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+ 		track->cb_dirty = true;
+diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
+index c93bc64..fa14383 100644
+--- a/drivers/gpu/drm/radeon/r300.c
++++ b/drivers/gpu/drm/radeon/r300.c
+@@ -105,8 +105,8 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
+ 	if (r)
+ 		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
+ 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+-	rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+-	rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
++	rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
++	rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
+ 	return radeon_gart_table_vram_alloc(rdev);
+ }
+ 
+@@ -175,36 +175,38 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
+ void r300_fence_ring_emit(struct radeon_device *rdev,
+ 			  struct radeon_fence *fence)
+ {
++	struct radeon_ring *ring = &rdev->ring[fence->ring];
++
+ 	/* Who ever call radeon_fence_emit should call ring_lock and ask
+ 	 * for enough space (today caller are ib schedule and buffer move) */
+ 	/* Write SC register so SC & US assert idle */
+-	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
+-	radeon_ring_write(rdev, 0);
++	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
++	radeon_ring_write(ring, 0);
+ 	/* Flush 3D cache */
+-	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+-	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
+-	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+-	radeon_ring_write(rdev, R300_ZC_FLUSH);
++	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
++	radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
++	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
++	radeon_ring_write(ring, R300_ZC_FLUSH);
+ 	/* Wait until IDLE & CLEAN */
+-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+-	radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
++	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
++	radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
+ 				 RADEON_WAIT_2D_IDLECLEAN |
+ 				 RADEON_WAIT_DMA_GUI_IDLE));
+-	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+-	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
++	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
++	radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
+ 				RADEON_HDP_READ_BUFFER_INVALIDATE);
+-	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+-	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
++	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
++	radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
+ 	/* Emit fence sequence & fire IRQ */
+-	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
+-	radeon_ring_write(rdev, fence->seq);
+-	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
+-	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
++	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
++	radeon_ring_write(ring, fence->seq);
++	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
++	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+ }
+ 
+-void r300_ring_start(struct radeon_device *rdev)
++void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+ 	unsigned gb_tile_config;
+ 	int r;
+@@ -227,44 +229,44 @@ void r300_ring_start(struct radeon_device *rdev)
+ 		break;
+ 	}
+ 
+-	r = radeon_ring_lock(rdev, 64);
++	r = radeon_ring_lock(rdev, ring, 64);
+ 	if (r) {
+ 		return;
+ 	}
+-	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
++	radeon_ring_write(ring,
+ 			  RADEON_ISYNC_ANY2D_IDLE3D |
+ 			  RADEON_ISYNC_ANY3D_IDLE2D |
+ 			  RADEON_ISYNC_WAIT_IDLEGUI |
+ 			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
+-	radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
+-	radeon_ring_write(rdev, gb_tile_config);
+-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
++	radeon_ring_write(ring, gb_tile_config);
++	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
++	radeon_ring_write(ring,
+ 			  RADEON_WAIT_2D_IDLECLEAN |
+ 			  RADEON_WAIT_3D_IDLECLEAN);
+-	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+-	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
+-	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+-	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+-	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+-	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
+-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
++	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
++	radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
++	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
++	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
++	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
++	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
++	radeon_ring_write(ring,
+ 			  RADEON_WAIT_2D_IDLECLEAN |
+ 			  RADEON_WAIT_3D_IDLECLEAN);
+-	radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+-	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+-	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+-	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
+-	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
++	radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
++	radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
++	radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
++	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
++	radeon_ring_write(ring,
+ 			  ((6 << R300_MS_X0_SHIFT) |
+ 			   (6 << R300_MS_Y0_SHIFT) |
+ 			   (6 << R300_MS_X1_SHIFT) |
+@@ -273,8 +275,8 @@ void r300_ring_start(struct radeon_device *rdev)
+ 			   (6 << R300_MS_Y2_SHIFT) |
+ 			   (6 << R300_MSBD0_Y_SHIFT) |
+ 			   (6 << R300_MSBD0_X_SHIFT)));
+-	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
++	radeon_ring_write(ring,
+ 			  ((6 << R300_MS_X3_SHIFT) |
+ 			   (6 << R300_MS_Y3_SHIFT) |
+ 			   (6 << R300_MS_X4_SHIFT) |
+@@ -282,16 +284,16 @@ void r300_ring_start(struct radeon_device *rdev)
+ 			   (6 << R300_MS_X5_SHIFT) |
+ 			   (6 << R300_MS_Y5_SHIFT) |
+ 			   (6 << R300_MSBD1_SHIFT)));
+-	radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
+-	radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+-	radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
++	radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
++	radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
++	radeon_ring_write(ring,
+ 			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
+-	radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
++	radeon_ring_write(ring,
+ 			  R300_GEOMETRY_ROUND_NEAREST |
+ 			  R300_COLOR_ROUND_NEAREST);
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_unlock_commit(rdev, ring);
+ }
+ 
+ void r300_errata(struct radeon_device *rdev)
+@@ -375,26 +377,26 @@ void r300_gpu_init(struct radeon_device *rdev)
+ 		 rdev->num_gb_pipes, rdev->num_z_pipes);
+ }
+ 
+-bool r300_gpu_is_lockup(struct radeon_device *rdev)
++bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+ 	u32 rbbm_status;
+ 	int r;
+ 
+ 	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
+ 	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
+-		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
++		r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
+ 		return false;
+ 	}
+ 	/* force CP activities */
+-	r = radeon_ring_lock(rdev, 2);
++	r = radeon_ring_lock(rdev, ring, 2);
+ 	if (!r) {
+ 		/* PACKET2 NOP */
+-		radeon_ring_write(rdev, 0x80000000);
+-		radeon_ring_write(rdev, 0x80000000);
+-		radeon_ring_unlock_commit(rdev);
++		radeon_ring_write(ring, 0x80000000);
++		radeon_ring_write(ring, 0x80000000);
++		radeon_ring_unlock_commit(rdev, ring);
+ 	}
+-	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+-	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
++	ring->rptr = RREG32(RADEON_CP_RB_RPTR);
++	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
+ }
+ 
+ int r300_asic_reset(struct radeon_device *rdev)
+@@ -701,7 +703,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
+ 			return r;
+ 		}
+ 
+-		if (p->keep_tiling_flags) {
++		if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
+ 			ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
+ 				  ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
+ 		} else {
+@@ -765,7 +767,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
+ 		/* RB3D_COLORPITCH1 */
+ 		/* RB3D_COLORPITCH2 */
+ 		/* RB3D_COLORPITCH3 */
+-		if (!p->keep_tiling_flags) {
++		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ 			r = r100_cs_packet_next_reloc(p, &reloc);
+ 			if (r) {
+ 				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+@@ -850,7 +852,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
+ 		break;
+ 	case 0x4F24:
+ 		/* ZB_DEPTHPITCH */
+-		if (!p->keep_tiling_flags) {
++		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ 			r = r100_cs_packet_next_reloc(p, &reloc);
+ 			if (r) {
+ 				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+@@ -1396,6 +1398,12 @@ static int r300_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
+ 	/* Enable IRQ */
+ 	r100_irq_set(rdev);
+ 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+@@ -1405,16 +1413,25 @@ static int r300_startup(struct radeon_device *rdev)
+ 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ 		return r;
+ 	}
+-	r = r100_ib_init(rdev);
++
++	r = radeon_ib_pool_start(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ 	if (r) {
+-		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
++		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
++		rdev->accel_working = false;
+ 		return r;
+ 	}
++
+ 	return 0;
+ }
+ 
+ int r300_resume(struct radeon_device *rdev)
+ {
++	int r;
++
+ 	/* Make sur GART are not working */
+ 	if (rdev->flags & RADEON_IS_PCIE)
+ 		rv370_pcie_gart_disable(rdev);
+@@ -1434,11 +1451,18 @@ int r300_resume(struct radeon_device *rdev)
+ 	r300_clock_startup(rdev);
+ 	/* Initialize surface registers */
+ 	radeon_surface_init(rdev);
+-	return r300_startup(rdev);
++
++	rdev->accel_working = true;
++	r = r300_startup(rdev);
++	if (r) {
++		rdev->accel_working = false;
++	}
++	return r;
+ }
+ 
+ int r300_suspend(struct radeon_device *rdev)
+ {
++	radeon_ib_pool_suspend(rdev);
+ 	r100_cp_disable(rdev);
+ 	radeon_wb_disable(rdev);
+ 	r100_irq_disable(rdev);
+@@ -1539,7 +1563,14 @@ int r300_init(struct radeon_device *rdev)
+ 			return r;
+ 	}
+ 	r300_set_reg_safe(rdev);
++
++	r = radeon_ib_pool_init(rdev);
+ 	rdev->accel_working = true;
++	if (r) {
++		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
++		rdev->accel_working = false;
++	}
++
+ 	r = r300_startup(rdev);
+ 	if (r) {
+ 		/* Somethings want wront with the accel init stop accel */
+diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
+index 417fab8..f3fcaac 100644
+--- a/drivers/gpu/drm/radeon/r420.c
++++ b/drivers/gpu/drm/radeon/r420.c
+@@ -199,6 +199,8 @@ static void r420_clock_resume(struct radeon_device *rdev)
+ 
+ static void r420_cp_errata_init(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
++
+ 	/* RV410 and R420 can lock up if CP DMA to host memory happens
+ 	 * while the 2D engine is busy.
+ 	 *
+@@ -206,22 +208,24 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
+ 	 * of the CP init, apparently.
+ 	 */
+ 	radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
+-	radeon_ring_lock(rdev, 8);
+-	radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
+-	radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
+-	radeon_ring_write(rdev, 0xDEADBEEF);
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_lock(rdev, ring, 8);
++	radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
++	radeon_ring_write(ring, rdev->config.r300.resync_scratch);
++	radeon_ring_write(ring, 0xDEADBEEF);
++	radeon_ring_unlock_commit(rdev, ring);
+ }
+ 
+ static void r420_cp_errata_fini(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
++
+ 	/* Catch the RESYNC we dispatched all the way back,
+ 	 * at the very beginning of the CP init.
+ 	 */
+-	radeon_ring_lock(rdev, 8);
+-	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+-	radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_lock(rdev, ring, 8);
++	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
++	radeon_ring_write(ring, R300_RB3D_DC_FINISH);
++	radeon_ring_unlock_commit(rdev, ring);
+ 	radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
+ }
+ 
+@@ -254,6 +258,12 @@ static int r420_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
+ 	/* Enable IRQ */
+ 	r100_irq_set(rdev);
+ 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+@@ -264,16 +274,25 @@ static int r420_startup(struct radeon_device *rdev)
+ 		return r;
+ 	}
+ 	r420_cp_errata_init(rdev);
+-	r = r100_ib_init(rdev);
++
++	r = radeon_ib_pool_start(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ 	if (r) {
+-		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
++		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
++		rdev->accel_working = false;
+ 		return r;
+ 	}
++
+ 	return 0;
+ }
+ 
+ int r420_resume(struct radeon_device *rdev)
+ {
++	int r;
++
+ 	/* Make sur GART are not working */
+ 	if (rdev->flags & RADEON_IS_PCIE)
+ 		rv370_pcie_gart_disable(rdev);
+@@ -297,11 +316,18 @@ int r420_resume(struct radeon_device *rdev)
+ 	r420_clock_resume(rdev);
+ 	/* Initialize surface registers */
+ 	radeon_surface_init(rdev);
+-	return r420_startup(rdev);
++
++	rdev->accel_working = true;
++	r = r420_startup(rdev);
++	if (r) {
++		rdev->accel_working = false;
++	}
++	return r;
+ }
+ 
+ int r420_suspend(struct radeon_device *rdev)
+ {
++	radeon_ib_pool_suspend(rdev);
+ 	r420_cp_errata_fini(rdev);
+ 	r100_cp_disable(rdev);
+ 	radeon_wb_disable(rdev);
+@@ -414,7 +440,14 @@ int r420_init(struct radeon_device *rdev)
+ 			return r;
+ 	}
+ 	r420_set_reg_safe(rdev);
++
++	r = radeon_ib_pool_init(rdev);
+ 	rdev->accel_working = true;
++	if (r) {
++		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
++		rdev->accel_working = false;
++	}
++
+ 	r = r420_startup(rdev);
+ 	if (r) {
+ 		/* Somethings want wront with the accel init stop accel */
+diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
+index fc43705..ec576aa 100644
+--- a/drivers/gpu/drm/radeon/r500_reg.h
++++ b/drivers/gpu/drm/radeon/r500_reg.h
+@@ -351,6 +351,8 @@
+ #define AVIVO_D1CRTC_BLANK_CONTROL                              0x6084
+ #define AVIVO_D1CRTC_INTERLACE_CONTROL                          0x6088
+ #define AVIVO_D1CRTC_INTERLACE_STATUS                           0x608c
++#define AVIVO_D1CRTC_STATUS                                     0x609c
++#       define AVIVO_D1CRTC_V_BLANK                             (1 << 0)
+ #define AVIVO_D1CRTC_STATUS_POSITION                            0x60a0
+ #define AVIVO_D1CRTC_FRAME_COUNT                                0x60a4
+ #define AVIVO_D1CRTC_STEREO_CONTROL                             0x60c4
+@@ -573,6 +575,7 @@
+ 
+ #define AVIVO_TMDSA_CNTL                    0x7880
+ #   define AVIVO_TMDSA_CNTL_ENABLE               (1 << 0)
++#   define AVIVO_TMDSA_CNTL_HDMI_EN              (1 << 2)
+ #   define AVIVO_TMDSA_CNTL_HPD_MASK             (1 << 4)
+ #   define AVIVO_TMDSA_CNTL_HPD_SELECT           (1 << 8)
+ #   define AVIVO_TMDSA_CNTL_SYNC_PHASE           (1 << 12)
+@@ -633,6 +636,7 @@
+ 
+ #define AVIVO_LVTMA_CNTL					0x7a80
+ #   define AVIVO_LVTMA_CNTL_ENABLE               (1 << 0)
++#   define AVIVO_LVTMA_CNTL_HDMI_EN              (1 << 2)
+ #   define AVIVO_LVTMA_CNTL_HPD_MASK             (1 << 4)
+ #   define AVIVO_LVTMA_CNTL_HPD_SELECT           (1 << 8)
+ #   define AVIVO_LVTMA_CNTL_SYNC_PHASE           (1 << 12)
+diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
+index 3081d07..ebcc15b 100644
+--- a/drivers/gpu/drm/radeon/r520.c
++++ b/drivers/gpu/drm/radeon/r520.c
+@@ -33,7 +33,7 @@
+ 
+ /* This files gather functions specifics to: r520,rv530,rv560,rv570,r580 */
+ 
+-static int r520_mc_wait_for_idle(struct radeon_device *rdev)
++int r520_mc_wait_for_idle(struct radeon_device *rdev)
+ {
+ 	unsigned i;
+ 	uint32_t tmp;
+@@ -187,6 +187,12 @@ static int r520_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
+ 	/* Enable IRQ */
+ 	rs600_irq_set(rdev);
+ 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+@@ -196,9 +202,15 @@ static int r520_startup(struct radeon_device *rdev)
+ 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ 		return r;
+ 	}
+-	r = r100_ib_init(rdev);
++
++	r = radeon_ib_pool_start(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ 	if (r) {
+-		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
++		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
++		rdev->accel_working = false;
+ 		return r;
+ 	}
+ 	return 0;
+@@ -206,6 +218,8 @@ static int r520_startup(struct radeon_device *rdev)
+ 
+ int r520_resume(struct radeon_device *rdev)
+ {
++	int r;
++
+ 	/* Make sur GART are not working */
+ 	if (rdev->flags & RADEON_IS_PCIE)
+ 		rv370_pcie_gart_disable(rdev);
+@@ -223,7 +237,13 @@ int r520_resume(struct radeon_device *rdev)
+ 	rv515_clock_startup(rdev);
+ 	/* Initialize surface registers */
+ 	radeon_surface_init(rdev);
+-	return r520_startup(rdev);
++
++	rdev->accel_working = true;
++	r = r520_startup(rdev);
++	if (r) {
++		rdev->accel_working = false;
++	}
++	return r;
+ }
+ 
+ int r520_init(struct radeon_device *rdev)
+@@ -292,7 +312,14 @@ int r520_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	rv515_set_safe_registers(rdev);
++
++	r = radeon_ib_pool_init(rdev);
+ 	rdev->accel_working = true;
++	if (r) {
++		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
++		rdev->accel_working = false;
++	}
++
+ 	r = r520_startup(rdev);
+ 	if (r) {
+ 		/* Somethings want wront with the accel init stop accel */
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index bdfa82a..b1ff9cc 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -49,6 +49,7 @@
+ #define EVERGREEN_PM4_UCODE_SIZE 1376
+ #define EVERGREEN_RLC_UCODE_SIZE 768
+ #define CAYMAN_RLC_UCODE_SIZE 1024
++#define ARUBA_RLC_UCODE_SIZE 1536
+ 
+ /* Firmware Names */
+ MODULE_FIRMWARE("radeon/R600_pfp.bin");
+@@ -1134,7 +1135,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
+ 	}
+ 	if (rdev->flags & RADEON_IS_AGP) {
+ 		size_bf = mc->gtt_start;
+-		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
++		size_af = 0xFFFFFFFF - mc->gtt_end;
+ 		if (size_bf > size_af) {
+ 			if (mc->mc_vram_size > size_bf) {
+ 				dev_warn(rdev->dev, "limiting VRAM\n");
+@@ -1148,7 +1149,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
+ 				mc->real_vram_size = size_af;
+ 				mc->mc_vram_size = size_af;
+ 			}
+-			mc->vram_start = mc->gtt_end;
++			mc->vram_start = mc->gtt_end + 1;
+ 		}
+ 		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ 		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+@@ -1344,7 +1345,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
+ 	return 0;
+ }
+ 
+-bool r600_gpu_is_lockup(struct radeon_device *rdev)
++bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+ 	u32 srbm_status;
+ 	u32 grbm_status;
+@@ -1361,19 +1362,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev)
+ 	grbm_status = RREG32(R_008010_GRBM_STATUS);
+ 	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
+ 	if (!G_008010_GUI_ACTIVE(grbm_status)) {
+-		r100_gpu_lockup_update(lockup, &rdev->cp);
++		r100_gpu_lockup_update(lockup, ring);
+ 		return false;
+ 	}
+ 	/* force CP activities */
+-	r = radeon_ring_lock(rdev, 2);
++	r = radeon_ring_lock(rdev, ring, 2);
+ 	if (!r) {
+ 		/* PACKET2 NOP */
+-		radeon_ring_write(rdev, 0x80000000);
+-		radeon_ring_write(rdev, 0x80000000);
+-		radeon_ring_unlock_commit(rdev);
++		radeon_ring_write(ring, 0x80000000);
++		radeon_ring_write(ring, 0x80000000);
++		radeon_ring_unlock_commit(rdev, ring);
+ 	}
+-	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+-	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
++	ring->rptr = RREG32(ring->rptr_reg);
++	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
+ }
+ 
+ int r600_asic_reset(struct radeon_device *rdev)
+@@ -2145,27 +2146,28 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
+ 
+ int r600_cp_start(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	int r;
+ 	uint32_t cp_me;
+ 
+-	r = radeon_ring_lock(rdev, 7);
++	r = radeon_ring_lock(rdev, ring, 7);
+ 	if (r) {
+ 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ 		return r;
+ 	}
+-	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+-	radeon_ring_write(rdev, 0x1);
++	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
++	radeon_ring_write(ring, 0x1);
+ 	if (rdev->family >= CHIP_RV770) {
+-		radeon_ring_write(rdev, 0x0);
+-		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
++		radeon_ring_write(ring, 0x0);
++		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
+ 	} else {
+-		radeon_ring_write(rdev, 0x3);
+-		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
++		radeon_ring_write(ring, 0x3);
++		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
+ 	}
+-	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
++	radeon_ring_unlock_commit(rdev, ring);
+ 
+ 	cp_me = 0xff;
+ 	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
+@@ -2174,6 +2176,7 @@ int r600_cp_start(struct radeon_device *rdev)
+ 
+ int r600_cp_resume(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	u32 tmp;
+ 	u32 rb_bufsz;
+ 	int r;
+@@ -2185,13 +2188,13 @@ int r600_cp_resume(struct radeon_device *rdev)
+ 	WREG32(GRBM_SOFT_RESET, 0);
+ 
+ 	/* Set ring buffer size */
+-	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
++	rb_bufsz = drm_order(ring->ring_size / 8);
+ 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ #ifdef __BIG_ENDIAN
+ 	tmp |= BUF_SWAP_32BIT;
+ #endif
+ 	WREG32(CP_RB_CNTL, tmp);
+-	WREG32(CP_SEM_WAIT_TIMER, 0x4);
++	WREG32(CP_SEM_WAIT_TIMER, 0x0);
+ 
+ 	/* Set the write pointer delay */
+ 	WREG32(CP_RB_WPTR_DELAY, 0);
+@@ -2199,8 +2202,8 @@ int r600_cp_resume(struct radeon_device *rdev)
+ 	/* Initialize the ring buffer's read and write pointers */
+ 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
+ 	WREG32(CP_RB_RPTR_WR, 0);
+-	rdev->cp.wptr = 0;
+-	WREG32(CP_RB_WPTR, rdev->cp.wptr);
++	ring->wptr = 0;
++	WREG32(CP_RB_WPTR, ring->wptr);
+ 
+ 	/* set the wb address whether it's enabled or not */
+ 	WREG32(CP_RB_RPTR_ADDR,
+@@ -2218,42 +2221,36 @@ int r600_cp_resume(struct radeon_device *rdev)
+ 	mdelay(1);
+ 	WREG32(CP_RB_CNTL, tmp);
+ 
+-	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
++	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
+ 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+ 
+-	rdev->cp.rptr = RREG32(CP_RB_RPTR);
++	ring->rptr = RREG32(CP_RB_RPTR);
+ 
+ 	r600_cp_start(rdev);
+-	rdev->cp.ready = true;
+-	r = radeon_ring_test(rdev);
++	ring->ready = true;
++	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
+ 	if (r) {
+-		rdev->cp.ready = false;
++		ring->ready = false;
+ 		return r;
+ 	}
+ 	return 0;
+ }
+ 
+-void r600_cp_commit(struct radeon_device *rdev)
+-{
+-	WREG32(CP_RB_WPTR, rdev->cp.wptr);
+-	(void)RREG32(CP_RB_WPTR);
+-}
+-
+-void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
++void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
+ {
+ 	u32 rb_bufsz;
+ 
+ 	/* Align ring size */
+ 	rb_bufsz = drm_order(ring_size / 8);
+ 	ring_size = (1 << (rb_bufsz + 1)) * 4;
+-	rdev->cp.ring_size = ring_size;
+-	rdev->cp.align_mask = 16 - 1;
++	ring->ring_size = ring_size;
++	ring->align_mask = 16 - 1;
+ }
+ 
+ void r600_cp_fini(struct radeon_device *rdev)
+ {
+ 	r600_cp_stop(rdev);
+-	radeon_ring_fini(rdev);
++	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ }
+ 
+ 
+@@ -2272,11 +2269,11 @@ void r600_scratch_init(struct radeon_device *rdev)
+ 	}
+ }
+ 
+-int r600_ring_test(struct radeon_device *rdev)
++int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+ 	uint32_t scratch;
+ 	uint32_t tmp = 0;
+-	unsigned i;
++	unsigned i, ridx = radeon_ring_index(rdev, ring);
+ 	int r;
+ 
+ 	r = radeon_scratch_get(rdev, &scratch);
+@@ -2285,16 +2282,16 @@ int r600_ring_test(struct radeon_device *rdev)
+ 		return r;
+ 	}
+ 	WREG32(scratch, 0xCAFEDEAD);
+-	r = radeon_ring_lock(rdev, 3);
++	r = radeon_ring_lock(rdev, ring, 3);
+ 	if (r) {
+-		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
++		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
+ 		radeon_scratch_free(rdev, scratch);
+ 		return r;
+ 	}
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+-	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+-	radeon_ring_write(rdev, 0xDEADBEEF);
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
++	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
++	radeon_ring_write(ring, 0xDEADBEEF);
++	radeon_ring_unlock_commit(rdev, ring);
+ 	for (i = 0; i < rdev->usec_timeout; i++) {
+ 		tmp = RREG32(scratch);
+ 		if (tmp == 0xDEADBEEF)
+@@ -2302,10 +2299,10 @@ int r600_ring_test(struct radeon_device *rdev)
+ 		DRM_UDELAY(1);
+ 	}
+ 	if (i < rdev->usec_timeout) {
+-		DRM_INFO("ring test succeeded in %d usecs\n", i);
++		DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
+ 	} else {
+-		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
+-			  scratch, tmp);
++		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
++			  ridx, scratch, tmp);
+ 		r = -EINVAL;
+ 	}
+ 	radeon_scratch_free(rdev, scratch);
+@@ -2315,49 +2312,66 @@ int r600_ring_test(struct radeon_device *rdev)
+ void r600_fence_ring_emit(struct radeon_device *rdev,
+ 			  struct radeon_fence *fence)
+ {
++	struct radeon_ring *ring = &rdev->ring[fence->ring];
++
+ 	if (rdev->wb.use_event) {
+-		u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
+-			(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
++		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ 		/* flush read cache over gart */
+-		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+-		radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
++		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
++		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
+ 					PACKET3_VC_ACTION_ENA |
+ 					PACKET3_SH_ACTION_ENA);
+-		radeon_ring_write(rdev, 0xFFFFFFFF);
+-		radeon_ring_write(rdev, 0);
+-		radeon_ring_write(rdev, 10); /* poll interval */
++		radeon_ring_write(ring, 0xFFFFFFFF);
++		radeon_ring_write(ring, 0);
++		radeon_ring_write(ring, 10); /* poll interval */
+ 		/* EVENT_WRITE_EOP - flush caches, send int */
+-		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+-		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+-		radeon_ring_write(rdev, addr & 0xffffffff);
+-		radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+-		radeon_ring_write(rdev, fence->seq);
+-		radeon_ring_write(rdev, 0);
++		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
++		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
++		radeon_ring_write(ring, addr & 0xffffffff);
++		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
++		radeon_ring_write(ring, fence->seq);
++		radeon_ring_write(ring, 0);
+ 	} else {
+ 		/* flush read cache over gart */
+-		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+-		radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
++		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
++		radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
+ 					PACKET3_VC_ACTION_ENA |
+ 					PACKET3_SH_ACTION_ENA);
+-		radeon_ring_write(rdev, 0xFFFFFFFF);
+-		radeon_ring_write(rdev, 0);
+-		radeon_ring_write(rdev, 10); /* poll interval */
+-		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
+-		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
++		radeon_ring_write(ring, 0xFFFFFFFF);
++		radeon_ring_write(ring, 0);
++		radeon_ring_write(ring, 10); /* poll interval */
++		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
++		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+ 		/* wait for 3D idle clean */
+-		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+-		radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+-		radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
++		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
++		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
++		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+ 		/* Emit fence sequence & fire IRQ */
+-		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+-		radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+-		radeon_ring_write(rdev, fence->seq);
++		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
++		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
++		radeon_ring_write(ring, fence->seq);
+ 		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
+-		radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
+-		radeon_ring_write(rdev, RB_INT_STAT);
++		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
++		radeon_ring_write(ring, RB_INT_STAT);
+ 	}
+ }
+ 
++void r600_semaphore_ring_emit(struct radeon_device *rdev,
++			      struct radeon_ring *ring,
++			      struct radeon_semaphore *semaphore,
++			      bool emit_wait)
++{
++	uint64_t addr = semaphore->gpu_addr;
++	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
++
++	if (rdev->family < CHIP_CAYMAN)
++		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
++
++	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
++	radeon_ring_write(ring, addr & 0xffffffff);
++	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
++}
++
+ int r600_copy_blit(struct radeon_device *rdev,
+ 		   uint64_t src_offset,
+ 		   uint64_t dst_offset,
+@@ -2410,6 +2424,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
+ 
+ int r600_startup(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	int r;
+ 
+ 	/* enable pcie gen2 link */
+@@ -2439,7 +2454,7 @@ int r600_startup(struct radeon_device *rdev)
+ 	r = r600_blit_init(rdev);
+ 	if (r) {
+ 		r600_blit_fini(rdev);
+-		rdev->asic->copy = NULL;
++		rdev->asic->copy.copy = NULL;
+ 		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ 	}
+ 
+@@ -2448,6 +2463,12 @@ int r600_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
+ 	/* Enable IRQ */
+ 	r = r600_irq_init(rdev);
+ 	if (r) {
+@@ -2457,7 +2478,10 @@ int r600_startup(struct radeon_device *rdev)
+ 	}
+ 	r600_irq_set(rdev);
+ 
+-	r = radeon_ring_init(rdev, rdev->cp.ring_size);
++	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
++			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
++			     0, 0xfffff, RADEON_CP_PACKET2);
++
+ 	if (r)
+ 		return r;
+ 	r = r600_cp_load_microcode(rdev);
+@@ -2467,6 +2491,17 @@ int r600_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_ib_pool_start(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
++	if (r) {
++		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
++		rdev->accel_working = false;
++		return r;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -2495,15 +2530,11 @@ int r600_resume(struct radeon_device *rdev)
+ 	/* post card */
+ 	atom_asic_init(rdev->mode_info.atom_context);
+ 
++	rdev->accel_working = true;
+ 	r = r600_startup(rdev);
+ 	if (r) {
+ 		DRM_ERROR("r600 startup failed on resume\n");
+-		return r;
+-	}
+-
+-	r = r600_ib_test(rdev);
+-	if (r) {
+-		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
++		rdev->accel_working = false;
+ 		return r;
+ 	}
+ 
+@@ -2519,13 +2550,14 @@ int r600_resume(struct radeon_device *rdev)
+ int r600_suspend(struct radeon_device *rdev)
+ {
+ 	r600_audio_fini(rdev);
++	radeon_ib_pool_suspend(rdev);
++	r600_blit_suspend(rdev);
+ 	/* FIXME: we should wait for ring to be empty */
+ 	r600_cp_stop(rdev);
+-	rdev->cp.ready = false;
++	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ 	r600_irq_suspend(rdev);
+ 	radeon_wb_disable(rdev);
+ 	r600_pcie_gart_disable(rdev);
+-	r600_blit_suspend(rdev);
+ 
+ 	return 0;
+ }
+@@ -2596,8 +2628,8 @@ int r600_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
+-	rdev->cp.ring_obj = NULL;
+-	r600_ring_init(rdev, 1024 * 1024);
++	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
++	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ 
+ 	rdev->ih.ring_obj = NULL;
+ 	r600_ih_ring_init(rdev, 64 * 1024);
+@@ -2606,30 +2638,24 @@ int r600_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_ib_pool_init(rdev);
+ 	rdev->accel_working = true;
++	if (r) {
++		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
++		rdev->accel_working = false;
++	}
++
+ 	r = r600_startup(rdev);
+ 	if (r) {
+ 		dev_err(rdev->dev, "disabling GPU acceleration\n");
+ 		r600_cp_fini(rdev);
+ 		r600_irq_fini(rdev);
+ 		radeon_wb_fini(rdev);
++		r100_ib_fini(rdev);
+ 		radeon_irq_kms_fini(rdev);
+ 		r600_pcie_gart_fini(rdev);
+ 		rdev->accel_working = false;
+ 	}
+-	if (rdev->accel_working) {
+-		r = radeon_ib_pool_init(rdev);
+-		if (r) {
+-			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+-			rdev->accel_working = false;
+-		} else {
+-			r = r600_ib_test(rdev);
+-			if (r) {
+-				dev_err(rdev->dev, "IB test failed (%d).\n", r);
+-				rdev->accel_working = false;
+-			}
+-		}
+-	}
+ 
+ 	r = r600_audio_init(rdev);
+ 	if (r)
+@@ -2644,12 +2670,13 @@ void r600_fini(struct radeon_device *rdev)
+ 	r600_cp_fini(rdev);
+ 	r600_irq_fini(rdev);
+ 	radeon_wb_fini(rdev);
+-	radeon_ib_pool_fini(rdev);
++	r100_ib_fini(rdev);
+ 	radeon_irq_kms_fini(rdev);
+ 	r600_pcie_gart_fini(rdev);
+ 	r600_vram_scratch_fini(rdev);
+ 	radeon_agp_fini(rdev);
+ 	radeon_gem_fini(rdev);
++	radeon_semaphore_driver_fini(rdev);
+ 	radeon_fence_driver_fini(rdev);
+ 	radeon_bo_fini(rdev);
+ 	radeon_atombios_fini(rdev);
+@@ -2663,24 +2690,27 @@ void r600_fini(struct radeon_device *rdev)
+  */
+ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+ {
++	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
++
+ 	/* FIXME: implement */
+-	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
++	radeon_ring_write(ring,
+ #ifdef __BIG_ENDIAN
+ 			  (2 << 0) |
+ #endif
+ 			  (ib->gpu_addr & 0xFFFFFFFC));
+-	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
+-	radeon_ring_write(rdev, ib->length_dw);
++	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
++	radeon_ring_write(ring, ib->length_dw);
+ }
+ 
+-int r600_ib_test(struct radeon_device *rdev)
++int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+ 	struct radeon_ib *ib;
+ 	uint32_t scratch;
+ 	uint32_t tmp = 0;
+ 	unsigned i;
+ 	int r;
++	int ring_index = radeon_ring_index(rdev, ring);
+ 
+ 	r = radeon_scratch_get(rdev, &scratch);
+ 	if (r) {
+@@ -2688,7 +2718,7 @@ int r600_ib_test(struct radeon_device *rdev)
+ 		return r;
+ 	}
+ 	WREG32(scratch, 0xCAFEDEAD);
+-	r = radeon_ib_get(rdev, &ib);
++	r = radeon_ib_get(rdev, ring_index, &ib, 256);
+ 	if (r) {
+ 		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ 		return r;
+@@ -2696,20 +2726,7 @@ int r600_ib_test(struct radeon_device *rdev)
+ 	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
+ 	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ 	ib->ptr[2] = 0xDEADBEEF;
+-	ib->ptr[3] = PACKET2(0);
+-	ib->ptr[4] = PACKET2(0);
+-	ib->ptr[5] = PACKET2(0);
+-	ib->ptr[6] = PACKET2(0);
+-	ib->ptr[7] = PACKET2(0);
+-	ib->ptr[8] = PACKET2(0);
+-	ib->ptr[9] = PACKET2(0);
+-	ib->ptr[10] = PACKET2(0);
+-	ib->ptr[11] = PACKET2(0);
+-	ib->ptr[12] = PACKET2(0);
+-	ib->ptr[13] = PACKET2(0);
+-	ib->ptr[14] = PACKET2(0);
+-	ib->ptr[15] = PACKET2(0);
+-	ib->length_dw = 16;
++	ib->length_dw = 3;
+ 	r = radeon_ib_schedule(rdev, ib);
+ 	if (r) {
+ 		radeon_scratch_free(rdev, scratch);
+@@ -2729,7 +2746,7 @@ int r600_ib_test(struct radeon_device *rdev)
+ 		DRM_UDELAY(1);
+ 	}
+ 	if (i < rdev->usec_timeout) {
+-		DRM_INFO("ib test succeeded in %u usecs\n", i);
++		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
+ 	} else {
+ 		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
+ 			  scratch, tmp);
+@@ -2763,7 +2780,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
+ 	rdev->ih.rptr = 0;
+ }
+ 
+-static int r600_ih_ring_alloc(struct radeon_device *rdev)
++int r600_ih_ring_alloc(struct radeon_device *rdev)
+ {
+ 	int r;
+ 
+@@ -2799,7 +2816,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev)
+ 	return 0;
+ }
+ 
+-static void r600_ih_ring_fini(struct radeon_device *rdev)
++void r600_ih_ring_fini(struct radeon_device *rdev)
+ {
+ 	int r;
+ 	if (rdev->ih.ring_obj) {
+@@ -2823,7 +2840,7 @@ void r600_rlc_stop(struct radeon_device *rdev)
+ 		/* r7xx asics need to soft reset RLC before halting */
+ 		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
+ 		RREG32(SRBM_SOFT_RESET);
+-		udelay(15000);
++		mdelay(15);
+ 		WREG32(SRBM_SOFT_RESET, 0);
+ 		RREG32(SRBM_SOFT_RESET);
+ 	}
+@@ -2846,10 +2863,17 @@ static int r600_rlc_init(struct radeon_device *rdev)
+ 
+ 	r600_rlc_stop(rdev);
+ 
+-	WREG32(RLC_HB_BASE, 0);
+ 	WREG32(RLC_HB_CNTL, 0);
+-	WREG32(RLC_HB_RPTR, 0);
+-	WREG32(RLC_HB_WPTR, 0);
++
++	if (rdev->family == CHIP_ARUBA) {
++		WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
++		WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
++	}
++	if (rdev->family <= CHIP_CAYMAN) {
++		WREG32(RLC_HB_BASE, 0);
++		WREG32(RLC_HB_RPTR, 0);
++		WREG32(RLC_HB_WPTR, 0);
++	}
+ 	if (rdev->family <= CHIP_CAICOS) {
+ 		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+ 		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+@@ -2858,7 +2882,12 @@ static int r600_rlc_init(struct radeon_device *rdev)
+ 	WREG32(RLC_UCODE_CNTL, 0);
+ 
+ 	fw_data = (const __be32 *)rdev->rlc_fw->data;
+-	if (rdev->family >= CHIP_CAYMAN) {
++	if (rdev->family >= CHIP_ARUBA) {
++		for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
++			WREG32(RLC_UCODE_ADDR, i);
++			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
++		}
++	} else if (rdev->family >= CHIP_CAYMAN) {
+ 		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
+ 			WREG32(RLC_UCODE_ADDR, i);
+ 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+@@ -3076,7 +3105,7 @@ int r600_irq_set(struct radeon_device *rdev)
+ 		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ 	}
+ 
+-	if (rdev->irq.sw_int) {
++	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ 		DRM_DEBUG("r600_irq_set: sw int\n");
+ 		cp_int_cntl |= RB_INT_ENABLE;
+ 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+@@ -3460,11 +3489,11 @@ restart_ih:
+ 		case 177: /* CP_INT in IB1 */
+ 		case 178: /* CP_INT in IB2 */
+ 			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
+-			radeon_fence_process(rdev);
++			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ 			break;
+ 		case 181: /* CP EOP event */
+ 			DRM_DEBUG("IH: CP EOP\n");
+-			radeon_fence_process(rdev);
++			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ 			break;
+ 		case 233: /* GUI IDLE */
+ 			DRM_DEBUG("IH: GUI idle\n");
+@@ -3497,30 +3526,6 @@ restart_ih:
+  */
+ #if defined(CONFIG_DEBUG_FS)
+ 
+-static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
+-{
+-	struct drm_info_node *node = (struct drm_info_node *) m->private;
+-	struct drm_device *dev = node->minor->dev;
+-	struct radeon_device *rdev = dev->dev_private;
+-	unsigned count, i, j;
+-
+-	radeon_ring_free_size(rdev);
+-	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
+-	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
+-	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
+-	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
+-	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
+-	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
+-	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
+-	seq_printf(m, "%u dwords in ring\n", count);
+-	i = rdev->cp.rptr;
+-	for (j = 0; j <= count; j++) {
+-		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+-		i = (i + 1) & rdev->cp.ptr_mask;
+-	}
+-	return 0;
+-}
+-
+ static int r600_debugfs_mc_info(struct seq_file *m, void *data)
+ {
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+@@ -3534,7 +3539,6 @@ static int r600_debugfs_mc_info(struct seq_file *m, void *data)
+ 
+ static struct drm_info_list r600_mc_info_list[] = {
+ 	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
+-	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
+ };
+ #endif
+ 
+diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
+index 846fae5..24e3939 100644
+--- a/drivers/gpu/drm/radeon/r600_audio.c
++++ b/drivers/gpu/drm/radeon/r600_audio.c
+@@ -36,7 +36,7 @@
+  */
+ static int r600_audio_chipset_supported(struct radeon_device *rdev)
+ {
+-	return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
++	return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
+ 		|| rdev->family == CHIP_RS600
+ 		|| rdev->family == CHIP_RS690
+ 		|| rdev->family == CHIP_RS740;
+@@ -161,8 +161,18 @@ static void r600_audio_update_hdmi(unsigned long param)
+  */
+ static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
+ {
++	u32 value = 0;
+ 	DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
+-	WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
++	if (ASIC_IS_DCE4(rdev)) {
++		if (enable) {
++			value |= 0x81000000; /* Required to enable audio */
++			value |= 0x0e1000f0; /* fglrx sets that too */
++		}
++		WREG32(EVERGREEN_AUDIO_ENABLE, value);
++	} else {
++		WREG32_P(R600_AUDIO_ENABLE,
++			 enable ? 0x81000000 : 0x0, ~0x81000000);
++	}
+ 	rdev->audio_enabled = enable;
+ }
+ 
+@@ -229,6 +239,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
++	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ 	int base_rate = 48000;
+ 
+ 	switch (radeon_encoder->encoder_id) {
+@@ -248,22 +259,33 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
+ 		return;
+ 	}
+ 
+-	switch (dig->dig_encoder) {
+-	case 0:
+-		WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
+-		WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
+-		WREG32(R600_AUDIO_CLK_SRCSEL, 0);
+-		break;
+-
+-	case 1:
+-		WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
+-		WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
+-		WREG32(R600_AUDIO_CLK_SRCSEL, 1);
+-		break;
+-	default:
+-		dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
+-			  radeon_encoder->encoder_id);
+-		return;
++	if (ASIC_IS_DCE4(rdev)) {
++		/* TODO: other PLLs? */
++		WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
++		WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
++		WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
++
++		/* Select DTO source */
++		WREG32(0x5ac, radeon_crtc->crtc_id);
++	} else {
++		switch (dig->dig_encoder) {
++		case 0:
++			WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
++			WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
++			WREG32(R600_AUDIO_CLK_SRCSEL, 0);
++			break;
++
++		case 1:
++			WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
++			WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
++			WREG32(R600_AUDIO_CLK_SRCSEL, 1);
++			break;
++		default:
++			dev_err(rdev->dev,
++				"Unsupported DIG on encoder 0x%02X\n",
++				radeon_encoder->encoder_id);
++			return;
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
+index e09d281..db38f58 100644
+--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
++++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
+@@ -30,26 +30,14 @@
+ 
+ #include "r600d.h"
+ #include "r600_blit_shaders.h"
+-
+-#define DI_PT_RECTLIST        0x11
+-#define DI_INDEX_SIZE_16_BIT  0x0
+-#define DI_SRC_SEL_AUTO_INDEX 0x2
+-
+-#define FMT_8                 0x1
+-#define FMT_5_6_5             0x8
+-#define FMT_8_8_8_8           0x1a
+-#define COLOR_8               0x1
+-#define COLOR_5_6_5           0x8
+-#define COLOR_8_8_8_8         0x1a
+-
+-#define RECT_UNIT_H           32
+-#define RECT_UNIT_W           (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
++#include "radeon_blit_common.h"
+ 
+ /* emits 21 on rv770+, 23 on r600 */
+ static void
+ set_render_target(struct radeon_device *rdev, int format,
+ 		  int w, int h, u64 gpu_addr)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	u32 cb_color_info;
+ 	int pitch, slice;
+ 
+@@ -63,38 +51,38 @@ set_render_target(struct radeon_device *rdev, int format,
+ 	pitch = (w / 8) - 1;
+ 	slice = ((w * h) / 64) - 1;
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+-	radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, gpu_addr >> 8);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
++	radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, gpu_addr >> 8);
+ 
+ 	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
+-		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
+-		radeon_ring_write(rdev, 2 << 0);
++		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
++		radeon_ring_write(ring, 2 << 0);
+ 	}
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+-	radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, (pitch << 0) | (slice << 10));
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
++	radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, (pitch << 0) | (slice << 10));
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+-	radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, 0);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
++	radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, 0);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+-	radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, cb_color_info);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
++	radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, cb_color_info);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+-	radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, 0);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
++	radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, 0);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+-	radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, 0);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
++	radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, 0);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+-	radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, 0);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
++	radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, 0);
+ }
+ 
+ /* emits 5dw */
+@@ -103,6 +91,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
+ 		    u32 sync_type, u32 size,
+ 		    u64 mc_addr)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	u32 cp_coher_size;
+ 
+ 	if (size == 0xffffffff)
+@@ -110,17 +99,18 @@ cp_set_surface_sync(struct radeon_device *rdev,
+ 	else
+ 		cp_coher_size = ((size + 255) >> 8);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+-	radeon_ring_write(rdev, sync_type);
+-	radeon_ring_write(rdev, cp_coher_size);
+-	radeon_ring_write(rdev, mc_addr >> 8);
+-	radeon_ring_write(rdev, 10); /* poll interval */
++	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
++	radeon_ring_write(ring, sync_type);
++	radeon_ring_write(ring, cp_coher_size);
++	radeon_ring_write(ring, mc_addr >> 8);
++	radeon_ring_write(ring, 10); /* poll interval */
+ }
+ 
+ /* emits 21dw + 1 surface sync = 26dw */
+ static void
+ set_shaders(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	u64 gpu_addr;
+ 	u32 sq_pgm_resources;
+ 
+@@ -129,35 +119,35 @@ set_shaders(struct radeon_device *rdev)
+ 
+ 	/* VS */
+ 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+-	radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, gpu_addr >> 8);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
++	radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, gpu_addr >> 8);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+-	radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, sq_pgm_resources);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
++	radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, sq_pgm_resources);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+-	radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, 0);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
++	radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, 0);
+ 
+ 	/* PS */
+ 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+-	radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, gpu_addr >> 8);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
++	radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, gpu_addr >> 8);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+-	radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
++	radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+-	radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, 2);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
++	radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, 2);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+-	radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, 0);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
++	radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, 0);
+ 
+ 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
+ 	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
+@@ -167,6 +157,7 @@ set_shaders(struct radeon_device *rdev)
+ static void
+ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	u32 sq_vtx_constant_word2;
+ 
+ 	sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
+@@ -175,15 +166,15 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
+ 	sq_vtx_constant_word2 |=  SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
+ #endif
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
+-	radeon_ring_write(rdev, 0x460);
+-	radeon_ring_write(rdev, gpu_addr & 0xffffffff);
+-	radeon_ring_write(rdev, 48 - 1);
+-	radeon_ring_write(rdev, sq_vtx_constant_word2);
+-	radeon_ring_write(rdev, 1 << 0);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
++	radeon_ring_write(ring, 0x460);
++	radeon_ring_write(ring, gpu_addr & 0xffffffff);
++	radeon_ring_write(ring, 48 - 1);
++	radeon_ring_write(ring, sq_vtx_constant_word2);
++	radeon_ring_write(ring, 1 << 0);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
+ 
+ 	if ((rdev->family == CHIP_RV610) ||
+ 	    (rdev->family == CHIP_RV620) ||
+@@ -203,6 +194,7 @@ set_tex_resource(struct radeon_device *rdev,
+ 		 int format, int w, int h, int pitch,
+ 		 u64 gpu_addr, u32 size)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
+ 
+ 	if (h < 1)
+@@ -225,15 +217,15 @@ set_tex_resource(struct radeon_device *rdev,
+ 	cp_set_surface_sync(rdev,
+ 			    PACKET3_TC_ACTION_ENA, size, gpu_addr);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, sq_tex_resource_word0);
+-	radeon_ring_write(rdev, sq_tex_resource_word1);
+-	radeon_ring_write(rdev, gpu_addr >> 8);
+-	radeon_ring_write(rdev, gpu_addr >> 8);
+-	radeon_ring_write(rdev, sq_tex_resource_word4);
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, sq_tex_resource_word0);
++	radeon_ring_write(ring, sq_tex_resource_word1);
++	radeon_ring_write(ring, gpu_addr >> 8);
++	radeon_ring_write(ring, gpu_addr >> 8);
++	radeon_ring_write(ring, sq_tex_resource_word4);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
+ }
+ 
+ /* emits 12 */
+@@ -241,43 +233,45 @@ static void
+ set_scissors(struct radeon_device *rdev, int x1, int y1,
+ 	     int x2, int y2)
+ {
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+-	radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
+-	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+-
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+-	radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
+-	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+-
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+-	radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
+-	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
++	radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
++	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
++
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
++	radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
++	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
++
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
++	radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
++	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+ }
+ 
+ /* emits 10 */
+ static void
+ draw_auto(struct radeon_device *rdev)
+ {
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+-	radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, DI_PT_RECTLIST);
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
++	radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, DI_PT_RECTLIST);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
++	radeon_ring_write(ring,
+ #ifdef __BIG_ENDIAN
+ 			  (2 << 2) |
+ #endif
+ 			  DI_INDEX_SIZE_16_BIT);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
+-	radeon_ring_write(rdev, 1);
++	radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
++	radeon_ring_write(ring, 1);
+ 
+-	radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+-	radeon_ring_write(rdev, 3);
+-	radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
++	radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
++	radeon_ring_write(ring, 3);
++	radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
+ 
+ }
+ 
+@@ -285,6 +279,7 @@ draw_auto(struct radeon_device *rdev)
+ static void
+ set_default_state(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
+ 	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
+ 	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
+@@ -440,47 +435,62 @@ set_default_state(struct radeon_device *rdev)
+ 	/* emit an IB pointing at default state */
+ 	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
+ 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
+-	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
++	radeon_ring_write(ring,
+ #ifdef __BIG_ENDIAN
+ 			  (2 << 0) |
+ #endif
+ 			  (gpu_addr & 0xFFFFFFFC));
+-	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
+-	radeon_ring_write(rdev, dwords);
++	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
++	radeon_ring_write(ring, dwords);
+ 
+ 	/* SQ config */
+-	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
+-	radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+-	radeon_ring_write(rdev, sq_config);
+-	radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
+-	radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
+-	radeon_ring_write(rdev, sq_thread_resource_mgmt);
+-	radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
+-	radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
++	radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
++	radeon_ring_write(ring, sq_config);
++	radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
++	radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
++	radeon_ring_write(ring, sq_thread_resource_mgmt);
++	radeon_ring_write(ring, sq_stack_resource_mgmt_1);
++	radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+ }
+ 
++#define I2F_MAX_BITS 15
++#define I2F_MAX_INPUT  ((1 << I2F_MAX_BITS) - 1)
++#define I2F_SHIFT (24 - I2F_MAX_BITS)
++
++/*
++ * Converts unsigned integer into 32-bit IEEE floating point representation.
++ * Conversion is not universal and only works for the range from 0
++ * to 2^I2F_MAX_BITS-1. Currently we only use it with inputs between
++ * 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary,
++ * I2F_MAX_BITS can be increased, but that will add to the loop iterations
++ * and slow us down. Conversion is done by shifting the input and counting
++ * down until the first 1 reaches bit position 23. The resulting counter
++ * and the shifted input are, respectively, the exponent and the fraction.
++ * The sign is always zero.
++ */
+ static uint32_t i2f(uint32_t input)
+ {
+ 	u32 result, i, exponent, fraction;
+ 
+-	if ((input & 0x3fff) == 0)
+-		result = 0; /* 0 is a special case */
++	WARN_ON_ONCE(input > I2F_MAX_INPUT);
++
++	if ((input & I2F_MAX_INPUT) == 0)
++		result = 0;
+ 	else {
+-		exponent = 140; /* exponent biased by 127; */
+-		fraction = (input & 0x3fff) << 10; /* cheat and only
+-						      handle numbers below 2^^15 */
+-		for (i = 0; i < 14; i++) {
++		exponent = 126 + I2F_MAX_BITS;
++		fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT;
++
++		for (i = 0; i < I2F_MAX_BITS; i++) {
+ 			if (fraction & 0x800000)
+ 				break;
+ 			else {
+-				fraction = fraction << 1; /* keep
+-							     shifting left until top bit = 1 */
++				fraction = fraction << 1;
+ 				exponent = exponent - 1;
+ 			}
+ 		}
+-		result = exponent << 23 | (fraction & 0x7fffff); /* mask
+-								    off top bit; assumed 1 */
++		result = exponent << 23 | (fraction & 0x7fffff);
+ 	}
+ 	return result;
+ }
+@@ -611,16 +621,17 @@ void r600_blit_fini(struct radeon_device *rdev)
+ 	radeon_bo_unref(&rdev->r600_blit.shader_obj);
+ }
+ 
+-static int r600_vb_ib_get(struct radeon_device *rdev)
++static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
+ {
+ 	int r;
+-	r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
++	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
++			  &rdev->r600_blit.vb_ib, size);
+ 	if (r) {
+ 		DRM_ERROR("failed to get IB for vertex buffer\n");
+ 		return r;
+ 	}
+ 
+-	rdev->r600_blit.vb_total = 64*1024;
++	rdev->r600_blit.vb_total = size;
+ 	rdev->r600_blit.vb_used = 0;
+ 	return 0;
+ }
+@@ -679,15 +690,12 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
+ 
+ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	int r;
+ 	int ring_size;
+ 	int num_loops = 0;
+ 	int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
+ 
+-	r = r600_vb_ib_get(rdev);
+-	if (r)
+-		return r;
+-
+ 	/* num loops */
+ 	while (num_gpu_pages) {
+ 		num_gpu_pages -=
+@@ -696,10 +704,15 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
+ 		num_loops++;
+ 	}
+ 
++	/* 48 bytes for vertex per loop */
++	r = r600_vb_ib_get(rdev, (num_loops*48)+256);
++	if (r)
++		return r;
++
+ 	/* calculate number of loops correctly */
+ 	ring_size = num_loops * dwords_per_loop;
+ 	ring_size += rdev->r600_blit.ring_size_common;
+-	r = radeon_ring_lock(rdev, ring_size);
++	r = radeon_ring_lock(rdev, ring, ring_size);
+ 	if (r)
+ 		return r;
+ 
+@@ -718,7 +731,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
+ 	if (fence)
+ 		r = radeon_fence_emit(rdev, fence);
+ 
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ }
+ 
+ void r600_kms_blit_copy(struct radeon_device *rdev,
+diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
+index 73e2c7c..34c8b23 100644
+--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
++++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
+@@ -24,6 +24,7 @@
+  *     Alex Deucher <alexander.deucher at amd.com>
+  */
+ 
++#include <linux/bug.h>
+ #include <linux/types.h>
+ #include <linux/kernel.h>
+ 
+diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
+index c9db493..75ed17c 100644
+--- a/drivers/gpu/drm/radeon/r600_cp.c
++++ b/drivers/gpu/drm/radeon/r600_cp.c
+@@ -407,7 +407,7 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
+ 
+ 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
+ 	RADEON_READ(R600_GRBM_SOFT_RESET);
+-	DRM_UDELAY(15000);
++	mdelay(15);
+ 	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+ 
+ 	fw_data = (const __be32 *)dev_priv->me_fw->data;
+@@ -500,7 +500,7 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
+ 
+ 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
+ 	RADEON_READ(R600_GRBM_SOFT_RESET);
+-	DRM_UDELAY(15000);
++	mdelay(15);
+ 	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+ 
+ 	fw_data = (const __be32 *)dev_priv->pfp_fw->data;
+@@ -1797,7 +1797,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
+ 
+ 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
+ 	RADEON_READ(R600_GRBM_SOFT_RESET);
+-	DRM_UDELAY(15000);
++	mdelay(15);
+ 	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
+ 
+ 
+@@ -1815,7 +1815,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
+ 		     dev_priv->ring.size_l2qw);
+ #endif
+ 
+-	RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x4);
++	RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x0);
+ 
+ 	/* Set the write pointer delay */
+ 	RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
+diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
+index cb1acff..b8e12af 100644
+--- a/drivers/gpu/drm/radeon/r600_cs.c
++++ b/drivers/gpu/drm/radeon/r600_cs.c
+@@ -52,15 +52,20 @@ struct r600_cs_track {
+ 	struct radeon_bo	*cb_color_bo[8];
+ 	u64			cb_color_bo_mc[8];
+ 	u32			cb_color_bo_offset[8];
+-	struct radeon_bo	*cb_color_frag_bo[8];
+-	struct radeon_bo	*cb_color_tile_bo[8];
++	struct radeon_bo	*cb_color_frag_bo[8]; /* unused */
++	struct radeon_bo	*cb_color_tile_bo[8]; /* unused */
+ 	u32			cb_color_info[8];
+-	u32			cb_color_size_idx[8];
++	u32			cb_color_view[8];
++	u32			cb_color_size_idx[8]; /* unused */
+ 	u32			cb_target_mask;
+-	u32			cb_shader_mask;
++	u32			cb_shader_mask;  /* unused */
+ 	u32			cb_color_size[8];
+ 	u32			vgt_strmout_en;
+ 	u32			vgt_strmout_buffer_en;
++	struct radeon_bo	*vgt_strmout_bo[4];
++	u64			vgt_strmout_bo_mc[4]; /* unused */
++	u32			vgt_strmout_bo_offset[4];
++	u32			vgt_strmout_size[4];
+ 	u32			db_depth_control;
+ 	u32			db_depth_info;
+ 	u32			db_depth_size_idx;
+@@ -69,13 +74,20 @@ struct r600_cs_track {
+ 	u32			db_offset;
+ 	struct radeon_bo	*db_bo;
+ 	u64			db_bo_mc;
++	bool			sx_misc_kill_all_prims;
++	bool			cb_dirty;
++	bool			db_dirty;
++	bool			streamout_dirty;
++	struct radeon_bo	*htile_bo;
++	u64			htile_offset;
++	u32			htile_surface;
+ };
+ 
+ #define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
+ #define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
+-#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 3,  0, CHIP_R600 }
++#define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4,  0, CHIP_R600 }
+ #define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
+-#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 6,  0, CHIP_R600 }
++#define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8,  0, CHIP_R600 }
+ #define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
+ #define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
+ #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 }
+@@ -107,7 +119,7 @@ static const struct gpu_formats color_formats_table[] = {
+ 
+ 	/* 24-bit */
+ 	FMT_24_BIT(V_038004_FMT_8_8_8),
+-					       
++
+ 	/* 32-bit */
+ 	FMT_32_BIT(V_038004_COLOR_32, 1),
+ 	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
+@@ -162,22 +174,22 @@ static const struct gpu_formats color_formats_table[] = {
+ 	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
+ };
+ 
+-static bool fmt_is_valid_color(u32 format)
++bool r600_fmt_is_valid_color(u32 format)
+ {
+ 	if (format >= ARRAY_SIZE(color_formats_table))
+ 		return false;
+-	
++
+ 	if (color_formats_table[format].valid_color)
+ 		return true;
+ 
+ 	return false;
+ }
+ 
+-static bool fmt_is_valid_texture(u32 format, enum radeon_family family)
++bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
+ {
+ 	if (format >= ARRAY_SIZE(color_formats_table))
+ 		return false;
+-	
++
+ 	if (family < color_formats_table[format].min_family)
+ 		return false;
+ 
+@@ -187,7 +199,7 @@ static bool fmt_is_valid_texture(u32 format, enum radeon_family family)
+ 	return false;
+ }
+ 
+-static int fmt_get_blocksize(u32 format)
++int r600_fmt_get_blocksize(u32 format)
+ {
+ 	if (format >= ARRAY_SIZE(color_formats_table))
+ 		return 0;
+@@ -195,7 +207,7 @@ static int fmt_get_blocksize(u32 format)
+ 	return color_formats_table[format].blocksize;
+ }
+ 
+-static int fmt_get_nblocksx(u32 format, u32 w)
++int r600_fmt_get_nblocksx(u32 format, u32 w)
+ {
+ 	unsigned bw;
+ 
+@@ -209,7 +221,7 @@ static int fmt_get_nblocksx(u32 format, u32 w)
+ 	return (w + bw - 1) / bw;
+ }
+ 
+-static int fmt_get_nblocksy(u32 format, u32 h)
++int r600_fmt_get_nblocksy(u32 format, u32 h)
+ {
+ 	unsigned bh;
+ 
+@@ -256,7 +268,7 @@ static int r600_get_array_mode_alignment(struct array_mode_checker *values,
+ 		break;
+ 	case ARRAY_LINEAR_ALIGNED:
+ 		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
+-		*height_align = tile_height;
++		*height_align = 1;
+ 		*depth_align = 1;
+ 		*base_align = values->group_size;
+ 		break;
+@@ -269,10 +281,9 @@ static int r600_get_array_mode_alignment(struct array_mode_checker *values,
+ 		*base_align = values->group_size;
+ 		break;
+ 	case ARRAY_2D_TILED_THIN1:
+-		*pitch_align = max((u32)macro_tile_width,
+-				  (u32)(((values->group_size / tile_height) /
+-					 (values->blocksize * values->nsamples)) *
+-					values->nbanks)) * tile_width;
++		*pitch_align = max((u32)macro_tile_width * tile_width,
++				(u32)((values->group_size * values->nbanks) /
++				(values->blocksize * values->nsamples * tile_width)));
+ 		*height_align = macro_tile_height * tile_height;
+ 		*depth_align = 1;
+ 		*base_align = max(macro_tile_bytes,
+@@ -296,12 +307,14 @@ static void r600_cs_track_init(struct r600_cs_track *track)
+ 		track->cb_color_size[i] = 0;
+ 		track->cb_color_size_idx[i] = 0;
+ 		track->cb_color_info[i] = 0;
++		track->cb_color_view[i] = 0xFFFFFFFF;
+ 		track->cb_color_bo[i] = NULL;
+ 		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+ 		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
+ 	}
+ 	track->cb_target_mask = 0xFFFFFFFF;
+ 	track->cb_shader_mask = 0xFFFFFFFF;
++	track->cb_dirty = true;
+ 	track->db_bo = NULL;
+ 	track->db_bo_mc = 0xFFFFFFFF;
+ 	/* assume the biggest format and that htile is enabled */
+@@ -310,6 +323,19 @@ static void r600_cs_track_init(struct r600_cs_track *track)
+ 	track->db_depth_size = 0xFFFFFFFF;
+ 	track->db_depth_size_idx = 0;
+ 	track->db_depth_control = 0xFFFFFFFF;
++	track->db_dirty = true;
++	track->htile_bo = NULL;
++	track->htile_offset = 0xFFFFFFFF;
++	track->htile_surface = 0;
++
++	for (i = 0; i < 4; i++) {
++		track->vgt_strmout_size[i] = 0;
++		track->vgt_strmout_bo[i] = NULL;
++		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
++		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
++	}
++	track->streamout_dirty = true;
++	track->sx_misc_kill_all_prims = false;
+ }
+ 
+ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+@@ -322,13 +348,14 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+ 	volatile u32 *ib = p->ib->ptr;
+ 	unsigned array_mode;
+ 	u32 format;
++
+ 	if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
+ 		dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
+ 		return -EINVAL;
+ 	}
+ 	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
+ 	format = G_0280A0_FORMAT(track->cb_color_info[i]);
+-	if (!fmt_is_valid_color(format)) {
++	if (!r600_fmt_is_valid_color(format)) {
+ 		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
+ 			 __func__, __LINE__, format,
+ 			i, track->cb_color_info[i]);
+@@ -349,7 +376,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+ 	array_check.nbanks = track->nbanks;
+ 	array_check.npipes = track->npipes;
+ 	array_check.nsamples = track->nsamples;
+-	array_check.blocksize = fmt_get_blocksize(format);
++	array_check.blocksize = r600_fmt_get_blocksize(format);
+ 	if (r600_get_array_mode_alignment(&array_check,
+ 					  &pitch_align, &height_align, &depth_align, &base_align)) {
+ 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+@@ -393,7 +420,18 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+ 	}
+ 
+ 	/* check offset */
+-	tmp = fmt_get_nblocksy(format, height) * fmt_get_nblocksx(format, pitch) * fmt_get_blocksize(format);
++	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format);
++	switch (array_mode) {
++	default:
++	case V_0280A0_ARRAY_LINEAR_GENERAL:
++	case V_0280A0_ARRAY_LINEAR_ALIGNED:
++		tmp += track->cb_color_view[i] & 0xFF;
++		break;
++	case V_0280A0_ARRAY_1D_TILED_THIN1:
++	case V_0280A0_ARRAY_2D_TILED_THIN1:
++		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
++		break;
++	}
+ 	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
+ 		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
+ 			/* the initial DDX does bad things with the CB size occasionally */
+@@ -403,10 +441,13 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+ 			 * broken userspace.
+ 			 */
+ 		} else {
+-			dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
+-				 array_mode,
++			dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n",
++				 __func__, i, array_mode,
+ 				 track->cb_color_bo_offset[i], tmp,
+-				 radeon_bo_size(track->cb_color_bo[i]));
++				 radeon_bo_size(track->cb_color_bo[i]),
++				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
++				 r600_fmt_get_nblocksy(format, height),
++				 r600_fmt_get_blocksize(format));
+ 			return -EINVAL;
+ 		}
+ 	}
+@@ -420,154 +461,316 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+ 	return 0;
+ }
+ 
+-static int r600_cs_track_check(struct radeon_cs_parser *p)
++static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
+ {
+ 	struct r600_cs_track *track = p->track;
+-	u32 tmp;
+-	int r, i;
++	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
++	u32 height_align, pitch_align, depth_align;
++	u32 pitch = 8192;
++	u32 height = 8192;
++	u64 base_offset, base_align;
++	struct array_mode_checker array_check;
++	int array_mode;
+ 	volatile u32 *ib = p->ib->ptr;
+ 
+-	/* on legacy kernel we don't perform advanced check */
+-	if (p->rdev == NULL)
+-		return 0;
+-	/* we don't support out buffer yet */
+-	if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
+-		dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
++
++	if (track->db_bo == NULL) {
++		dev_warn(p->dev, "z/stencil with no depth buffer\n");
+ 		return -EINVAL;
+ 	}
+-	/* check that we have a cb for each enabled target, we don't check
+-	 * shader_mask because it seems mesa isn't always setting it :(
+-	 */
+-	tmp = track->cb_target_mask;
+-	for (i = 0; i < 8; i++) {
+-		if ((tmp >> (i * 4)) & 0xF) {
+-			/* at least one component is enabled */
+-			if (track->cb_color_bo[i] == NULL) {
+-				dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+-					__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+-				return -EINVAL;
+-			}
+-			/* perform rewrite of CB_COLOR[0-7]_SIZE */
+-			r = r600_cs_track_validate_cb(p, i);
+-			if (r)
+-				return r;
+-		}
++	switch (G_028010_FORMAT(track->db_depth_info)) {
++	case V_028010_DEPTH_16:
++		bpe = 2;
++		break;
++	case V_028010_DEPTH_X8_24:
++	case V_028010_DEPTH_8_24:
++	case V_028010_DEPTH_X8_24_FLOAT:
++	case V_028010_DEPTH_8_24_FLOAT:
++	case V_028010_DEPTH_32_FLOAT:
++		bpe = 4;
++		break;
++	case V_028010_DEPTH_X24_8_32_FLOAT:
++		bpe = 8;
++		break;
++	default:
++		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
++		return -EINVAL;
+ 	}
+-	/* Check depth buffer */
+-	if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+-		G_028800_Z_ENABLE(track->db_depth_control)) {
+-		u32 nviews, bpe, ntiles, size, slice_tile_max;
+-		u32 height, height_align, pitch, pitch_align, depth_align;
+-		u64 base_offset, base_align;
+-		struct array_mode_checker array_check;
+-		int array_mode;
+-
+-		if (track->db_bo == NULL) {
+-			dev_warn(p->dev, "z/stencil with no depth buffer\n");
++	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
++		if (!track->db_depth_size_idx) {
++			dev_warn(p->dev, "z/stencil buffer size not set\n");
+ 			return -EINVAL;
+ 		}
+-		if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
+-			dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
++		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
++		tmp = (tmp / bpe) >> 6;
++		if (!tmp) {
++			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
++					track->db_depth_size, bpe, track->db_offset,
++					radeon_bo_size(track->db_bo));
+ 			return -EINVAL;
+ 		}
+-		switch (G_028010_FORMAT(track->db_depth_info)) {
+-		case V_028010_DEPTH_16:
+-			bpe = 2;
+-			break;
+-		case V_028010_DEPTH_X8_24:
+-		case V_028010_DEPTH_8_24:
+-		case V_028010_DEPTH_X8_24_FLOAT:
+-		case V_028010_DEPTH_8_24_FLOAT:
+-		case V_028010_DEPTH_32_FLOAT:
+-			bpe = 4;
++		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
++	} else {
++		size = radeon_bo_size(track->db_bo);
++		/* pitch in pixels */
++		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
++		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
++		slice_tile_max *= 64;
++		height = slice_tile_max / pitch;
++		if (height > 8192)
++			height = 8192;
++		base_offset = track->db_bo_mc + track->db_offset;
++		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
++		array_check.array_mode = array_mode;
++		array_check.group_size = track->group_size;
++		array_check.nbanks = track->nbanks;
++		array_check.npipes = track->npipes;
++		array_check.nsamples = track->nsamples;
++		array_check.blocksize = bpe;
++		if (r600_get_array_mode_alignment(&array_check,
++					&pitch_align, &height_align, &depth_align, &base_align)) {
++			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
++					G_028010_ARRAY_MODE(track->db_depth_info),
++					track->db_depth_info);
++			return -EINVAL;
++		}
++		switch (array_mode) {
++		case V_028010_ARRAY_1D_TILED_THIN1:
++			/* don't break userspace */
++			height &= ~0x7;
+ 			break;
+-		case V_028010_DEPTH_X24_8_32_FLOAT:
+-			bpe = 8;
++		case V_028010_ARRAY_2D_TILED_THIN1:
+ 			break;
+ 		default:
+-			dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
++			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
++					G_028010_ARRAY_MODE(track->db_depth_info),
++					track->db_depth_info);
++			return -EINVAL;
++		}
++
++		if (!IS_ALIGNED(pitch, pitch_align)) {
++			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
++					__func__, __LINE__, pitch, pitch_align, array_mode);
++			return -EINVAL;
++		}
++		if (!IS_ALIGNED(height, height_align)) {
++			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
++					__func__, __LINE__, height, height_align, array_mode);
++			return -EINVAL;
++		}
++		if (!IS_ALIGNED(base_offset, base_align)) {
++			dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
++					base_offset, base_align, array_mode);
++			return -EINVAL;
++		}
++
++		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
++		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
++		tmp = ntiles * bpe * 64 * nviews;
++		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
++			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
++					array_mode,
++					track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
++					radeon_bo_size(track->db_bo));
++			return -EINVAL;
++		}
++	}
++
++	/* hyperz */
++	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
++		unsigned long size;
++		unsigned nbx, nby;
++
++		if (track->htile_bo == NULL) {
++			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
++				 __func__, __LINE__, track->db_depth_info);
+ 			return -EINVAL;
+ 		}
+ 		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+-			if (!track->db_depth_size_idx) {
+-				dev_warn(p->dev, "z/stencil buffer size not set\n");
+-				return -EINVAL;
+-			}
+-			tmp = radeon_bo_size(track->db_bo) - track->db_offset;
+-			tmp = (tmp / bpe) >> 6;
+-			if (!tmp) {
+-				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+-						track->db_depth_size, bpe, track->db_offset,
+-						radeon_bo_size(track->db_bo));
+-				return -EINVAL;
+-			}
+-			ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
++			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
++				 __func__, __LINE__, track->db_depth_size);
++			return -EINVAL;
++		}
++
++		nbx = pitch;
++		nby = height;
++		if (G_028D24_LINEAR(track->htile_surface)) {
++			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
++			nbx = round_up(nbx, 16 * 8);
++			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
++			nby = round_up(nby, track->npipes * 8);
+ 		} else {
+-			size = radeon_bo_size(track->db_bo);
+-			/* pitch in pixels */
+-			pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
+-			slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+-			slice_tile_max *= 64;
+-			height = slice_tile_max / pitch;
+-			if (height > 8192)
+-				height = 8192;
+-			base_offset = track->db_bo_mc + track->db_offset;
+-			array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
+-			array_check.array_mode = array_mode;
+-			array_check.group_size = track->group_size;
+-			array_check.nbanks = track->nbanks;
+-			array_check.npipes = track->npipes;
+-			array_check.nsamples = track->nsamples;
+-			array_check.blocksize = bpe;
+-			if (r600_get_array_mode_alignment(&array_check,
+-							  &pitch_align, &height_align, &depth_align, &base_align)) {
+-				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+-					 G_028010_ARRAY_MODE(track->db_depth_info),
+-					 track->db_depth_info);
+-				return -EINVAL;
+-			}
+-			switch (array_mode) {
+-			case V_028010_ARRAY_1D_TILED_THIN1:
+-				/* don't break userspace */
+-				height &= ~0x7;
++			/* htile widht & nby (8 or 4) make 2 bits number */
++			tmp = track->htile_surface & 3;
++			/* align is htile align * 8, htile align vary according to
++			 * number of pipe and tile width and nby
++			 */
++			switch (track->npipes) {
++			case 8:
++				switch (tmp) {
++				case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
++					nbx = round_up(nbx, 64 * 8);
++					nby = round_up(nby, 64 * 8);
++					break;
++				case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
++				case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
++					nbx = round_up(nbx, 64 * 8);
++					nby = round_up(nby, 32 * 8);
++					break;
++				case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
++					nbx = round_up(nbx, 32 * 8);
++					nby = round_up(nby, 32 * 8);
++					break;
++				default:
++					return -EINVAL;
++				}
++				break;
++			case 4:
++				switch (tmp) {
++				case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
++					nbx = round_up(nbx, 64 * 8);
++					nby = round_up(nby, 32 * 8);
++					break;
++				case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
++				case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
++					nbx = round_up(nbx, 32 * 8);
++					nby = round_up(nby, 32 * 8);
++					break;
++				case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
++					nbx = round_up(nbx, 32 * 8);
++					nby = round_up(nby, 16 * 8);
++					break;
++				default:
++					return -EINVAL;
++				}
++				break;
++			case 2:
++				switch (tmp) {
++				case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
++					nbx = round_up(nbx, 32 * 8);
++					nby = round_up(nby, 32 * 8);
++					break;
++				case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
++				case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
++					nbx = round_up(nbx, 32 * 8);
++					nby = round_up(nby, 16 * 8);
++					break;
++				case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
++					nbx = round_up(nbx, 16 * 8);
++					nby = round_up(nby, 16 * 8);
++					break;
++				default:
++					return -EINVAL;
++				}
+ 				break;
+-			case V_028010_ARRAY_2D_TILED_THIN1:
++			case 1:
++				switch (tmp) {
++				case 3:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
++					nbx = round_up(nbx, 32 * 8);
++					nby = round_up(nby, 16 * 8);
++					break;
++				case 2:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
++				case 1:	/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
++					nbx = round_up(nbx, 16 * 8);
++					nby = round_up(nby, 16 * 8);
++					break;
++				case 0:	/* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
++					nbx = round_up(nbx, 16 * 8);
++					nby = round_up(nby, 8 * 8);
++					break;
++				default:
++					return -EINVAL;
++				}
+ 				break;
+ 			default:
+-				dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+-					 G_028010_ARRAY_MODE(track->db_depth_info),
+-					 track->db_depth_info);
++				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
++					 __func__, __LINE__, track->npipes);
+ 				return -EINVAL;
+ 			}
++		}
++		/* compute number of htile */
++		nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4;
++		nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4;
++		size = nbx * nby * 4;
++		size += track->htile_offset;
++
++		if (size > radeon_bo_size(track->htile_bo)) {
++			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
++				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
++				 size, nbx, nby);
++			return -EINVAL;
++		}
++	}
+ 
+-			if (!IS_ALIGNED(pitch, pitch_align)) {
+-				dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+-					 __func__, __LINE__, pitch, pitch_align, array_mode);
+-				return -EINVAL;
+-			}
+-			if (!IS_ALIGNED(height, height_align)) {
+-				dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+-					 __func__, __LINE__, height, height_align, array_mode);
+-				return -EINVAL;
+-			}
+-			if (!IS_ALIGNED(base_offset, base_align)) {
+-				dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
+-					 base_offset, base_align, array_mode);
+-				return -EINVAL;
++	track->db_dirty = false;
++	return 0;
++}
++
++static int r600_cs_track_check(struct radeon_cs_parser *p)
++{
++	struct r600_cs_track *track = p->track;
++	u32 tmp;
++	int r, i;
++
++	/* on legacy kernel we don't perform advanced check */
++	if (p->rdev == NULL)
++		return 0;
++
++	/* check streamout */
++	if (track->streamout_dirty && track->vgt_strmout_en) {
++		for (i = 0; i < 4; i++) {
++			if (track->vgt_strmout_buffer_en & (1 << i)) {
++				if (track->vgt_strmout_bo[i]) {
++					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
++						(u64)track->vgt_strmout_size[i];
++					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
++						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
++							  i, offset,
++							  radeon_bo_size(track->vgt_strmout_bo[i]));
++						return -EINVAL;
++					}
++				} else {
++					dev_warn(p->dev, "No buffer for streamout %d\n", i);
++					return -EINVAL;
++				}
+ 			}
++		}
++		track->streamout_dirty = false;
++	}
+ 
+-			ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+-			nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
+-			tmp = ntiles * bpe * 64 * nviews;
+-			if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
+-				dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+-					 array_mode,
+-					 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+-					 radeon_bo_size(track->db_bo));
+-				return -EINVAL;
++	if (track->sx_misc_kill_all_prims)
++		return 0;
++
++	/* check that we have a cb for each enabled target, we don't check
++	 * shader_mask because it seems mesa isn't always setting it :(
++	 */
++	if (track->cb_dirty) {
++		tmp = track->cb_target_mask;
++		for (i = 0; i < 8; i++) {
++			if ((tmp >> (i * 4)) & 0xF) {
++				/* at least one component is enabled */
++				if (track->cb_color_bo[i] == NULL) {
++					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
++						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
++					return -EINVAL;
++				}
++				/* perform rewrite of CB_COLOR[0-7]_SIZE */
++				r = r600_cs_track_validate_cb(p, i);
++				if (r)
++					return r;
+ 			}
+ 		}
++		track->cb_dirty = false;
++	}
++
++	/* Check depth buffer */
++	if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
++		G_028800_Z_ENABLE(track->db_depth_control))) {
++		r = r600_cs_track_validate_db(p);
++		if (r)
++			return r;
+ 	}
++
+ 	return 0;
+ }
+ 
+@@ -939,9 +1142,10 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		break;
+ 	case R_028800_DB_DEPTH_CONTROL:
+ 		track->db_depth_control = radeon_get_ib_value(p, idx);
++		track->db_dirty = true;
+ 		break;
+ 	case R_028010_DB_DEPTH_INFO:
+-		if (!p->keep_tiling_flags &&
++		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
+ 		    r600_cs_packet_next_is_pkt3_nop(p)) {
+ 			r = r600_cs_packet_next_reloc(p, &reloc);
+ 			if (r) {
+@@ -959,24 +1163,66 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
+ 				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
+ 			}
+-		} else
++		} else {
+ 			track->db_depth_info = radeon_get_ib_value(p, idx);
++		}
++		track->db_dirty = true;
+ 		break;
+ 	case R_028004_DB_DEPTH_VIEW:
+ 		track->db_depth_view = radeon_get_ib_value(p, idx);
++		track->db_dirty = true;
+ 		break;
+ 	case R_028000_DB_DEPTH_SIZE:
+ 		track->db_depth_size = radeon_get_ib_value(p, idx);
+ 		track->db_depth_size_idx = idx;
++		track->db_dirty = true;
+ 		break;
+ 	case R_028AB0_VGT_STRMOUT_EN:
+ 		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
++		track->streamout_dirty = true;
+ 		break;
+ 	case R_028B20_VGT_STRMOUT_BUFFER_EN:
+ 		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
++		track->streamout_dirty = true;
++		break;
++	case VGT_STRMOUT_BUFFER_BASE_0:
++	case VGT_STRMOUT_BUFFER_BASE_1:
++	case VGT_STRMOUT_BUFFER_BASE_2:
++	case VGT_STRMOUT_BUFFER_BASE_3:
++		r = r600_cs_packet_next_reloc(p, &reloc);
++		if (r) {
++			dev_warn(p->dev, "bad SET_CONTEXT_REG "
++					"0x%04X\n", reg);
++			return -EINVAL;
++		}
++		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
++		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
++		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
++		track->vgt_strmout_bo[tmp] = reloc->robj;
++		track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
++		track->streamout_dirty = true;
++		break;
++	case VGT_STRMOUT_BUFFER_SIZE_0:
++	case VGT_STRMOUT_BUFFER_SIZE_1:
++	case VGT_STRMOUT_BUFFER_SIZE_2:
++	case VGT_STRMOUT_BUFFER_SIZE_3:
++		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
++		/* size in register is DWs, convert to bytes */
++		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
++		track->streamout_dirty = true;
++		break;
++	case CP_COHER_BASE:
++		r = r600_cs_packet_next_reloc(p, &reloc);
++		if (r) {
++			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
++					"0x%04X\n", reg);
++			return -EINVAL;
++		}
++		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ 		break;
+ 	case R_028238_CB_TARGET_MASK:
+ 		track->cb_target_mask = radeon_get_ib_value(p, idx);
++		track->cb_dirty = true;
+ 		break;
+ 	case R_02823C_CB_SHADER_MASK:
+ 		track->cb_shader_mask = radeon_get_ib_value(p, idx);
+@@ -984,6 +1230,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 	case R_028C04_PA_SC_AA_CONFIG:
+ 		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
+ 		track->nsamples = 1 << tmp;
++		track->cb_dirty = true;
+ 		break;
+ 	case R_0280A0_CB_COLOR0_INFO:
+ 	case R_0280A4_CB_COLOR1_INFO:
+@@ -993,7 +1240,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 	case R_0280B4_CB_COLOR5_INFO:
+ 	case R_0280B8_CB_COLOR6_INFO:
+ 	case R_0280BC_CB_COLOR7_INFO:
+-		if (!p->keep_tiling_flags &&
++		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
+ 		     r600_cs_packet_next_is_pkt3_nop(p)) {
+ 			r = r600_cs_packet_next_reloc(p, &reloc);
+ 			if (r) {
+@@ -1013,6 +1260,19 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+ 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ 		}
++		track->cb_dirty = true;
++		break;
++	case R_028080_CB_COLOR0_VIEW:
++	case R_028084_CB_COLOR1_VIEW:
++	case R_028088_CB_COLOR2_VIEW:
++	case R_02808C_CB_COLOR3_VIEW:
++	case R_028090_CB_COLOR4_VIEW:
++	case R_028094_CB_COLOR5_VIEW:
++	case R_028098_CB_COLOR6_VIEW:
++	case R_02809C_CB_COLOR7_VIEW:
++		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
++		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
++		track->cb_dirty = true;
+ 		break;
+ 	case R_028060_CB_COLOR0_SIZE:
+ 	case R_028064_CB_COLOR1_SIZE:
+@@ -1025,6 +1285,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
+ 		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
+ 		track->cb_color_size_idx[tmp] = idx;
++		track->cb_dirty = true;
+ 		break;
+ 		/* This register were added late, there is userspace
+ 		 * which does provide relocation for those but set
+@@ -1107,6 +1368,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		track->cb_color_base_last[tmp] = ib[idx];
+ 		track->cb_color_bo[tmp] = reloc->robj;
+ 		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
++		track->cb_dirty = true;
+ 		break;
+ 	case DB_DEPTH_BASE:
+ 		r = r600_cs_packet_next_reloc(p, &reloc);
+@@ -1119,8 +1381,24 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ 		track->db_bo = reloc->robj;
+ 		track->db_bo_mc = reloc->lobj.gpu_offset;
++		track->db_dirty = true;
+ 		break;
+ 	case DB_HTILE_DATA_BASE:
++		r = r600_cs_packet_next_reloc(p, &reloc);
++		if (r) {
++			dev_warn(p->dev, "bad SET_CONTEXT_REG "
++					"0x%04X\n", reg);
++			return -EINVAL;
++		}
++		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
++		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
++		track->htile_bo = reloc->robj;
++		track->db_dirty = true;
++		break;
++	case DB_HTILE_SURFACE:
++		track->htile_surface = radeon_get_ib_value(p, idx);
++		track->db_dirty = true;
++		break;
+ 	case SQ_PGM_START_FS:
+ 	case SQ_PGM_START_ES:
+ 	case SQ_PGM_START_VS:
+@@ -1191,6 +1469,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 		}
+ 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ 		break;
++	case SX_MISC:
++		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
++		break;
+ 	default:
+ 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ 		return -EINVAL;
+@@ -1198,7 +1479,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+ 	return 0;
+ }
+ 
+-static unsigned mip_minify(unsigned size, unsigned level)
++unsigned r600_mip_minify(unsigned size, unsigned level)
+ {
+ 	unsigned val;
+ 
+@@ -1220,22 +1501,22 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
+ 	unsigned nlevels = llevel - blevel + 1;
+ 
+ 	*l0_size = -1;
+-	blocksize = fmt_get_blocksize(format);
++	blocksize = r600_fmt_get_blocksize(format);
+ 
+-	w0 = mip_minify(w0, 0);
+-	h0 = mip_minify(h0, 0);
+-	d0 = mip_minify(d0, 0);
++	w0 = r600_mip_minify(w0, 0);
++	h0 = r600_mip_minify(h0, 0);
++	d0 = r600_mip_minify(d0, 0);
+ 	for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+-		width = mip_minify(w0, i);
+-		nbx = fmt_get_nblocksx(format, width);
++		width = r600_mip_minify(w0, i);
++		nbx = r600_fmt_get_nblocksx(format, width);
+ 
+ 		nbx = round_up(nbx, block_align);
+ 
+-		height = mip_minify(h0, i);
+-		nby = fmt_get_nblocksy(format, height);
++		height = r600_mip_minify(h0, i);
++		nby = r600_fmt_get_nblocksy(format, height);
+ 		nby = round_up(nby, height_align);
+ 
+-		depth = mip_minify(d0, i);
++		depth = r600_mip_minify(d0, i);
+ 
+ 		size = nbx * nby * blocksize;
+ 		if (nfaces)
+@@ -1293,7 +1574,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
+ 	mip_offset <<= 8;
+ 
+ 	word0 = radeon_get_ib_value(p, idx + 0);
+-	if (!p->keep_tiling_flags) {
++	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ 		if (tiling_flags & RADEON_TILING_MACRO)
+ 			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ 		else if (tiling_flags & RADEON_TILING_MICRO)
+@@ -1304,6 +1585,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
+ 	h0 = G_038004_TEX_HEIGHT(word1) + 1;
+ 	d0 = G_038004_TEX_DEPTH(word1);
+ 	nfaces = 1;
++	array = 0;
+ 	switch (G_038000_DIM(word0)) {
+ 	case V_038000_SQ_TEX_DIM_1D:
+ 	case V_038000_SQ_TEX_DIM_2D:
+@@ -1326,7 +1608,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
+ 		return -EINVAL;
+ 	}
+ 	format = G_038004_DATA_FORMAT(word1);
+-	if (!fmt_is_valid_texture(format, p->family)) {
++	if (!r600_fmt_is_valid_texture(format, p->family)) {
+ 		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
+ 			 __func__, __LINE__, format);
+ 		return -EINVAL;
+@@ -1339,7 +1621,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
+ 	array_check.nbanks = track->nbanks;
+ 	array_check.npipes = track->npipes;
+ 	array_check.nsamples = 1;
+-	array_check.blocksize = fmt_get_blocksize(format);
++	array_check.blocksize = r600_fmt_get_blocksize(format);
+ 	if (r600_get_array_mode_alignment(&array_check,
+ 					  &pitch_align, &height_align, &depth_align, &base_align)) {
+ 		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
+@@ -1372,6 +1654,10 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
+ 	word1 = radeon_get_ib_value(p, idx + 5);
+ 	blevel = G_038010_BASE_LEVEL(word0);
+ 	llevel = G_038014_LAST_LEVEL(word1);
++	if (blevel > llevel) {
++		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
++			 blevel, llevel);
++	}
+ 	if (array == 1) {
+ 		barray = G_038014_BASE_ARRAY(word1);
+ 		larray = G_038014_LAST_ARRAY(word1);
+@@ -1383,8 +1669,10 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
+ 			  &l0_size, &mipmap_size);
+ 	/* using get ib will give us the offset into the texture bo */
+ 	if ((l0_size + word2) > radeon_bo_size(texture)) {
+-		dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
+-			w0, h0, format, word2, l0_size, radeon_bo_size(texture));
++		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
++			 w0, h0, pitch_align, height_align,
++			 array_check.array_mode, format, word2,
++			 l0_size, radeon_bo_size(texture));
+ 		dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
+ 		return -EINVAL;
+ 	}
+@@ -1397,6 +1685,22 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
+ 	return 0;
+ }
+ 
++static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
++{
++	u32 m, i;
++
++	i = (reg >> 7);
++	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
++		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
++		return false;
++	}
++	m = 1 << ((reg >> 2) & 31);
++	if (!(r600_reg_safe_bm[i] & m))
++		return true;
++	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
++	return false;
++}
++
+ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 				struct radeon_cs_packet *pkt)
+ {
+@@ -1419,6 +1723,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 	{
+ 		int pred_op;
+ 		int tmp;
++		uint64_t offset;
++
+ 		if (pkt->count != 1) {
+ 			DRM_ERROR("bad SET PREDICATION\n");
+ 			return -EINVAL;
+@@ -1442,8 +1748,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 			return -EINVAL;
+ 		}
+ 
+-		ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+-		ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
++		offset = reloc->lobj.gpu_offset +
++		         (idx_value & 0xfffffff0) +
++		         ((u64)(tmp & 0xff) << 32);
++
++		ib[idx + 0] = offset;
++		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ 	}
+ 	break;
+ 
+@@ -1467,6 +1777,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 		}
+ 		break;
+ 	case PACKET3_DRAW_INDEX:
++	{
++		uint64_t offset;
+ 		if (pkt->count != 3) {
+ 			DRM_ERROR("bad DRAW_INDEX\n");
+ 			return -EINVAL;
+@@ -1476,14 +1788,21 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 			DRM_ERROR("bad DRAW_INDEX\n");
+ 			return -EINVAL;
+ 		}
+-		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+-		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
++
++		offset = reloc->lobj.gpu_offset +
++		         idx_value +
++		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
++
++		ib[idx+0] = offset;
++		ib[idx+1] = upper_32_bits(offset) & 0xff;
++
+ 		r = r600_cs_track_check(p);
+ 		if (r) {
+ 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ 			return r;
+ 		}
+ 		break;
++	}
+ 	case PACKET3_DRAW_INDEX_AUTO:
+ 		if (pkt->count != 1) {
+ 			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+@@ -1514,13 +1833,20 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 		}
+ 		/* bit 4 is reg (0) or mem (1) */
+ 		if (idx_value & 0x10) {
++			uint64_t offset;
++
+ 			r = r600_cs_packet_next_reloc(p, &reloc);
+ 			if (r) {
+ 				DRM_ERROR("bad WAIT_REG_MEM\n");
+ 				return -EINVAL;
+ 			}
+-			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+-			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
++
++			offset = reloc->lobj.gpu_offset +
++			         (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
++			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
++
++			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
++			ib[idx+2] = upper_32_bits(offset) & 0xff;
+ 		}
+ 		break;
+ 	case PACKET3_SURFACE_SYNC:
+@@ -1545,16 +1871,25 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 			return -EINVAL;
+ 		}
+ 		if (pkt->count) {
++			uint64_t offset;
++
+ 			r = r600_cs_packet_next_reloc(p, &reloc);
+ 			if (r) {
+ 				DRM_ERROR("bad EVENT_WRITE\n");
+ 				return -EINVAL;
+ 			}
+-			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+-			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
++			offset = reloc->lobj.gpu_offset +
++			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
++			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
++
++			ib[idx+1] = offset & 0xfffffff8;
++			ib[idx+2] = upper_32_bits(offset) & 0xff;
+ 		}
+ 		break;
+ 	case PACKET3_EVENT_WRITE_EOP:
++	{
++		uint64_t offset;
++
+ 		if (pkt->count != 4) {
+ 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ 			return -EINVAL;
+@@ -1564,9 +1899,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 			DRM_ERROR("bad EVENT_WRITE\n");
+ 			return -EINVAL;
+ 		}
+-		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+-		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
++
++		offset = reloc->lobj.gpu_offset +
++		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
++		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
++
++		ib[idx+1] = offset & 0xfffffffc;
++		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
+ 		break;
++	}
+ 	case PACKET3_SET_CONFIG_REG:
+ 		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
+ 		end_reg = 4 * pkt->count + start_reg - 4;
+@@ -1625,7 +1966,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 					return -EINVAL;
+ 				}
+ 				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+-				if (!p->keep_tiling_flags) {
++				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ 					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ 						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ 					else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+@@ -1651,6 +1992,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 				ib[idx+1+(i*7)+3] += mip_offset;
+ 				break;
+ 			case SQ_TEX_VTX_VALID_BUFFER:
++			{
++				uint64_t offset64;
+ 				/* vtx base */
+ 				r = r600_cs_packet_next_reloc(p, &reloc);
+ 				if (r) {
+@@ -1663,11 +2006,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 					/* force size to size of the buffer */
+ 					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
+ 						 size + offset, radeon_bo_size(reloc->robj));
+-					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
++					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
+ 				}
+-				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
+-				ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
++
++				offset64 = reloc->lobj.gpu_offset + offset;
++				ib[idx+1+(i*8)+0] = offset64;
++				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
++						    (upper_32_bits(offset64) & 0xff);
+ 				break;
++			}
+ 			case SQ_TEX_VTX_INVALID_TEXTURE:
+ 			case SQ_TEX_VTX_INVALID_BUFFER:
+ 			default:
+@@ -1742,6 +2089,104 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 			return -EINVAL;
+ 		}
+ 		break;
++	case PACKET3_STRMOUT_BUFFER_UPDATE:
++		if (pkt->count != 4) {
++			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
++			return -EINVAL;
++		}
++		/* Updating memory at DST_ADDRESS. */
++		if (idx_value & 0x1) {
++			u64 offset;
++			r = r600_cs_packet_next_reloc(p, &reloc);
++			if (r) {
++				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
++				return -EINVAL;
++			}
++			offset = radeon_get_ib_value(p, idx+1);
++			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
++			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
++				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
++					  offset + 4, radeon_bo_size(reloc->robj));
++				return -EINVAL;
++			}
++			offset += reloc->lobj.gpu_offset;
++			ib[idx+1] = offset;
++			ib[idx+2] = upper_32_bits(offset) & 0xff;
++		}
++		/* Reading data from SRC_ADDRESS. */
++		if (((idx_value >> 1) & 0x3) == 2) {
++			u64 offset;
++			r = r600_cs_packet_next_reloc(p, &reloc);
++			if (r) {
++				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
++				return -EINVAL;
++			}
++			offset = radeon_get_ib_value(p, idx+3);
++			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
++			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
++				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
++					  offset + 4, radeon_bo_size(reloc->robj));
++				return -EINVAL;
++			}
++			offset += reloc->lobj.gpu_offset;
++			ib[idx+3] = offset;
++			ib[idx+4] = upper_32_bits(offset) & 0xff;
++		}
++		break;
++	case PACKET3_COPY_DW:
++		if (pkt->count != 4) {
++			DRM_ERROR("bad COPY_DW (invalid count)\n");
++			return -EINVAL;
++		}
++		if (idx_value & 0x1) {
++			u64 offset;
++			/* SRC is memory. */
++			r = r600_cs_packet_next_reloc(p, &reloc);
++			if (r) {
++				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
++				return -EINVAL;
++			}
++			offset = radeon_get_ib_value(p, idx+1);
++			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
++			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
++				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
++					  offset + 4, radeon_bo_size(reloc->robj));
++				return -EINVAL;
++			}
++			offset += reloc->lobj.gpu_offset;
++			ib[idx+1] = offset;
++			ib[idx+2] = upper_32_bits(offset) & 0xff;
++		} else {
++			/* SRC is a reg. */
++			reg = radeon_get_ib_value(p, idx+1) << 2;
++			if (!r600_is_safe_reg(p, reg, idx+1))
++				return -EINVAL;
++		}
++		if (idx_value & 0x2) {
++			u64 offset;
++			/* DST is memory. */
++			r = r600_cs_packet_next_reloc(p, &reloc);
++			if (r) {
++				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
++				return -EINVAL;
++			}
++			offset = radeon_get_ib_value(p, idx+3);
++			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
++			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
++				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
++					  offset + 4, radeon_bo_size(reloc->robj));
++				return -EINVAL;
++			}
++			offset += reloc->lobj.gpu_offset;
++			ib[idx+3] = offset;
++			ib[idx+4] = upper_32_bits(offset) & 0xff;
++		} else {
++			/* DST is a reg. */
++			reg = radeon_get_ib_value(p, idx+3) << 2;
++			if (!r600_is_safe_reg(p, reg, idx+3))
++				return -EINVAL;
++		}
++		break;
+ 	case PACKET3_NOP:
+ 		break;
+ 	default:
+diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
+index c45d921..0b59206 100644
+--- a/drivers/gpu/drm/radeon/r600_hdmi.c
++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
+@@ -320,7 +320,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ 
+-	if (ASIC_IS_DCE4(rdev))
++	if (ASIC_IS_DCE5(rdev))
+ 		return;
+ 
+ 	if (!offset)
+@@ -462,13 +462,31 @@ static void r600_hdmi_assign_block(struct drm_encoder *encoder)
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ 
++	u16 eg_offsets[] = {
++		EVERGREEN_CRTC0_REGISTER_OFFSET,
++		EVERGREEN_CRTC1_REGISTER_OFFSET,
++		EVERGREEN_CRTC2_REGISTER_OFFSET,
++		EVERGREEN_CRTC3_REGISTER_OFFSET,
++		EVERGREEN_CRTC4_REGISTER_OFFSET,
++		EVERGREEN_CRTC5_REGISTER_OFFSET,
++	};
++
+ 	if (!dig) {
+ 		dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
+ 		return;
+ 	}
+ 
+-	if (ASIC_IS_DCE4(rdev)) {
++	if (ASIC_IS_DCE5(rdev)) {
+ 		/* TODO */
++	} else if (ASIC_IS_DCE4(rdev)) {
++		if (dig->dig_encoder >= ARRAY_SIZE(eg_offsets)) {
++			dev_err(rdev->dev, "Enabling HDMI on unknown dig\n");
++			return;
++		}
++		radeon_encoder->hdmi_offset = EVERGREEN_HDMI_BASE +
++						eg_offsets[dig->dig_encoder];
++		radeon_encoder->hdmi_config_offset = radeon_encoder->hdmi_offset
++						+ EVERGREEN_HDMI_CONFIG_OFFSET;
+ 	} else if (ASIC_IS_DCE3(rdev)) {
+ 		radeon_encoder->hdmi_offset = dig->dig_encoder ?
+ 			R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
+@@ -491,7 +509,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	uint32_t offset;
+ 
+-	if (ASIC_IS_DCE4(rdev))
++	if (ASIC_IS_DCE5(rdev))
+ 		return;
+ 
+ 	if (!radeon_encoder->hdmi_offset) {
+@@ -504,16 +522,24 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
+ 	}
+ 
+ 	offset = radeon_encoder->hdmi_offset;
+-	if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
++	if (ASIC_IS_DCE5(rdev)) {
++		/* TODO */
++	} else if (ASIC_IS_DCE4(rdev)) {
++		WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0x1, ~0x1);
++	} else if (ASIC_IS_DCE32(rdev)) {
+ 		WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
+-	} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
++	} else if (ASIC_IS_DCE3(rdev)) {
++		/* TODO */
++	} else if (rdev->family >= CHIP_R600) {
+ 		switch (radeon_encoder->encoder_id) {
+ 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+-			WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
++			WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
++				 ~AVIVO_TMDSA_CNTL_HDMI_EN);
+ 			WREG32(offset + R600_HDMI_ENABLE, 0x101);
+ 			break;
+ 		case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+-			WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
++			WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
++				 ~AVIVO_LVTMA_CNTL_HDMI_EN);
+ 			WREG32(offset + R600_HDMI_ENABLE, 0x105);
+ 			break;
+ 		default:
+@@ -525,8 +551,8 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
+ 	if (rdev->irq.installed
+ 	    && rdev->family != CHIP_RS600
+ 	    && rdev->family != CHIP_RS690
+-	    && rdev->family != CHIP_RS740) {
+-
++	    && rdev->family != CHIP_RS740
++	    && !ASIC_IS_DCE4(rdev)) {
+ 		/* if irq is available use it */
+ 		rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
+ 		radeon_irq_set(rdev);
+@@ -551,7 +577,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	uint32_t offset;
+ 
+-	if (ASIC_IS_DCE4(rdev))
++	if (ASIC_IS_DCE5(rdev))
+ 		return;
+ 
+ 	offset = radeon_encoder->hdmi_offset;
+@@ -570,16 +596,22 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
+ 	/* disable polling */
+ 	r600_audio_disable_polling(encoder);
+ 
+-	if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
++	if (ASIC_IS_DCE5(rdev)) {
++		/* TODO */
++	} else if (ASIC_IS_DCE4(rdev)) {
++		WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0, ~0x1);
++	} else if (ASIC_IS_DCE32(rdev)) {
+ 		WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
+ 	} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ 		switch (radeon_encoder->encoder_id) {
+ 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+-			WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
++			WREG32_P(AVIVO_TMDSA_CNTL, 0,
++				 ~AVIVO_TMDSA_CNTL_HDMI_EN);
+ 			WREG32(offset + R600_HDMI_ENABLE, 0);
+ 			break;
+ 		case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+-			WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
++			WREG32_P(AVIVO_LVTMA_CNTL, 0,
++				 ~AVIVO_LVTMA_CNTL_HDMI_EN);
+ 			WREG32(offset + R600_HDMI_ENABLE, 0);
+ 			break;
+ 		default:
+diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
+index d4d23a8d..12ceb82 100644
+--- a/drivers/gpu/drm/radeon/r600d.h
++++ b/drivers/gpu/drm/radeon/r600d.h
+@@ -78,6 +78,20 @@
+ 
+ #define CB_COLOR0_SIZE                                  0x28060
+ #define CB_COLOR0_VIEW                                  0x28080
++#define R_028080_CB_COLOR0_VIEW                      0x028080
++#define   S_028080_SLICE_START(x)                      (((x) & 0x7FF) << 0)
++#define   G_028080_SLICE_START(x)                      (((x) >> 0) & 0x7FF)
++#define   C_028080_SLICE_START                         0xFFFFF800
++#define   S_028080_SLICE_MAX(x)                        (((x) & 0x7FF) << 13)
++#define   G_028080_SLICE_MAX(x)                        (((x) >> 13) & 0x7FF)
++#define   C_028080_SLICE_MAX                           0xFF001FFF
++#define R_028084_CB_COLOR1_VIEW                      0x028084
++#define R_028088_CB_COLOR2_VIEW                      0x028088
++#define R_02808C_CB_COLOR3_VIEW                      0x02808C
++#define R_028090_CB_COLOR4_VIEW                      0x028090
++#define R_028094_CB_COLOR5_VIEW                      0x028094
++#define R_028098_CB_COLOR6_VIEW                      0x028098
++#define R_02809C_CB_COLOR7_VIEW                      0x02809C
+ #define CB_COLOR0_INFO                                  0x280a0
+ #	define CB_FORMAT(x)				((x) << 2)
+ #       define CB_ARRAY_MODE(x)                         ((x) << 8)
+@@ -181,6 +195,14 @@
+ #define		PREZ_MUST_WAIT_FOR_POSTZ_DONE			(1 << 31)
+ #define	DB_DEPTH_BASE					0x2800C
+ #define	DB_HTILE_DATA_BASE				0x28014
++#define	DB_HTILE_SURFACE				0x28D24
++#define   S_028D24_HTILE_WIDTH(x)                      (((x) & 0x1) << 0)
++#define   G_028D24_HTILE_WIDTH(x)                      (((x) >> 0) & 0x1)
++#define   C_028D24_HTILE_WIDTH                         0xFFFFFFFE
++#define   S_028D24_HTILE_HEIGHT(x)                      (((x) & 0x1) << 1)
++#define   G_028D24_HTILE_HEIGHT(x)                      (((x) >> 1) & 0x1)
++#define   C_028D24_HTILE_HEIGHT                         0xFFFFFFFD
++#define   G_028D24_LINEAR(x)                           (((x) >> 2) & 0x1)
+ #define	DB_WATERMARKS					0x9838
+ #define		DEPTH_FREE(x)					((x) << 0)
+ #define		DEPTH_FLUSH(x)					((x) << 5)
+@@ -494,6 +516,11 @@
+ #define	VGT_STRMOUT_BUFFER_OFFSET_1			0x28AEC
+ #define	VGT_STRMOUT_BUFFER_OFFSET_2			0x28AFC
+ #define	VGT_STRMOUT_BUFFER_OFFSET_3			0x28B0C
++#define VGT_STRMOUT_BUFFER_SIZE_0			0x28AD0
++#define VGT_STRMOUT_BUFFER_SIZE_1			0x28AE0
++#define VGT_STRMOUT_BUFFER_SIZE_2			0x28AF0
++#define VGT_STRMOUT_BUFFER_SIZE_3			0x28B00
++
+ #define	VGT_STRMOUT_EN					0x28AB0
+ #define	VGT_VERTEX_REUSE_BLOCK_CNTL			0x28C58
+ #define		VTX_REUSE_DEPTH_MASK				0x000000FF
+@@ -575,6 +602,10 @@
+ #define RLC_UCODE_ADDR                                    0x3f2c
+ #define RLC_UCODE_DATA                                    0x3f30
+ 
++/* new for TN */
++#define TN_RLC_SAVE_AND_RESTORE_BASE                      0x3f10
++#define TN_RLC_CLEAR_STATE_RESTORE_BASE                   0x3f20
++
+ #define SRBM_SOFT_RESET                                   0xe60
+ #       define SOFT_RESET_RLC                             (1 << 13)
+ 
+@@ -832,7 +863,11 @@
+ #define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
+ #define	PACKET3_INDIRECT_BUFFER_MP			0x38
+ #define	PACKET3_MEM_SEMAPHORE				0x39
++#              define PACKET3_SEM_WAIT_ON_SIGNAL    (0x1 << 12)
++#              define PACKET3_SEM_SEL_SIGNAL	    (0x6 << 29)
++#              define PACKET3_SEM_SEL_WAIT	    (0x7 << 29)
+ #define	PACKET3_MPEG_INDEX				0x3A
++#define	PACKET3_COPY_DW					0x3B
+ #define	PACKET3_WAIT_REG_MEM				0x3C
+ #define	PACKET3_MEM_WRITE				0x3D
+ #define	PACKET3_INDIRECT_BUFFER				0x32
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 28e69e9..66150f0 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -107,6 +107,21 @@ extern int radeon_msi;
+ #define RADEONFB_CONN_LIMIT		4
+ #define RADEON_BIOS_NUM_SCRATCH		8
+ 
++/* max number of rings */
++#define RADEON_NUM_RINGS 3
++
++/* internal ring indices */
++/* r1xx+ has gfx CP ring */
++#define RADEON_RING_TYPE_GFX_INDEX  0
++
++/* cayman has 2 compute CP rings */
++#define CAYMAN_RING_TYPE_CP1_INDEX 1
++#define CAYMAN_RING_TYPE_CP2_INDEX 2
++
++/* hardcode those limit for now */
++#define RADEON_VA_RESERVED_SIZE		(8 << 20)
++#define RADEON_IB_VM_MAX_SIZE		(64 << 10)
++
+ /*
+  * Errata workarounds.
+  */
+@@ -127,6 +142,47 @@ bool radeon_get_bios(struct radeon_device *rdev);
+ 
+ 
+ /*
++ * Mutex which allows recursive locking from the same process.
++ */
++struct radeon_mutex {
++	struct mutex		mutex;
++	struct task_struct	*owner;
++	int			level;
++};
++
++static inline void radeon_mutex_init(struct radeon_mutex *mutex)
++{
++	mutex_init(&mutex->mutex);
++	mutex->owner = NULL;
++	mutex->level = 0;
++}
++
++static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
++{
++	if (mutex_trylock(&mutex->mutex)) {
++		/* The mutex was unlocked before, so it's ours now */
++		mutex->owner = current;
++	} else if (mutex->owner != current) {
++		/* Another process locked the mutex, take it */
++		mutex_lock(&mutex->mutex);
++		mutex->owner = current;
++	}
++	/* Otherwise the mutex was already locked by this process */
++
++	mutex->level++;
++}
++
++static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
++{
++	if (--mutex->level > 0)
++		return;
++
++	mutex->owner = NULL;
++	mutex_unlock(&mutex->mutex);
++}
++
++
++/*
+  * Dummy page
+  */
+ struct radeon_dummy_page {
+@@ -165,26 +221,30 @@ void radeon_pm_resume(struct radeon_device *rdev);
+ void radeon_combios_get_power_modes(struct radeon_device *rdev);
+ void radeon_atombios_get_power_modes(struct radeon_device *rdev);
+ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
+-int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage);
+ void rs690_pm_info(struct radeon_device *rdev);
+ extern int rv6xx_get_temp(struct radeon_device *rdev);
+ extern int rv770_get_temp(struct radeon_device *rdev);
+ extern int evergreen_get_temp(struct radeon_device *rdev);
+ extern int sumo_get_temp(struct radeon_device *rdev);
++extern int si_get_temp(struct radeon_device *rdev);
++extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
++				    unsigned *bankh, unsigned *mtaspect,
++				    unsigned *tile_split);
+ 
+ /*
+  * Fences.
+  */
+ struct radeon_fence_driver {
+ 	uint32_t			scratch_reg;
++	uint64_t			gpu_addr;
++	volatile uint32_t		*cpu_addr;
+ 	atomic_t			seq;
+ 	uint32_t			last_seq;
+ 	unsigned long			last_jiffies;
+ 	unsigned long			last_timeout;
+ 	wait_queue_head_t		queue;
+-	rwlock_t			lock;
+ 	struct list_head		created;
+-	struct list_head		emited;
++	struct list_head		emitted;
+ 	struct list_head		signaled;
+ 	bool				initialized;
+ };
+@@ -195,21 +255,26 @@ struct radeon_fence {
+ 	struct list_head		list;
+ 	/* protected by radeon_fence.lock */
+ 	uint32_t			seq;
+-	bool				emited;
++	bool				emitted;
+ 	bool				signaled;
++	/* RB, DMA, etc. */
++	int				ring;
++	struct radeon_semaphore		*semaphore;
+ };
+ 
++int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
+ int radeon_fence_driver_init(struct radeon_device *rdev);
+ void radeon_fence_driver_fini(struct radeon_device *rdev);
+-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
++int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
+ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
+-void radeon_fence_process(struct radeon_device *rdev);
++void radeon_fence_process(struct radeon_device *rdev, int ring);
+ bool radeon_fence_signaled(struct radeon_fence *fence);
+ int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
+-int radeon_fence_wait_next(struct radeon_device *rdev);
+-int radeon_fence_wait_last(struct radeon_device *rdev);
++int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
++int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
+ struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
+ void radeon_fence_unref(struct radeon_fence **fence);
++int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
+ 
+ /*
+  * Tiling registers
+@@ -231,6 +296,21 @@ struct radeon_mman {
+ 	bool				initialized;
+ };
+ 
++/* bo virtual address in a specific vm */
++struct radeon_bo_va {
++	/* bo list is protected by bo being reserved */
++	struct list_head		bo_list;
++	/* vm list is protected by vm mutex */
++	struct list_head		vm_list;
++	/* constant after initialization */
++	struct radeon_vm		*vm;
++	struct radeon_bo		*bo;
++	uint64_t			soffset;
++	uint64_t			eoffset;
++	uint32_t			flags;
++	bool				valid;
++};
++
+ struct radeon_bo {
+ 	/* Protected by gem.mutex */
+ 	struct list_head		list;
+@@ -244,6 +324,10 @@ struct radeon_bo {
+ 	u32				tiling_flags;
+ 	u32				pitch;
+ 	int				surface_reg;
++	/* list of all virtual address to which this bo
++	 * is associated to
++	 */
++	struct list_head		va;
+ 	/* Constant after initialization */
+ 	struct radeon_device		*rdev;
+ 	struct drm_gem_object		gem_base;
+@@ -259,6 +343,48 @@ struct radeon_bo_list {
+ 	u32			tiling_flags;
+ };
+ 
++/* sub-allocation manager, it has to be protected by another lock.
++ * By conception this is an helper for other part of the driver
++ * like the indirect buffer or semaphore, which both have their
++ * locking.
++ *
++ * Principe is simple, we keep a list of sub allocation in offset
++ * order (first entry has offset == 0, last entry has the highest
++ * offset).
++ *
++ * When allocating new object we first check if there is room at
++ * the end total_size - (last_object_offset + last_object_size) >=
++ * alloc_size. If so we allocate new object there.
++ *
++ * When there is not enough room at the end, we start waiting for
++ * each sub object until we reach object_offset+object_size >=
++ * alloc_size, this object then become the sub object we return.
++ *
++ * Alignment can't be bigger than page size.
++ *
++ * Hole are not considered for allocation to keep things simple.
++ * Assumption is that there won't be hole (all object on same
++ * alignment).
++ */
++struct radeon_sa_manager {
++	struct radeon_bo	*bo;
++	struct list_head	sa_bo;
++	unsigned		size;
++	uint64_t		gpu_addr;
++	void			*cpu_ptr;
++	uint32_t		domain;
++};
++
++struct radeon_sa_bo;
++
++/* sub-allocation buffer */
++struct radeon_sa_bo {
++	struct list_head		list;
++	struct radeon_sa_manager	*manager;
++	unsigned			offset;
++	unsigned			size;
++};
++
+ /*
+  * GEM objects.
+  */
+@@ -273,9 +399,6 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
+ 				int alignment, int initial_domain,
+ 				bool discardable, bool kernel,
+ 				struct drm_gem_object **obj);
+-int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+-			  uint64_t *gpu_addr);
+-void radeon_gem_object_unpin(struct drm_gem_object *obj);
+ 
+ int radeon_mode_dumb_create(struct drm_file *file_priv,
+ 			    struct drm_device *dev,
+@@ -288,6 +411,46 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ 			     uint32_t handle);
+ 
+ /*
++ * Semaphores.
++ */
++struct radeon_ring;
++
++#define	RADEON_SEMAPHORE_BO_SIZE	256
++
++struct radeon_semaphore_driver {
++	rwlock_t			lock;
++	struct list_head		bo;
++};
++
++struct radeon_semaphore_bo;
++
++/* everything here is constant */
++struct radeon_semaphore {
++	struct list_head		list;
++	uint64_t			gpu_addr;
++	uint32_t			*cpu_ptr;
++	struct radeon_semaphore_bo	*bo;
++};
++
++struct radeon_semaphore_bo {
++	struct list_head		list;
++	struct radeon_ib		*ib;
++	struct list_head		free;
++	struct radeon_semaphore		semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
++	unsigned			nused;
++};
++
++void radeon_semaphore_driver_fini(struct radeon_device *rdev);
++int radeon_semaphore_create(struct radeon_device *rdev,
++			    struct radeon_semaphore **semaphore);
++void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
++				  struct radeon_semaphore *semaphore);
++void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
++				struct radeon_semaphore *semaphore);
++void radeon_semaphore_free(struct radeon_device *rdev,
++			   struct radeon_semaphore *semaphore);
++
++/*
+  * GART structures, functions & helpers
+  */
+ struct radeon_mc;
+@@ -295,6 +458,7 @@ struct radeon_mc;
+ #define RADEON_GPU_PAGE_SIZE 4096
+ #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+ #define RADEON_GPU_PAGE_SHIFT 12
++#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
+ 
+ struct radeon_gart {
+ 	dma_addr_t			table_addr;
+@@ -305,7 +469,6 @@ struct radeon_gart {
+ 	unsigned			table_size;
+ 	struct page			**pages;
+ 	dma_addr_t			*pages_addr;
+-	bool				*ttm_alloced;
+ 	bool				ready;
+ };
+ 
+@@ -419,7 +582,7 @@ union radeon_irq_stat_regs {
+ 
+ struct radeon_irq {
+ 	bool		installed;
+-	bool		sw_int;
++	bool		sw_int[RADEON_NUM_RINGS];
+ 	bool		crtc_vblank_int[RADEON_MAX_CRTCS];
+ 	bool		pflip[RADEON_MAX_CRTCS];
+ 	wait_queue_head_t	vblank_queue;
+@@ -429,7 +592,7 @@ struct radeon_irq {
+ 	wait_queue_head_t	idle_queue;
+ 	bool		hdmi[RADEON_MAX_HDMI_BLOCKS];
+ 	spinlock_t sw_lock;
+-	int sw_refcount;
++	int sw_refcount[RADEON_NUM_RINGS];
+ 	union radeon_irq_stat_regs stat_regs;
+ 	spinlock_t pflip_lock[RADEON_MAX_CRTCS];
+ 	int pflip_refcount[RADEON_MAX_CRTCS];
+@@ -437,22 +600,24 @@ struct radeon_irq {
+ 
+ int radeon_irq_kms_init(struct radeon_device *rdev);
+ void radeon_irq_kms_fini(struct radeon_device *rdev);
+-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
+-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
++void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
++void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
+ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
+ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
+ 
+ /*
+- * CP & ring.
++ * CP & rings.
+  */
++
+ struct radeon_ib {
+-	struct list_head	list;
++	struct radeon_sa_bo	sa_bo;
+ 	unsigned		idx;
++	uint32_t		length_dw;
+ 	uint64_t		gpu_addr;
+-	struct radeon_fence	*fence;
+ 	uint32_t		*ptr;
+-	uint32_t		length_dw;
+-	bool			free;
++	struct radeon_fence	*fence;
++	unsigned		vm_id;
++	bool			is_const_ib;
+ };
+ 
+ /*
+@@ -460,20 +625,22 @@ struct radeon_ib {
+  * mutex protects scheduled_ibs, ready, alloc_bm
+  */
+ struct radeon_ib_pool {
+-	struct mutex		mutex;
+-	struct radeon_bo	*robj;
+-	struct list_head	bogus_ib;
+-	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
+-	bool			ready;
+-	unsigned		head_id;
++	struct radeon_mutex		mutex;
++	struct radeon_sa_manager	sa_manager;
++	struct radeon_ib		ibs[RADEON_IB_POOL_SIZE];
++	bool				ready;
++	unsigned			head_id;
+ };
+ 
+-struct radeon_cp {
++struct radeon_ring {
+ 	struct radeon_bo	*ring_obj;
+ 	volatile uint32_t	*ring;
+ 	unsigned		rptr;
++	unsigned		rptr_offs;
++	unsigned		rptr_reg;
+ 	unsigned		wptr;
+ 	unsigned		wptr_old;
++	unsigned		wptr_reg;
+ 	unsigned		ring_size;
+ 	unsigned		ring_free_dw;
+ 	int			count_dw;
+@@ -482,6 +649,61 @@ struct radeon_cp {
+ 	uint32_t		ptr_mask;
+ 	struct mutex		mutex;
+ 	bool			ready;
++	u32			ptr_reg_shift;
++	u32			ptr_reg_mask;
++	u32			nop;
++};
++
++/*
++ * VM
++ */
++struct radeon_vm {
++	struct list_head		list;
++	struct list_head		va;
++	int				id;
++	unsigned			last_pfn;
++	u64				pt_gpu_addr;
++	u64				*pt;
++	struct radeon_sa_bo		sa_bo;
++	struct mutex			mutex;
++	/* last fence for cs using this vm */
++	struct radeon_fence		*fence;
++};
++
++struct radeon_vm_funcs {
++	int (*init)(struct radeon_device *rdev);
++	void (*fini)(struct radeon_device *rdev);
++	/* cs mutex must be lock for schedule_ib */
++	int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
++	void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
++	void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
++	uint32_t (*page_flags)(struct radeon_device *rdev,
++			       struct radeon_vm *vm,
++			       uint32_t flags);
++	void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
++			unsigned pfn, uint64_t addr, uint32_t flags);
++};
++
++struct radeon_vm_manager {
++	struct list_head		lru_vm;
++	uint32_t			use_bitmap;
++	struct radeon_sa_manager	sa_manager;
++	uint32_t			max_pfn;
++	/* fields constant after init */
++	const struct radeon_vm_funcs	*funcs;
++	/* number of VMIDs */
++	unsigned			nvm;
++	/* vram base address for page table entry  */
++	u64				vram_base_offset;
++	/* is vm enabled? */
++	bool				enabled;
++};
++
++/*
++ * file private structure
++ */
++struct radeon_fpriv {
++	struct radeon_vm		vm;
+ };
+ 
+ /*
+@@ -491,6 +713,7 @@ struct r600_ih {
+ 	struct radeon_bo	*ring_obj;
+ 	volatile uint32_t	*ring;
+ 	unsigned		rptr;
++	unsigned		rptr_offs;
+ 	unsigned		wptr;
+ 	unsigned		wptr_old;
+ 	unsigned		ring_size;
+@@ -534,23 +757,40 @@ struct r600_blit {
+ 
+ void r600_blit_suspend(struct radeon_device *rdev);
+ 
+-int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
++/*
++ * SI RLC stuff
++ */
++struct si_rlc {
++	/* for power gating */
++	struct radeon_bo	*save_restore_obj;
++	uint64_t		save_restore_gpu_addr;
++	/* for clear state */
++	struct radeon_bo	*clear_state_obj;
++	uint64_t		clear_state_gpu_addr;
++};
++
++int radeon_ib_get(struct radeon_device *rdev, int ring,
++		  struct radeon_ib **ib, unsigned size);
+ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
++bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
+ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
+ int radeon_ib_pool_init(struct radeon_device *rdev);
+ void radeon_ib_pool_fini(struct radeon_device *rdev);
+-int radeon_ib_test(struct radeon_device *rdev);
+-extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
++int radeon_ib_pool_start(struct radeon_device *rdev);
++int radeon_ib_pool_suspend(struct radeon_device *rdev);
+ /* Ring access between begin & end cannot sleep */
+-void radeon_ring_free_size(struct radeon_device *rdev);
+-int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
+-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
+-void radeon_ring_commit(struct radeon_device *rdev);
+-void radeon_ring_unlock_commit(struct radeon_device *rdev);
+-void radeon_ring_unlock_undo(struct radeon_device *rdev);
+-int radeon_ring_test(struct radeon_device *rdev);
+-int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
+-void radeon_ring_fini(struct radeon_device *rdev);
++int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
++void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
++int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
++int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
++void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
++void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
++void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
++int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
++int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
++		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
++		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
++void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
+ 
+ 
+ /*
+@@ -567,12 +807,12 @@ struct radeon_cs_reloc {
+ struct radeon_cs_chunk {
+ 	uint32_t		chunk_id;
+ 	uint32_t		length_dw;
+-	int kpage_idx[2];
+-	uint32_t                *kpage[2];
++	int			kpage_idx[2];
++	uint32_t		*kpage[2];
+ 	uint32_t		*kdata;
+-	void __user *user_ptr;
+-	int last_copied_page;
+-	int last_page_index;
++	void __user		*user_ptr;
++	int			last_copied_page;
++	int			last_page_index;
+ };
+ 
+ struct radeon_cs_parser {
+@@ -593,11 +833,16 @@ struct radeon_cs_parser {
+ 	/* indices of various chunks */
+ 	int			chunk_ib_idx;
+ 	int			chunk_relocs_idx;
++	int			chunk_flags_idx;
++	int			chunk_const_ib_idx;
+ 	struct radeon_ib	*ib;
++	struct radeon_ib	*const_ib;
+ 	void			*track;
+ 	unsigned		family;
+ 	int			parser_error;
+-	bool			keep_tiling_flags;
++	u32			cs_flags;
++	u32			ring;
++	s32			priority;
+ };
+ 
+ extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
+@@ -733,6 +978,7 @@ enum radeon_int_thermal_type {
+ 	THERMAL_TYPE_EVERGREEN,
+ 	THERMAL_TYPE_SUMO,
+ 	THERMAL_TYPE_NI,
++	THERMAL_TYPE_SI,
+ };
+ 
+ struct radeon_voltage {
+@@ -854,11 +1100,20 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number);
+  * Testing
+  */
+ void radeon_test_moves(struct radeon_device *rdev);
++void radeon_test_ring_sync(struct radeon_device *rdev,
++			   struct radeon_ring *cpA,
++			   struct radeon_ring *cpB);
++void radeon_test_syncing(struct radeon_device *rdev);
+ 
+ 
+ /*
+  * Debugfs
+  */
++struct radeon_debugfs {
++	struct drm_info_list	*files;
++	unsigned		num_files;
++};
++
+ int radeon_debugfs_add_files(struct radeon_device *rdev,
+ 			     struct drm_info_list *files,
+ 			     unsigned nfiles);
+@@ -874,53 +1129,8 @@ struct radeon_asic {
+ 	int (*resume)(struct radeon_device *rdev);
+ 	int (*suspend)(struct radeon_device *rdev);
+ 	void (*vga_set_state)(struct radeon_device *rdev, bool state);
+-	bool (*gpu_is_lockup)(struct radeon_device *rdev);
++	bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
+ 	int (*asic_reset)(struct radeon_device *rdev);
+-	void (*gart_tlb_flush)(struct radeon_device *rdev);
+-	int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
+-	int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
+-	void (*cp_fini)(struct radeon_device *rdev);
+-	void (*cp_disable)(struct radeon_device *rdev);
+-	void (*cp_commit)(struct radeon_device *rdev);
+-	void (*ring_start)(struct radeon_device *rdev);
+-	int (*ring_test)(struct radeon_device *rdev);
+-	void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+-	int (*irq_set)(struct radeon_device *rdev);
+-	int (*irq_process)(struct radeon_device *rdev);
+-	u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
+-	void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
+-	int (*cs_parse)(struct radeon_cs_parser *p);
+-	int (*copy_blit)(struct radeon_device *rdev,
+-			 uint64_t src_offset,
+-			 uint64_t dst_offset,
+-			 unsigned num_gpu_pages,
+-			 struct radeon_fence *fence);
+-	int (*copy_dma)(struct radeon_device *rdev,
+-			uint64_t src_offset,
+-			uint64_t dst_offset,
+-			unsigned num_gpu_pages,
+-			struct radeon_fence *fence);
+-	int (*copy)(struct radeon_device *rdev,
+-		    uint64_t src_offset,
+-		    uint64_t dst_offset,
+-		    unsigned num_gpu_pages,
+-		    struct radeon_fence *fence);
+-	uint32_t (*get_engine_clock)(struct radeon_device *rdev);
+-	void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
+-	uint32_t (*get_memory_clock)(struct radeon_device *rdev);
+-	void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
+-	int (*get_pcie_lanes)(struct radeon_device *rdev);
+-	void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
+-	void (*set_clock_gating)(struct radeon_device *rdev, int enable);
+-	int (*set_surface_reg)(struct radeon_device *rdev, int reg,
+-			       uint32_t tiling_flags, uint32_t pitch,
+-			       uint32_t offset, uint32_t obj_size);
+-	void (*clear_surface_reg)(struct radeon_device *rdev, int reg);
+-	void (*bandwidth_update)(struct radeon_device *rdev);
+-	void (*hpd_init)(struct radeon_device *rdev);
+-	void (*hpd_fini)(struct radeon_device *rdev);
+-	bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+-	void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+ 	/* ioctl hw specific callback. Some hw might want to perform special
+ 	 * operation on specific ioctl. For instance on wait idle some hw
+ 	 * might want to perform and HDP flush through MMIO as it seems that
+@@ -928,17 +1138,99 @@ struct radeon_asic {
+ 	 * through ring.
+ 	 */
+ 	void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
++	/* check if 3D engine is idle */
+ 	bool (*gui_idle)(struct radeon_device *rdev);
++	/* wait for mc_idle */
++	int (*mc_wait_for_idle)(struct radeon_device *rdev);
++	/* gart */
++	struct {
++		void (*tlb_flush)(struct radeon_device *rdev);
++		int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
++	} gart;
++	/* ring specific callbacks */
++	struct {
++		void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
++		int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
++		void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
++		void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
++				       struct radeon_semaphore *semaphore, bool emit_wait);
++		int (*cs_parse)(struct radeon_cs_parser *p);
++		void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
++		int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
++		int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
++	} ring[RADEON_NUM_RINGS];
++	/* irqs */
++	struct {
++		int (*set)(struct radeon_device *rdev);
++		int (*process)(struct radeon_device *rdev);
++	} irq;
++	/* displays */
++	struct {
++		/* display watermarks */
++		void (*bandwidth_update)(struct radeon_device *rdev);
++		/* get frame count */
++		u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
++		/* wait for vblank */
++		void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
++	} display;
++	/* copy functions for bo handling */
++	struct {
++		int (*blit)(struct radeon_device *rdev,
++			    uint64_t src_offset,
++			    uint64_t dst_offset,
++			    unsigned num_gpu_pages,
++			    struct radeon_fence *fence);
++		u32 blit_ring_index;
++		int (*dma)(struct radeon_device *rdev,
++			   uint64_t src_offset,
++			   uint64_t dst_offset,
++			   unsigned num_gpu_pages,
++			   struct radeon_fence *fence);
++		u32 dma_ring_index;
++		/* method used for bo copy */
++		int (*copy)(struct radeon_device *rdev,
++			    uint64_t src_offset,
++			    uint64_t dst_offset,
++			    unsigned num_gpu_pages,
++			    struct radeon_fence *fence);
++		/* ring used for bo copies */
++		u32 copy_ring_index;
++	} copy;
++	/* surfaces */
++	struct {
++		int (*set_reg)(struct radeon_device *rdev, int reg,
++				       uint32_t tiling_flags, uint32_t pitch,
++				       uint32_t offset, uint32_t obj_size);
++		void (*clear_reg)(struct radeon_device *rdev, int reg);
++	} surface;
++	/* hotplug detect */
++	struct {
++		void (*init)(struct radeon_device *rdev);
++		void (*fini)(struct radeon_device *rdev);
++		bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
++		void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
++	} hpd;
+ 	/* power management */
+-	void (*pm_misc)(struct radeon_device *rdev);
+-	void (*pm_prepare)(struct radeon_device *rdev);
+-	void (*pm_finish)(struct radeon_device *rdev);
+-	void (*pm_init_profile)(struct radeon_device *rdev);
+-	void (*pm_get_dynpm_state)(struct radeon_device *rdev);
++	struct {
++		void (*misc)(struct radeon_device *rdev);
++		void (*prepare)(struct radeon_device *rdev);
++		void (*finish)(struct radeon_device *rdev);
++		void (*init_profile)(struct radeon_device *rdev);
++		void (*get_dynpm_state)(struct radeon_device *rdev);
++		uint32_t (*get_engine_clock)(struct radeon_device *rdev);
++		void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
++		uint32_t (*get_memory_clock)(struct radeon_device *rdev);
++		void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
++		int (*get_pcie_lanes)(struct radeon_device *rdev);
++		void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
++		void (*set_clock_gating)(struct radeon_device *rdev, int enable);
++	} pm;
+ 	/* pageflipping */
+-	void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+-	u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+-	void (*post_page_flip)(struct radeon_device *rdev, int crtc);
++	struct {
++		void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
++		u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
++		void (*post_page_flip)(struct radeon_device *rdev, int crtc);
++	} pflip;
+ };
+ 
+ /*
+@@ -1078,6 +1370,37 @@ struct cayman_asic {
+ 	struct r100_gpu_lockup	lockup;
+ };
+ 
++struct si_asic {
++	unsigned max_shader_engines;
++	unsigned max_pipes_per_simd;
++	unsigned max_tile_pipes;
++	unsigned max_simds_per_se;
++	unsigned max_backends_per_se;
++	unsigned max_texture_channel_caches;
++	unsigned max_gprs;
++	unsigned max_gs_threads;
++	unsigned max_hw_contexts;
++	unsigned sc_prim_fifo_size_frontend;
++	unsigned sc_prim_fifo_size_backend;
++	unsigned sc_hiz_tile_fifo_size;
++	unsigned sc_earlyz_tile_fifo_size;
++
++	unsigned num_shader_engines;
++	unsigned num_tile_pipes;
++	unsigned num_backends_per_se;
++	unsigned backend_disable_mask_per_asic;
++	unsigned backend_map;
++	unsigned num_texture_channel_caches;
++	unsigned mem_max_burst_length_bytes;
++	unsigned mem_row_size_in_kb;
++	unsigned shader_engine_tile_size;
++	unsigned num_gpus;
++	unsigned multi_gpu_tile_size;
++
++	unsigned tile_config;
++	struct r100_gpu_lockup	lockup;
++};
++
+ union radeon_asic_config {
+ 	struct r300_asic	r300;
+ 	struct r100_asic	r100;
+@@ -1085,6 +1408,7 @@ union radeon_asic_config {
+ 	struct rv770_asic	rv770;
+ 	struct evergreen_asic	evergreen;
+ 	struct cayman_asic	cayman;
++	struct si_asic		si;
+ };
+ 
+ /*
+@@ -1117,6 +1441,8 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+ 			  struct drm_file *filp);
+ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+ 			      struct drm_file *filp);
++int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
++			  struct drm_file *filp);
+ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+ 				struct drm_file *filp);
+@@ -1132,47 +1458,6 @@ struct r600_vram_scratch {
+ 
+ 
+ /*
+- * Mutex which allows recursive locking from the same process.
+- */
+-struct radeon_mutex {
+-	struct mutex		mutex;
+-	struct task_struct	*owner;
+-	int			level;
+-};
+-
+-static inline void radeon_mutex_init(struct radeon_mutex *mutex)
+-{
+-	mutex_init(&mutex->mutex);
+-	mutex->owner = NULL;
+-	mutex->level = 0;
+-}
+-
+-static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
+-{
+-	if (mutex_trylock(&mutex->mutex)) {
+-		/* The mutex was unlocked before, so it's ours now */
+-		mutex->owner = current;
+-	} else if (mutex->owner != current) {
+-		/* Another process locked the mutex, take it */
+-		mutex_lock(&mutex->mutex);
+-		mutex->owner = current;
+-	}
+-	/* Otherwise the mutex was already locked by this process */
+-
+-	mutex->level++;
+-}
+-
+-static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
+-{
+-	if (--mutex->level > 0)
+-		return;
+-
+-	mutex->owner = NULL;
+-	mutex_unlock(&mutex->mutex);
+-}
+-
+-
+-/*
+  * Core structure, functions and helpers.
+  */
+ typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
+@@ -1216,11 +1501,10 @@ struct radeon_device {
+ 	struct radeon_mode_info		mode_info;
+ 	struct radeon_scratch		scratch;
+ 	struct radeon_mman		mman;
+-	struct radeon_fence_driver	fence_drv;
+-	struct radeon_cp		cp;
+-	/* cayman compute rings */
+-	struct radeon_cp		cp1;
+-	struct radeon_cp		cp2;
++	rwlock_t			fence_lock;
++	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
++	struct radeon_semaphore_driver	semaphore_drv;
++	struct radeon_ring		ring[RADEON_NUM_RINGS];
+ 	struct radeon_ib_pool		ib_pool;
+ 	struct radeon_irq		irq;
+ 	struct radeon_asic		*asic;
+@@ -1240,10 +1524,12 @@ struct radeon_device {
+ 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
+ 	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
+ 	const struct firmware *mc_fw;	/* NI MC firmware */
++	const struct firmware *ce_fw;	/* SI CE firmware */
+ 	struct r600_blit r600_blit;
+ 	struct r600_vram_scratch vram_scratch;
+ 	int msi_enabled; /* msi enabled */
+ 	struct r600_ih ih; /* r6/700 interrupt ring */
++	struct si_rlc rlc;
+ 	struct work_struct hotplug_work;
+ 	int num_crtc; /* number of crtcs */
+ 	struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
+@@ -1264,6 +1550,11 @@ struct radeon_device {
+ 	struct drm_file *cmask_filp;
+ 	/* i2c buses */
+ 	struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
++	/* debugfs */
++	struct radeon_debugfs	debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
++	unsigned 		debugfs_count;
++	/* virtual memory */
++	struct radeon_vm_manager	vm_manager;
+ };
+ 
+ int radeon_device_init(struct radeon_device *rdev,
+@@ -1382,6 +1673,9 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
+ #define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
+ 			     (rdev->flags & RADEON_IS_IGP))
+ #define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
++#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
++#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
++			     (rdev->flags & RADEON_IS_IGP))
+ 
+ /*
+  * BIOS helpers.
+@@ -1399,18 +1693,17 @@ void radeon_atombios_fini(struct radeon_device *rdev);
+ /*
+  * RING helpers.
+  */
+-
+ #if DRM_DEBUG_CODE == 0
+-static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
++static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+ {
+-	rdev->cp.ring[rdev->cp.wptr++] = v;
+-	rdev->cp.wptr &= rdev->cp.ptr_mask;
+-	rdev->cp.count_dw--;
+-	rdev->cp.ring_free_dw--;
++	ring->ring[ring->wptr++] = v;
++	ring->wptr &= ring->ptr_mask;
++	ring->count_dw--;
++	ring->ring_free_dw--;
+ }
+ #else
+ /* With debugging this is just too big to inline */
+-void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
++void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
+ #endif
+ 
+ /*
+@@ -1420,46 +1713,53 @@ void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
+ #define radeon_fini(rdev) (rdev)->asic->fini((rdev))
+ #define radeon_resume(rdev) (rdev)->asic->resume((rdev))
+ #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
+-#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
++#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
+ #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
+-#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
++#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
+ #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
+-#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
+-#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
+-#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
+-#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
+-#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
+-#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
+-#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
+-#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
+-#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
+-#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
+-#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
+-#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
+-#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
+-#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
+-#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+-#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
+-#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
+-#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev))
+-#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
+-#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
+-#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
+-#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
+-#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
+-#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
+-#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
+-#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
+-#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
++#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
++#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
++#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
++#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
++#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
++#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
++#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
++#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
++#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
++#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
++#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
++#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
++#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
++#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
++#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
++#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
++#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
++#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
++#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
++#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
++#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
++#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
++#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
++#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
++#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
++#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
++#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
++#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
++#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
++#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
++#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
++#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
+ #define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
+-#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev))
+-#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev))
+-#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
+-#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
+-#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
+-#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
+-#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
+-#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
++#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
++#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
++#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
++#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
++#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
++#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc))
++#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base))
++#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc))
++#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc))
++#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev))
+ 
+ /* Common functions */
+ /* AGP */
+@@ -1488,12 +1788,49 @@ extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
+ 
+ /*
++ * vm
++ */
++int radeon_vm_manager_init(struct radeon_device *rdev);
++void radeon_vm_manager_fini(struct radeon_device *rdev);
++int radeon_vm_manager_start(struct radeon_device *rdev);
++int radeon_vm_manager_suspend(struct radeon_device *rdev);
++int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
++void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
++int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
++void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
++int radeon_vm_bo_update_pte(struct radeon_device *rdev,
++			    struct radeon_vm *vm,
++			    struct radeon_bo *bo,
++			    struct ttm_mem_reg *mem);
++void radeon_vm_bo_invalidate(struct radeon_device *rdev,
++			     struct radeon_bo *bo);
++int radeon_vm_bo_add(struct radeon_device *rdev,
++		     struct radeon_vm *vm,
++		     struct radeon_bo *bo,
++		     uint64_t offset,
++		     uint32_t flags);
++int radeon_vm_bo_rmv(struct radeon_device *rdev,
++		     struct radeon_vm *vm,
++		     struct radeon_bo *bo);
++
++
++/*
+  * R600 vram scratch functions
+  */
+ int r600_vram_scratch_init(struct radeon_device *rdev);
+ void r600_vram_scratch_fini(struct radeon_device *rdev);
+ 
+ /*
++ * r600 cs checking helper
++ */
++unsigned r600_mip_minify(unsigned size, unsigned level);
++bool r600_fmt_is_valid_color(u32 format);
++bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
++int r600_fmt_get_blocksize(u32 format);
++int r600_fmt_get_nblocksx(u32 format, u32 w);
++int r600_fmt_get_nblocksy(u32 format, u32 h);
++
++/*
+  * r600 functions used by radeon_encoder.c
+  */
+ extern void r600_hdmi_enable(struct drm_encoder *encoder);
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
+index a2e1eae..be4dc2f 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.c
++++ b/drivers/gpu/drm/radeon/radeon_asic.c
+@@ -114,13 +114,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
+ 			rdev->family == CHIP_R423) {
+ 		DRM_INFO("Forcing AGP to PCIE mode\n");
+ 		rdev->flags |= RADEON_IS_PCIE;
+-		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+-		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
++		rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
++		rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
+ 	} else {
+ 		DRM_INFO("Forcing AGP to PCI mode\n");
+ 		rdev->flags |= RADEON_IS_PCI;
+-		rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+-		rdev->asic->gart_set_page = &r100_pci_gart_set_page;
++		rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
++		rdev->asic->gart.set_page = &r100_pci_gart_set_page;
+ 	}
+ 	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ }
+@@ -136,44 +136,70 @@ static struct radeon_asic r100_asic = {
+ 	.vga_set_state = &r100_vga_set_state,
+ 	.gpu_is_lockup = &r100_gpu_is_lockup,
+ 	.asic_reset = &r100_asic_reset,
+-	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
+-	.gart_set_page = &r100_pci_gart_set_page,
+-	.cp_commit = &r100_cp_commit,
+-	.ring_start = &r100_ring_start,
+-	.ring_test = &r100_ring_test,
+-	.ring_ib_execute = &r100_ring_ib_execute,
+-	.irq_set = &r100_irq_set,
+-	.irq_process = &r100_irq_process,
+-	.get_vblank_counter = &r100_get_vblank_counter,
+-	.fence_ring_emit = &r100_fence_ring_emit,
+-	.cs_parse = &r100_cs_parse,
+-	.copy_blit = &r100_copy_blit,
+-	.copy_dma = NULL,
+-	.copy = &r100_copy_blit,
+-	.get_engine_clock = &radeon_legacy_get_engine_clock,
+-	.set_engine_clock = &radeon_legacy_set_engine_clock,
+-	.get_memory_clock = &radeon_legacy_get_memory_clock,
+-	.set_memory_clock = NULL,
+-	.get_pcie_lanes = NULL,
+-	.set_pcie_lanes = NULL,
+-	.set_clock_gating = &radeon_legacy_set_clock_gating,
+-	.set_surface_reg = r100_set_surface_reg,
+-	.clear_surface_reg = r100_clear_surface_reg,
+-	.bandwidth_update = &r100_bandwidth_update,
+-	.hpd_init = &r100_hpd_init,
+-	.hpd_fini = &r100_hpd_fini,
+-	.hpd_sense = &r100_hpd_sense,
+-	.hpd_set_polarity = &r100_hpd_set_polarity,
+ 	.ioctl_wait_idle = NULL,
+ 	.gui_idle = &r100_gui_idle,
+-	.pm_misc = &r100_pm_misc,
+-	.pm_prepare = &r100_pm_prepare,
+-	.pm_finish = &r100_pm_finish,
+-	.pm_init_profile = &r100_pm_init_profile,
+-	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+-	.pre_page_flip = &r100_pre_page_flip,
+-	.page_flip = &r100_page_flip,
+-	.post_page_flip = &r100_post_page_flip,
++	.mc_wait_for_idle = &r100_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &r100_pci_gart_tlb_flush,
++		.set_page = &r100_pci_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &r100_ring_ib_execute,
++			.emit_fence = &r100_fence_ring_emit,
++			.emit_semaphore = &r100_semaphore_ring_emit,
++			.cs_parse = &r100_cs_parse,
++			.ring_start = &r100_ring_start,
++			.ring_test = &r100_ring_test,
++			.ib_test = &r100_ib_test,
++		}
++	},
++	.irq = {
++		.set = &r100_irq_set,
++		.process = &r100_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &r100_bandwidth_update,
++		.get_vblank_counter = &r100_get_vblank_counter,
++		.wait_for_vblank = &r100_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r100_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = NULL,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r100_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r100_set_surface_reg,
++		.clear_reg = r100_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &r100_hpd_init,
++		.fini = &r100_hpd_fini,
++		.sense = &r100_hpd_sense,
++		.set_polarity = &r100_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &r100_pm_misc,
++		.prepare = &r100_pm_prepare,
++		.finish = &r100_pm_finish,
++		.init_profile = &r100_pm_init_profile,
++		.get_dynpm_state = &r100_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_legacy_get_engine_clock,
++		.set_engine_clock = &radeon_legacy_set_engine_clock,
++		.get_memory_clock = &radeon_legacy_get_memory_clock,
++		.set_memory_clock = NULL,
++		.get_pcie_lanes = NULL,
++		.set_pcie_lanes = NULL,
++		.set_clock_gating = &radeon_legacy_set_clock_gating,
++	},
++	.pflip = {
++		.pre_page_flip = &r100_pre_page_flip,
++		.page_flip = &r100_page_flip,
++		.post_page_flip = &r100_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic r200_asic = {
+@@ -184,43 +210,70 @@ static struct radeon_asic r200_asic = {
+ 	.vga_set_state = &r100_vga_set_state,
+ 	.gpu_is_lockup = &r100_gpu_is_lockup,
+ 	.asic_reset = &r100_asic_reset,
+-	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
+-	.gart_set_page = &r100_pci_gart_set_page,
+-	.cp_commit = &r100_cp_commit,
+-	.ring_start = &r100_ring_start,
+-	.ring_test = &r100_ring_test,
+-	.ring_ib_execute = &r100_ring_ib_execute,
+-	.irq_set = &r100_irq_set,
+-	.irq_process = &r100_irq_process,
+-	.get_vblank_counter = &r100_get_vblank_counter,
+-	.fence_ring_emit = &r100_fence_ring_emit,
+-	.cs_parse = &r100_cs_parse,
+-	.copy_blit = &r100_copy_blit,
+-	.copy_dma = &r200_copy_dma,
+-	.copy = &r100_copy_blit,
+-	.get_engine_clock = &radeon_legacy_get_engine_clock,
+-	.set_engine_clock = &radeon_legacy_set_engine_clock,
+-	.get_memory_clock = &radeon_legacy_get_memory_clock,
+-	.set_memory_clock = NULL,
+-	.set_pcie_lanes = NULL,
+-	.set_clock_gating = &radeon_legacy_set_clock_gating,
+-	.set_surface_reg = r100_set_surface_reg,
+-	.clear_surface_reg = r100_clear_surface_reg,
+-	.bandwidth_update = &r100_bandwidth_update,
+-	.hpd_init = &r100_hpd_init,
+-	.hpd_fini = &r100_hpd_fini,
+-	.hpd_sense = &r100_hpd_sense,
+-	.hpd_set_polarity = &r100_hpd_set_polarity,
+ 	.ioctl_wait_idle = NULL,
+ 	.gui_idle = &r100_gui_idle,
+-	.pm_misc = &r100_pm_misc,
+-	.pm_prepare = &r100_pm_prepare,
+-	.pm_finish = &r100_pm_finish,
+-	.pm_init_profile = &r100_pm_init_profile,
+-	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+-	.pre_page_flip = &r100_pre_page_flip,
+-	.page_flip = &r100_page_flip,
+-	.post_page_flip = &r100_post_page_flip,
++	.mc_wait_for_idle = &r100_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &r100_pci_gart_tlb_flush,
++		.set_page = &r100_pci_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &r100_ring_ib_execute,
++			.emit_fence = &r100_fence_ring_emit,
++			.emit_semaphore = &r100_semaphore_ring_emit,
++			.cs_parse = &r100_cs_parse,
++			.ring_start = &r100_ring_start,
++			.ring_test = &r100_ring_test,
++			.ib_test = &r100_ib_test,
++		}
++	},
++	.irq = {
++		.set = &r100_irq_set,
++		.process = &r100_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &r100_bandwidth_update,
++		.get_vblank_counter = &r100_get_vblank_counter,
++		.wait_for_vblank = &r100_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r100_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = &r200_copy_dma,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r100_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r100_set_surface_reg,
++		.clear_reg = r100_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &r100_hpd_init,
++		.fini = &r100_hpd_fini,
++		.sense = &r100_hpd_sense,
++		.set_polarity = &r100_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &r100_pm_misc,
++		.prepare = &r100_pm_prepare,
++		.finish = &r100_pm_finish,
++		.init_profile = &r100_pm_init_profile,
++		.get_dynpm_state = &r100_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_legacy_get_engine_clock,
++		.set_engine_clock = &radeon_legacy_set_engine_clock,
++		.get_memory_clock = &radeon_legacy_get_memory_clock,
++		.set_memory_clock = NULL,
++		.get_pcie_lanes = NULL,
++		.set_pcie_lanes = NULL,
++		.set_clock_gating = &radeon_legacy_set_clock_gating,
++	},
++	.pflip = {
++		.pre_page_flip = &r100_pre_page_flip,
++		.page_flip = &r100_page_flip,
++		.post_page_flip = &r100_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic r300_asic = {
+@@ -231,44 +284,70 @@ static struct radeon_asic r300_asic = {
+ 	.vga_set_state = &r100_vga_set_state,
+ 	.gpu_is_lockup = &r300_gpu_is_lockup,
+ 	.asic_reset = &r300_asic_reset,
+-	.gart_tlb_flush = &r100_pci_gart_tlb_flush,
+-	.gart_set_page = &r100_pci_gart_set_page,
+-	.cp_commit = &r100_cp_commit,
+-	.ring_start = &r300_ring_start,
+-	.ring_test = &r100_ring_test,
+-	.ring_ib_execute = &r100_ring_ib_execute,
+-	.irq_set = &r100_irq_set,
+-	.irq_process = &r100_irq_process,
+-	.get_vblank_counter = &r100_get_vblank_counter,
+-	.fence_ring_emit = &r300_fence_ring_emit,
+-	.cs_parse = &r300_cs_parse,
+-	.copy_blit = &r100_copy_blit,
+-	.copy_dma = &r200_copy_dma,
+-	.copy = &r100_copy_blit,
+-	.get_engine_clock = &radeon_legacy_get_engine_clock,
+-	.set_engine_clock = &radeon_legacy_set_engine_clock,
+-	.get_memory_clock = &radeon_legacy_get_memory_clock,
+-	.set_memory_clock = NULL,
+-	.get_pcie_lanes = &rv370_get_pcie_lanes,
+-	.set_pcie_lanes = &rv370_set_pcie_lanes,
+-	.set_clock_gating = &radeon_legacy_set_clock_gating,
+-	.set_surface_reg = r100_set_surface_reg,
+-	.clear_surface_reg = r100_clear_surface_reg,
+-	.bandwidth_update = &r100_bandwidth_update,
+-	.hpd_init = &r100_hpd_init,
+-	.hpd_fini = &r100_hpd_fini,
+-	.hpd_sense = &r100_hpd_sense,
+-	.hpd_set_polarity = &r100_hpd_set_polarity,
+ 	.ioctl_wait_idle = NULL,
+ 	.gui_idle = &r100_gui_idle,
+-	.pm_misc = &r100_pm_misc,
+-	.pm_prepare = &r100_pm_prepare,
+-	.pm_finish = &r100_pm_finish,
+-	.pm_init_profile = &r100_pm_init_profile,
+-	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+-	.pre_page_flip = &r100_pre_page_flip,
+-	.page_flip = &r100_page_flip,
+-	.post_page_flip = &r100_post_page_flip,
++	.mc_wait_for_idle = &r300_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &r100_pci_gart_tlb_flush,
++		.set_page = &r100_pci_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &r100_ring_ib_execute,
++			.emit_fence = &r300_fence_ring_emit,
++			.emit_semaphore = &r100_semaphore_ring_emit,
++			.cs_parse = &r300_cs_parse,
++			.ring_start = &r300_ring_start,
++			.ring_test = &r100_ring_test,
++			.ib_test = &r100_ib_test,
++		}
++	},
++	.irq = {
++		.set = &r100_irq_set,
++		.process = &r100_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &r100_bandwidth_update,
++		.get_vblank_counter = &r100_get_vblank_counter,
++		.wait_for_vblank = &r100_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r100_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = &r200_copy_dma,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r100_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r100_set_surface_reg,
++		.clear_reg = r100_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &r100_hpd_init,
++		.fini = &r100_hpd_fini,
++		.sense = &r100_hpd_sense,
++		.set_polarity = &r100_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &r100_pm_misc,
++		.prepare = &r100_pm_prepare,
++		.finish = &r100_pm_finish,
++		.init_profile = &r100_pm_init_profile,
++		.get_dynpm_state = &r100_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_legacy_get_engine_clock,
++		.set_engine_clock = &radeon_legacy_set_engine_clock,
++		.get_memory_clock = &radeon_legacy_get_memory_clock,
++		.set_memory_clock = NULL,
++		.get_pcie_lanes = &rv370_get_pcie_lanes,
++		.set_pcie_lanes = &rv370_set_pcie_lanes,
++		.set_clock_gating = &radeon_legacy_set_clock_gating,
++	},
++	.pflip = {
++		.pre_page_flip = &r100_pre_page_flip,
++		.page_flip = &r100_page_flip,
++		.post_page_flip = &r100_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic r300_asic_pcie = {
+@@ -279,43 +358,70 @@ static struct radeon_asic r300_asic_pcie = {
+ 	.vga_set_state = &r100_vga_set_state,
+ 	.gpu_is_lockup = &r300_gpu_is_lockup,
+ 	.asic_reset = &r300_asic_reset,
+-	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+-	.gart_set_page = &rv370_pcie_gart_set_page,
+-	.cp_commit = &r100_cp_commit,
+-	.ring_start = &r300_ring_start,
+-	.ring_test = &r100_ring_test,
+-	.ring_ib_execute = &r100_ring_ib_execute,
+-	.irq_set = &r100_irq_set,
+-	.irq_process = &r100_irq_process,
+-	.get_vblank_counter = &r100_get_vblank_counter,
+-	.fence_ring_emit = &r300_fence_ring_emit,
+-	.cs_parse = &r300_cs_parse,
+-	.copy_blit = &r100_copy_blit,
+-	.copy_dma = &r200_copy_dma,
+-	.copy = &r100_copy_blit,
+-	.get_engine_clock = &radeon_legacy_get_engine_clock,
+-	.set_engine_clock = &radeon_legacy_set_engine_clock,
+-	.get_memory_clock = &radeon_legacy_get_memory_clock,
+-	.set_memory_clock = NULL,
+-	.set_pcie_lanes = &rv370_set_pcie_lanes,
+-	.set_clock_gating = &radeon_legacy_set_clock_gating,
+-	.set_surface_reg = r100_set_surface_reg,
+-	.clear_surface_reg = r100_clear_surface_reg,
+-	.bandwidth_update = &r100_bandwidth_update,
+-	.hpd_init = &r100_hpd_init,
+-	.hpd_fini = &r100_hpd_fini,
+-	.hpd_sense = &r100_hpd_sense,
+-	.hpd_set_polarity = &r100_hpd_set_polarity,
+ 	.ioctl_wait_idle = NULL,
+ 	.gui_idle = &r100_gui_idle,
+-	.pm_misc = &r100_pm_misc,
+-	.pm_prepare = &r100_pm_prepare,
+-	.pm_finish = &r100_pm_finish,
+-	.pm_init_profile = &r100_pm_init_profile,
+-	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+-	.pre_page_flip = &r100_pre_page_flip,
+-	.page_flip = &r100_page_flip,
+-	.post_page_flip = &r100_post_page_flip,
++	.mc_wait_for_idle = &r300_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &rv370_pcie_gart_tlb_flush,
++		.set_page = &rv370_pcie_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &r100_ring_ib_execute,
++			.emit_fence = &r300_fence_ring_emit,
++			.emit_semaphore = &r100_semaphore_ring_emit,
++			.cs_parse = &r300_cs_parse,
++			.ring_start = &r300_ring_start,
++			.ring_test = &r100_ring_test,
++			.ib_test = &r100_ib_test,
++		}
++	},
++	.irq = {
++		.set = &r100_irq_set,
++		.process = &r100_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &r100_bandwidth_update,
++		.get_vblank_counter = &r100_get_vblank_counter,
++		.wait_for_vblank = &r100_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r100_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = &r200_copy_dma,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r100_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r100_set_surface_reg,
++		.clear_reg = r100_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &r100_hpd_init,
++		.fini = &r100_hpd_fini,
++		.sense = &r100_hpd_sense,
++		.set_polarity = &r100_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &r100_pm_misc,
++		.prepare = &r100_pm_prepare,
++		.finish = &r100_pm_finish,
++		.init_profile = &r100_pm_init_profile,
++		.get_dynpm_state = &r100_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_legacy_get_engine_clock,
++		.set_engine_clock = &radeon_legacy_set_engine_clock,
++		.get_memory_clock = &radeon_legacy_get_memory_clock,
++		.set_memory_clock = NULL,
++		.get_pcie_lanes = &rv370_get_pcie_lanes,
++		.set_pcie_lanes = &rv370_set_pcie_lanes,
++		.set_clock_gating = &radeon_legacy_set_clock_gating,
++	},
++	.pflip = {
++		.pre_page_flip = &r100_pre_page_flip,
++		.page_flip = &r100_page_flip,
++		.post_page_flip = &r100_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic r420_asic = {
+@@ -326,44 +432,70 @@ static struct radeon_asic r420_asic = {
+ 	.vga_set_state = &r100_vga_set_state,
+ 	.gpu_is_lockup = &r300_gpu_is_lockup,
+ 	.asic_reset = &r300_asic_reset,
+-	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+-	.gart_set_page = &rv370_pcie_gart_set_page,
+-	.cp_commit = &r100_cp_commit,
+-	.ring_start = &r300_ring_start,
+-	.ring_test = &r100_ring_test,
+-	.ring_ib_execute = &r100_ring_ib_execute,
+-	.irq_set = &r100_irq_set,
+-	.irq_process = &r100_irq_process,
+-	.get_vblank_counter = &r100_get_vblank_counter,
+-	.fence_ring_emit = &r300_fence_ring_emit,
+-	.cs_parse = &r300_cs_parse,
+-	.copy_blit = &r100_copy_blit,
+-	.copy_dma = &r200_copy_dma,
+-	.copy = &r100_copy_blit,
+-	.get_engine_clock = &radeon_atom_get_engine_clock,
+-	.set_engine_clock = &radeon_atom_set_engine_clock,
+-	.get_memory_clock = &radeon_atom_get_memory_clock,
+-	.set_memory_clock = &radeon_atom_set_memory_clock,
+-	.get_pcie_lanes = &rv370_get_pcie_lanes,
+-	.set_pcie_lanes = &rv370_set_pcie_lanes,
+-	.set_clock_gating = &radeon_atom_set_clock_gating,
+-	.set_surface_reg = r100_set_surface_reg,
+-	.clear_surface_reg = r100_clear_surface_reg,
+-	.bandwidth_update = &r100_bandwidth_update,
+-	.hpd_init = &r100_hpd_init,
+-	.hpd_fini = &r100_hpd_fini,
+-	.hpd_sense = &r100_hpd_sense,
+-	.hpd_set_polarity = &r100_hpd_set_polarity,
+ 	.ioctl_wait_idle = NULL,
+ 	.gui_idle = &r100_gui_idle,
+-	.pm_misc = &r100_pm_misc,
+-	.pm_prepare = &r100_pm_prepare,
+-	.pm_finish = &r100_pm_finish,
+-	.pm_init_profile = &r420_pm_init_profile,
+-	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+-	.pre_page_flip = &r100_pre_page_flip,
+-	.page_flip = &r100_page_flip,
+-	.post_page_flip = &r100_post_page_flip,
++	.mc_wait_for_idle = &r300_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &rv370_pcie_gart_tlb_flush,
++		.set_page = &rv370_pcie_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &r100_ring_ib_execute,
++			.emit_fence = &r300_fence_ring_emit,
++			.emit_semaphore = &r100_semaphore_ring_emit,
++			.cs_parse = &r300_cs_parse,
++			.ring_start = &r300_ring_start,
++			.ring_test = &r100_ring_test,
++			.ib_test = &r100_ib_test,
++		}
++	},
++	.irq = {
++		.set = &r100_irq_set,
++		.process = &r100_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &r100_bandwidth_update,
++		.get_vblank_counter = &r100_get_vblank_counter,
++		.wait_for_vblank = &r100_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r100_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = &r200_copy_dma,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r100_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r100_set_surface_reg,
++		.clear_reg = r100_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &r100_hpd_init,
++		.fini = &r100_hpd_fini,
++		.sense = &r100_hpd_sense,
++		.set_polarity = &r100_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &r100_pm_misc,
++		.prepare = &r100_pm_prepare,
++		.finish = &r100_pm_finish,
++		.init_profile = &r420_pm_init_profile,
++		.get_dynpm_state = &r100_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_atom_get_engine_clock,
++		.set_engine_clock = &radeon_atom_set_engine_clock,
++		.get_memory_clock = &radeon_atom_get_memory_clock,
++		.set_memory_clock = &radeon_atom_set_memory_clock,
++		.get_pcie_lanes = &rv370_get_pcie_lanes,
++		.set_pcie_lanes = &rv370_set_pcie_lanes,
++		.set_clock_gating = &radeon_atom_set_clock_gating,
++	},
++	.pflip = {
++		.pre_page_flip = &r100_pre_page_flip,
++		.page_flip = &r100_page_flip,
++		.post_page_flip = &r100_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic rs400_asic = {
+@@ -374,44 +506,70 @@ static struct radeon_asic rs400_asic = {
+ 	.vga_set_state = &r100_vga_set_state,
+ 	.gpu_is_lockup = &r300_gpu_is_lockup,
+ 	.asic_reset = &r300_asic_reset,
+-	.gart_tlb_flush = &rs400_gart_tlb_flush,
+-	.gart_set_page = &rs400_gart_set_page,
+-	.cp_commit = &r100_cp_commit,
+-	.ring_start = &r300_ring_start,
+-	.ring_test = &r100_ring_test,
+-	.ring_ib_execute = &r100_ring_ib_execute,
+-	.irq_set = &r100_irq_set,
+-	.irq_process = &r100_irq_process,
+-	.get_vblank_counter = &r100_get_vblank_counter,
+-	.fence_ring_emit = &r300_fence_ring_emit,
+-	.cs_parse = &r300_cs_parse,
+-	.copy_blit = &r100_copy_blit,
+-	.copy_dma = &r200_copy_dma,
+-	.copy = &r100_copy_blit,
+-	.get_engine_clock = &radeon_legacy_get_engine_clock,
+-	.set_engine_clock = &radeon_legacy_set_engine_clock,
+-	.get_memory_clock = &radeon_legacy_get_memory_clock,
+-	.set_memory_clock = NULL,
+-	.get_pcie_lanes = NULL,
+-	.set_pcie_lanes = NULL,
+-	.set_clock_gating = &radeon_legacy_set_clock_gating,
+-	.set_surface_reg = r100_set_surface_reg,
+-	.clear_surface_reg = r100_clear_surface_reg,
+-	.bandwidth_update = &r100_bandwidth_update,
+-	.hpd_init = &r100_hpd_init,
+-	.hpd_fini = &r100_hpd_fini,
+-	.hpd_sense = &r100_hpd_sense,
+-	.hpd_set_polarity = &r100_hpd_set_polarity,
+ 	.ioctl_wait_idle = NULL,
+ 	.gui_idle = &r100_gui_idle,
+-	.pm_misc = &r100_pm_misc,
+-	.pm_prepare = &r100_pm_prepare,
+-	.pm_finish = &r100_pm_finish,
+-	.pm_init_profile = &r100_pm_init_profile,
+-	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+-	.pre_page_flip = &r100_pre_page_flip,
+-	.page_flip = &r100_page_flip,
+-	.post_page_flip = &r100_post_page_flip,
++	.mc_wait_for_idle = &rs400_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &rs400_gart_tlb_flush,
++		.set_page = &rs400_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &r100_ring_ib_execute,
++			.emit_fence = &r300_fence_ring_emit,
++			.emit_semaphore = &r100_semaphore_ring_emit,
++			.cs_parse = &r300_cs_parse,
++			.ring_start = &r300_ring_start,
++			.ring_test = &r100_ring_test,
++			.ib_test = &r100_ib_test,
++		}
++	},
++	.irq = {
++		.set = &r100_irq_set,
++		.process = &r100_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &r100_bandwidth_update,
++		.get_vblank_counter = &r100_get_vblank_counter,
++		.wait_for_vblank = &r100_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r100_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = &r200_copy_dma,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r100_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r100_set_surface_reg,
++		.clear_reg = r100_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &r100_hpd_init,
++		.fini = &r100_hpd_fini,
++		.sense = &r100_hpd_sense,
++		.set_polarity = &r100_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &r100_pm_misc,
++		.prepare = &r100_pm_prepare,
++		.finish = &r100_pm_finish,
++		.init_profile = &r100_pm_init_profile,
++		.get_dynpm_state = &r100_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_legacy_get_engine_clock,
++		.set_engine_clock = &radeon_legacy_set_engine_clock,
++		.get_memory_clock = &radeon_legacy_get_memory_clock,
++		.set_memory_clock = NULL,
++		.get_pcie_lanes = NULL,
++		.set_pcie_lanes = NULL,
++		.set_clock_gating = &radeon_legacy_set_clock_gating,
++	},
++	.pflip = {
++		.pre_page_flip = &r100_pre_page_flip,
++		.page_flip = &r100_page_flip,
++		.post_page_flip = &r100_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic rs600_asic = {
+@@ -422,44 +580,70 @@ static struct radeon_asic rs600_asic = {
+ 	.vga_set_state = &r100_vga_set_state,
+ 	.gpu_is_lockup = &r300_gpu_is_lockup,
+ 	.asic_reset = &rs600_asic_reset,
+-	.gart_tlb_flush = &rs600_gart_tlb_flush,
+-	.gart_set_page = &rs600_gart_set_page,
+-	.cp_commit = &r100_cp_commit,
+-	.ring_start = &r300_ring_start,
+-	.ring_test = &r100_ring_test,
+-	.ring_ib_execute = &r100_ring_ib_execute,
+-	.irq_set = &rs600_irq_set,
+-	.irq_process = &rs600_irq_process,
+-	.get_vblank_counter = &rs600_get_vblank_counter,
+-	.fence_ring_emit = &r300_fence_ring_emit,
+-	.cs_parse = &r300_cs_parse,
+-	.copy_blit = &r100_copy_blit,
+-	.copy_dma = &r200_copy_dma,
+-	.copy = &r100_copy_blit,
+-	.get_engine_clock = &radeon_atom_get_engine_clock,
+-	.set_engine_clock = &radeon_atom_set_engine_clock,
+-	.get_memory_clock = &radeon_atom_get_memory_clock,
+-	.set_memory_clock = &radeon_atom_set_memory_clock,
+-	.get_pcie_lanes = NULL,
+-	.set_pcie_lanes = NULL,
+-	.set_clock_gating = &radeon_atom_set_clock_gating,
+-	.set_surface_reg = r100_set_surface_reg,
+-	.clear_surface_reg = r100_clear_surface_reg,
+-	.bandwidth_update = &rs600_bandwidth_update,
+-	.hpd_init = &rs600_hpd_init,
+-	.hpd_fini = &rs600_hpd_fini,
+-	.hpd_sense = &rs600_hpd_sense,
+-	.hpd_set_polarity = &rs600_hpd_set_polarity,
+ 	.ioctl_wait_idle = NULL,
+ 	.gui_idle = &r100_gui_idle,
+-	.pm_misc = &rs600_pm_misc,
+-	.pm_prepare = &rs600_pm_prepare,
+-	.pm_finish = &rs600_pm_finish,
+-	.pm_init_profile = &r420_pm_init_profile,
+-	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+-	.pre_page_flip = &rs600_pre_page_flip,
+-	.page_flip = &rs600_page_flip,
+-	.post_page_flip = &rs600_post_page_flip,
++	.mc_wait_for_idle = &rs600_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &rs600_gart_tlb_flush,
++		.set_page = &rs600_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &r100_ring_ib_execute,
++			.emit_fence = &r300_fence_ring_emit,
++			.emit_semaphore = &r100_semaphore_ring_emit,
++			.cs_parse = &r300_cs_parse,
++			.ring_start = &r300_ring_start,
++			.ring_test = &r100_ring_test,
++			.ib_test = &r100_ib_test,
++		}
++	},
++	.irq = {
++		.set = &rs600_irq_set,
++		.process = &rs600_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &rs600_bandwidth_update,
++		.get_vblank_counter = &rs600_get_vblank_counter,
++		.wait_for_vblank = &avivo_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r100_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = &r200_copy_dma,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r100_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r100_set_surface_reg,
++		.clear_reg = r100_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &rs600_hpd_init,
++		.fini = &rs600_hpd_fini,
++		.sense = &rs600_hpd_sense,
++		.set_polarity = &rs600_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &rs600_pm_misc,
++		.prepare = &rs600_pm_prepare,
++		.finish = &rs600_pm_finish,
++		.init_profile = &r420_pm_init_profile,
++		.get_dynpm_state = &r100_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_atom_get_engine_clock,
++		.set_engine_clock = &radeon_atom_set_engine_clock,
++		.get_memory_clock = &radeon_atom_get_memory_clock,
++		.set_memory_clock = &radeon_atom_set_memory_clock,
++		.get_pcie_lanes = NULL,
++		.set_pcie_lanes = NULL,
++		.set_clock_gating = &radeon_atom_set_clock_gating,
++	},
++	.pflip = {
++		.pre_page_flip = &rs600_pre_page_flip,
++		.page_flip = &rs600_page_flip,
++		.post_page_flip = &rs600_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic rs690_asic = {
+@@ -470,44 +654,70 @@ static struct radeon_asic rs690_asic = {
+ 	.vga_set_state = &r100_vga_set_state,
+ 	.gpu_is_lockup = &r300_gpu_is_lockup,
+ 	.asic_reset = &rs600_asic_reset,
+-	.gart_tlb_flush = &rs400_gart_tlb_flush,
+-	.gart_set_page = &rs400_gart_set_page,
+-	.cp_commit = &r100_cp_commit,
+-	.ring_start = &r300_ring_start,
+-	.ring_test = &r100_ring_test,
+-	.ring_ib_execute = &r100_ring_ib_execute,
+-	.irq_set = &rs600_irq_set,
+-	.irq_process = &rs600_irq_process,
+-	.get_vblank_counter = &rs600_get_vblank_counter,
+-	.fence_ring_emit = &r300_fence_ring_emit,
+-	.cs_parse = &r300_cs_parse,
+-	.copy_blit = &r100_copy_blit,
+-	.copy_dma = &r200_copy_dma,
+-	.copy = &r200_copy_dma,
+-	.get_engine_clock = &radeon_atom_get_engine_clock,
+-	.set_engine_clock = &radeon_atom_set_engine_clock,
+-	.get_memory_clock = &radeon_atom_get_memory_clock,
+-	.set_memory_clock = &radeon_atom_set_memory_clock,
+-	.get_pcie_lanes = NULL,
+-	.set_pcie_lanes = NULL,
+-	.set_clock_gating = &radeon_atom_set_clock_gating,
+-	.set_surface_reg = r100_set_surface_reg,
+-	.clear_surface_reg = r100_clear_surface_reg,
+-	.bandwidth_update = &rs690_bandwidth_update,
+-	.hpd_init = &rs600_hpd_init,
+-	.hpd_fini = &rs600_hpd_fini,
+-	.hpd_sense = &rs600_hpd_sense,
+-	.hpd_set_polarity = &rs600_hpd_set_polarity,
+ 	.ioctl_wait_idle = NULL,
+ 	.gui_idle = &r100_gui_idle,
+-	.pm_misc = &rs600_pm_misc,
+-	.pm_prepare = &rs600_pm_prepare,
+-	.pm_finish = &rs600_pm_finish,
+-	.pm_init_profile = &r420_pm_init_profile,
+-	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+-	.pre_page_flip = &rs600_pre_page_flip,
+-	.page_flip = &rs600_page_flip,
+-	.post_page_flip = &rs600_post_page_flip,
++	.mc_wait_for_idle = &rs690_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &rs400_gart_tlb_flush,
++		.set_page = &rs400_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &r100_ring_ib_execute,
++			.emit_fence = &r300_fence_ring_emit,
++			.emit_semaphore = &r100_semaphore_ring_emit,
++			.cs_parse = &r300_cs_parse,
++			.ring_start = &r300_ring_start,
++			.ring_test = &r100_ring_test,
++			.ib_test = &r100_ib_test,
++		}
++	},
++	.irq = {
++		.set = &rs600_irq_set,
++		.process = &rs600_irq_process,
++	},
++	.display = {
++		.get_vblank_counter = &rs600_get_vblank_counter,
++		.bandwidth_update = &rs690_bandwidth_update,
++		.wait_for_vblank = &avivo_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r100_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = &r200_copy_dma,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r200_copy_dma,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r100_set_surface_reg,
++		.clear_reg = r100_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &rs600_hpd_init,
++		.fini = &rs600_hpd_fini,
++		.sense = &rs600_hpd_sense,
++		.set_polarity = &rs600_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &rs600_pm_misc,
++		.prepare = &rs600_pm_prepare,
++		.finish = &rs600_pm_finish,
++		.init_profile = &r420_pm_init_profile,
++		.get_dynpm_state = &r100_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_atom_get_engine_clock,
++		.set_engine_clock = &radeon_atom_set_engine_clock,
++		.get_memory_clock = &radeon_atom_get_memory_clock,
++		.set_memory_clock = &radeon_atom_set_memory_clock,
++		.get_pcie_lanes = NULL,
++		.set_pcie_lanes = NULL,
++		.set_clock_gating = &radeon_atom_set_clock_gating,
++	},
++	.pflip = {
++		.pre_page_flip = &rs600_pre_page_flip,
++		.page_flip = &rs600_page_flip,
++		.post_page_flip = &rs600_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic rv515_asic = {
+@@ -518,44 +728,70 @@ static struct radeon_asic rv515_asic = {
+ 	.vga_set_state = &r100_vga_set_state,
+ 	.gpu_is_lockup = &r300_gpu_is_lockup,
+ 	.asic_reset = &rs600_asic_reset,
+-	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+-	.gart_set_page = &rv370_pcie_gart_set_page,
+-	.cp_commit = &r100_cp_commit,
+-	.ring_start = &rv515_ring_start,
+-	.ring_test = &r100_ring_test,
+-	.ring_ib_execute = &r100_ring_ib_execute,
+-	.irq_set = &rs600_irq_set,
+-	.irq_process = &rs600_irq_process,
+-	.get_vblank_counter = &rs600_get_vblank_counter,
+-	.fence_ring_emit = &r300_fence_ring_emit,
+-	.cs_parse = &r300_cs_parse,
+-	.copy_blit = &r100_copy_blit,
+-	.copy_dma = &r200_copy_dma,
+-	.copy = &r100_copy_blit,
+-	.get_engine_clock = &radeon_atom_get_engine_clock,
+-	.set_engine_clock = &radeon_atom_set_engine_clock,
+-	.get_memory_clock = &radeon_atom_get_memory_clock,
+-	.set_memory_clock = &radeon_atom_set_memory_clock,
+-	.get_pcie_lanes = &rv370_get_pcie_lanes,
+-	.set_pcie_lanes = &rv370_set_pcie_lanes,
+-	.set_clock_gating = &radeon_atom_set_clock_gating,
+-	.set_surface_reg = r100_set_surface_reg,
+-	.clear_surface_reg = r100_clear_surface_reg,
+-	.bandwidth_update = &rv515_bandwidth_update,
+-	.hpd_init = &rs600_hpd_init,
+-	.hpd_fini = &rs600_hpd_fini,
+-	.hpd_sense = &rs600_hpd_sense,
+-	.hpd_set_polarity = &rs600_hpd_set_polarity,
+ 	.ioctl_wait_idle = NULL,
+ 	.gui_idle = &r100_gui_idle,
+-	.pm_misc = &rs600_pm_misc,
+-	.pm_prepare = &rs600_pm_prepare,
+-	.pm_finish = &rs600_pm_finish,
+-	.pm_init_profile = &r420_pm_init_profile,
+-	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+-	.pre_page_flip = &rs600_pre_page_flip,
+-	.page_flip = &rs600_page_flip,
+-	.post_page_flip = &rs600_post_page_flip,
++	.mc_wait_for_idle = &rv515_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &rv370_pcie_gart_tlb_flush,
++		.set_page = &rv370_pcie_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &r100_ring_ib_execute,
++			.emit_fence = &r300_fence_ring_emit,
++			.emit_semaphore = &r100_semaphore_ring_emit,
++			.cs_parse = &r300_cs_parse,
++			.ring_start = &rv515_ring_start,
++			.ring_test = &r100_ring_test,
++			.ib_test = &r100_ib_test,
++		}
++	},
++	.irq = {
++		.set = &rs600_irq_set,
++		.process = &rs600_irq_process,
++	},
++	.display = {
++		.get_vblank_counter = &rs600_get_vblank_counter,
++		.bandwidth_update = &rv515_bandwidth_update,
++		.wait_for_vblank = &avivo_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r100_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = &r200_copy_dma,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r100_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r100_set_surface_reg,
++		.clear_reg = r100_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &rs600_hpd_init,
++		.fini = &rs600_hpd_fini,
++		.sense = &rs600_hpd_sense,
++		.set_polarity = &rs600_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &rs600_pm_misc,
++		.prepare = &rs600_pm_prepare,
++		.finish = &rs600_pm_finish,
++		.init_profile = &r420_pm_init_profile,
++		.get_dynpm_state = &r100_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_atom_get_engine_clock,
++		.set_engine_clock = &radeon_atom_set_engine_clock,
++		.get_memory_clock = &radeon_atom_get_memory_clock,
++		.set_memory_clock = &radeon_atom_set_memory_clock,
++		.get_pcie_lanes = &rv370_get_pcie_lanes,
++		.set_pcie_lanes = &rv370_set_pcie_lanes,
++		.set_clock_gating = &radeon_atom_set_clock_gating,
++	},
++	.pflip = {
++		.pre_page_flip = &rs600_pre_page_flip,
++		.page_flip = &rs600_page_flip,
++		.post_page_flip = &rs600_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic r520_asic = {
+@@ -566,44 +802,70 @@ static struct radeon_asic r520_asic = {
+ 	.vga_set_state = &r100_vga_set_state,
+ 	.gpu_is_lockup = &r300_gpu_is_lockup,
+ 	.asic_reset = &rs600_asic_reset,
+-	.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+-	.gart_set_page = &rv370_pcie_gart_set_page,
+-	.cp_commit = &r100_cp_commit,
+-	.ring_start = &rv515_ring_start,
+-	.ring_test = &r100_ring_test,
+-	.ring_ib_execute = &r100_ring_ib_execute,
+-	.irq_set = &rs600_irq_set,
+-	.irq_process = &rs600_irq_process,
+-	.get_vblank_counter = &rs600_get_vblank_counter,
+-	.fence_ring_emit = &r300_fence_ring_emit,
+-	.cs_parse = &r300_cs_parse,
+-	.copy_blit = &r100_copy_blit,
+-	.copy_dma = &r200_copy_dma,
+-	.copy = &r100_copy_blit,
+-	.get_engine_clock = &radeon_atom_get_engine_clock,
+-	.set_engine_clock = &radeon_atom_set_engine_clock,
+-	.get_memory_clock = &radeon_atom_get_memory_clock,
+-	.set_memory_clock = &radeon_atom_set_memory_clock,
+-	.get_pcie_lanes = &rv370_get_pcie_lanes,
+-	.set_pcie_lanes = &rv370_set_pcie_lanes,
+-	.set_clock_gating = &radeon_atom_set_clock_gating,
+-	.set_surface_reg = r100_set_surface_reg,
+-	.clear_surface_reg = r100_clear_surface_reg,
+-	.bandwidth_update = &rv515_bandwidth_update,
+-	.hpd_init = &rs600_hpd_init,
+-	.hpd_fini = &rs600_hpd_fini,
+-	.hpd_sense = &rs600_hpd_sense,
+-	.hpd_set_polarity = &rs600_hpd_set_polarity,
+ 	.ioctl_wait_idle = NULL,
+ 	.gui_idle = &r100_gui_idle,
+-	.pm_misc = &rs600_pm_misc,
+-	.pm_prepare = &rs600_pm_prepare,
+-	.pm_finish = &rs600_pm_finish,
+-	.pm_init_profile = &r420_pm_init_profile,
+-	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+-	.pre_page_flip = &rs600_pre_page_flip,
+-	.page_flip = &rs600_page_flip,
+-	.post_page_flip = &rs600_post_page_flip,
++	.mc_wait_for_idle = &r520_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &rv370_pcie_gart_tlb_flush,
++		.set_page = &rv370_pcie_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &r100_ring_ib_execute,
++			.emit_fence = &r300_fence_ring_emit,
++			.emit_semaphore = &r100_semaphore_ring_emit,
++			.cs_parse = &r300_cs_parse,
++			.ring_start = &rv515_ring_start,
++			.ring_test = &r100_ring_test,
++			.ib_test = &r100_ib_test,
++		}
++	},
++	.irq = {
++		.set = &rs600_irq_set,
++		.process = &rs600_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &rv515_bandwidth_update,
++		.get_vblank_counter = &rs600_get_vblank_counter,
++		.wait_for_vblank = &avivo_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r100_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = &r200_copy_dma,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r100_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r100_set_surface_reg,
++		.clear_reg = r100_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &rs600_hpd_init,
++		.fini = &rs600_hpd_fini,
++		.sense = &rs600_hpd_sense,
++		.set_polarity = &rs600_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &rs600_pm_misc,
++		.prepare = &rs600_pm_prepare,
++		.finish = &rs600_pm_finish,
++		.init_profile = &r420_pm_init_profile,
++		.get_dynpm_state = &r100_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_atom_get_engine_clock,
++		.set_engine_clock = &radeon_atom_set_engine_clock,
++		.get_memory_clock = &radeon_atom_get_memory_clock,
++		.set_memory_clock = &radeon_atom_set_memory_clock,
++		.get_pcie_lanes = &rv370_get_pcie_lanes,
++		.set_pcie_lanes = &rv370_set_pcie_lanes,
++		.set_clock_gating = &radeon_atom_set_clock_gating,
++	},
++	.pflip = {
++		.pre_page_flip = &rs600_pre_page_flip,
++		.page_flip = &rs600_page_flip,
++		.post_page_flip = &rs600_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic r600_asic = {
+@@ -611,46 +873,72 @@ static struct radeon_asic r600_asic = {
+ 	.fini = &r600_fini,
+ 	.suspend = &r600_suspend,
+ 	.resume = &r600_resume,
+-	.cp_commit = &r600_cp_commit,
+ 	.vga_set_state = &r600_vga_set_state,
+ 	.gpu_is_lockup = &r600_gpu_is_lockup,
+ 	.asic_reset = &r600_asic_reset,
+-	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+-	.gart_set_page = &rs600_gart_set_page,
+-	.ring_test = &r600_ring_test,
+-	.ring_ib_execute = &r600_ring_ib_execute,
+-	.irq_set = &r600_irq_set,
+-	.irq_process = &r600_irq_process,
+-	.get_vblank_counter = &rs600_get_vblank_counter,
+-	.fence_ring_emit = &r600_fence_ring_emit,
+-	.cs_parse = &r600_cs_parse,
+-	.copy_blit = &r600_copy_blit,
+-	.copy_dma = NULL,
+-	.copy = &r600_copy_blit,
+-	.get_engine_clock = &radeon_atom_get_engine_clock,
+-	.set_engine_clock = &radeon_atom_set_engine_clock,
+-	.get_memory_clock = &radeon_atom_get_memory_clock,
+-	.set_memory_clock = &radeon_atom_set_memory_clock,
+-	.get_pcie_lanes = &r600_get_pcie_lanes,
+-	.set_pcie_lanes = &r600_set_pcie_lanes,
+-	.set_clock_gating = NULL,
+-	.set_surface_reg = r600_set_surface_reg,
+-	.clear_surface_reg = r600_clear_surface_reg,
+-	.bandwidth_update = &rv515_bandwidth_update,
+-	.hpd_init = &r600_hpd_init,
+-	.hpd_fini = &r600_hpd_fini,
+-	.hpd_sense = &r600_hpd_sense,
+-	.hpd_set_polarity = &r600_hpd_set_polarity,
+ 	.ioctl_wait_idle = r600_ioctl_wait_idle,
+ 	.gui_idle = &r600_gui_idle,
+-	.pm_misc = &r600_pm_misc,
+-	.pm_prepare = &rs600_pm_prepare,
+-	.pm_finish = &rs600_pm_finish,
+-	.pm_init_profile = &r600_pm_init_profile,
+-	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+-	.pre_page_flip = &rs600_pre_page_flip,
+-	.page_flip = &rs600_page_flip,
+-	.post_page_flip = &rs600_post_page_flip,
++	.mc_wait_for_idle = &r600_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &r600_pcie_gart_tlb_flush,
++		.set_page = &rs600_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &r600_ring_ib_execute,
++			.emit_fence = &r600_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = &r600_cs_parse,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		}
++	},
++	.irq = {
++		.set = &r600_irq_set,
++		.process = &r600_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &rv515_bandwidth_update,
++		.get_vblank_counter = &rs600_get_vblank_counter,
++		.wait_for_vblank = &avivo_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r600_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = NULL,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r600_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r600_set_surface_reg,
++		.clear_reg = r600_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &r600_hpd_init,
++		.fini = &r600_hpd_fini,
++		.sense = &r600_hpd_sense,
++		.set_polarity = &r600_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &r600_pm_misc,
++		.prepare = &rs600_pm_prepare,
++		.finish = &rs600_pm_finish,
++		.init_profile = &r600_pm_init_profile,
++		.get_dynpm_state = &r600_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_atom_get_engine_clock,
++		.set_engine_clock = &radeon_atom_set_engine_clock,
++		.get_memory_clock = &radeon_atom_get_memory_clock,
++		.set_memory_clock = &radeon_atom_set_memory_clock,
++		.get_pcie_lanes = &r600_get_pcie_lanes,
++		.set_pcie_lanes = &r600_set_pcie_lanes,
++		.set_clock_gating = NULL,
++	},
++	.pflip = {
++		.pre_page_flip = &rs600_pre_page_flip,
++		.page_flip = &rs600_page_flip,
++		.post_page_flip = &rs600_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic rs780_asic = {
+@@ -658,46 +946,72 @@ static struct radeon_asic rs780_asic = {
+ 	.fini = &r600_fini,
+ 	.suspend = &r600_suspend,
+ 	.resume = &r600_resume,
+-	.cp_commit = &r600_cp_commit,
+ 	.gpu_is_lockup = &r600_gpu_is_lockup,
+ 	.vga_set_state = &r600_vga_set_state,
+ 	.asic_reset = &r600_asic_reset,
+-	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+-	.gart_set_page = &rs600_gart_set_page,
+-	.ring_test = &r600_ring_test,
+-	.ring_ib_execute = &r600_ring_ib_execute,
+-	.irq_set = &r600_irq_set,
+-	.irq_process = &r600_irq_process,
+-	.get_vblank_counter = &rs600_get_vblank_counter,
+-	.fence_ring_emit = &r600_fence_ring_emit,
+-	.cs_parse = &r600_cs_parse,
+-	.copy_blit = &r600_copy_blit,
+-	.copy_dma = NULL,
+-	.copy = &r600_copy_blit,
+-	.get_engine_clock = &radeon_atom_get_engine_clock,
+-	.set_engine_clock = &radeon_atom_set_engine_clock,
+-	.get_memory_clock = NULL,
+-	.set_memory_clock = NULL,
+-	.get_pcie_lanes = NULL,
+-	.set_pcie_lanes = NULL,
+-	.set_clock_gating = NULL,
+-	.set_surface_reg = r600_set_surface_reg,
+-	.clear_surface_reg = r600_clear_surface_reg,
+-	.bandwidth_update = &rs690_bandwidth_update,
+-	.hpd_init = &r600_hpd_init,
+-	.hpd_fini = &r600_hpd_fini,
+-	.hpd_sense = &r600_hpd_sense,
+-	.hpd_set_polarity = &r600_hpd_set_polarity,
+ 	.ioctl_wait_idle = r600_ioctl_wait_idle,
+ 	.gui_idle = &r600_gui_idle,
+-	.pm_misc = &r600_pm_misc,
+-	.pm_prepare = &rs600_pm_prepare,
+-	.pm_finish = &rs600_pm_finish,
+-	.pm_init_profile = &rs780_pm_init_profile,
+-	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+-	.pre_page_flip = &rs600_pre_page_flip,
+-	.page_flip = &rs600_page_flip,
+-	.post_page_flip = &rs600_post_page_flip,
++	.mc_wait_for_idle = &r600_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &r600_pcie_gart_tlb_flush,
++		.set_page = &rs600_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &r600_ring_ib_execute,
++			.emit_fence = &r600_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = &r600_cs_parse,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		}
++	},
++	.irq = {
++		.set = &r600_irq_set,
++		.process = &r600_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &rs690_bandwidth_update,
++		.get_vblank_counter = &rs600_get_vblank_counter,
++		.wait_for_vblank = &avivo_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r600_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = NULL,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r600_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r600_set_surface_reg,
++		.clear_reg = r600_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &r600_hpd_init,
++		.fini = &r600_hpd_fini,
++		.sense = &r600_hpd_sense,
++		.set_polarity = &r600_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &r600_pm_misc,
++		.prepare = &rs600_pm_prepare,
++		.finish = &rs600_pm_finish,
++		.init_profile = &rs780_pm_init_profile,
++		.get_dynpm_state = &r600_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_atom_get_engine_clock,
++		.set_engine_clock = &radeon_atom_set_engine_clock,
++		.get_memory_clock = NULL,
++		.set_memory_clock = NULL,
++		.get_pcie_lanes = NULL,
++		.set_pcie_lanes = NULL,
++		.set_clock_gating = NULL,
++	},
++	.pflip = {
++		.pre_page_flip = &rs600_pre_page_flip,
++		.page_flip = &rs600_page_flip,
++		.post_page_flip = &rs600_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic rv770_asic = {
+@@ -705,46 +1019,72 @@ static struct radeon_asic rv770_asic = {
+ 	.fini = &rv770_fini,
+ 	.suspend = &rv770_suspend,
+ 	.resume = &rv770_resume,
+-	.cp_commit = &r600_cp_commit,
+ 	.asic_reset = &r600_asic_reset,
+ 	.gpu_is_lockup = &r600_gpu_is_lockup,
+ 	.vga_set_state = &r600_vga_set_state,
+-	.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+-	.gart_set_page = &rs600_gart_set_page,
+-	.ring_test = &r600_ring_test,
+-	.ring_ib_execute = &r600_ring_ib_execute,
+-	.irq_set = &r600_irq_set,
+-	.irq_process = &r600_irq_process,
+-	.get_vblank_counter = &rs600_get_vblank_counter,
+-	.fence_ring_emit = &r600_fence_ring_emit,
+-	.cs_parse = &r600_cs_parse,
+-	.copy_blit = &r600_copy_blit,
+-	.copy_dma = NULL,
+-	.copy = &r600_copy_blit,
+-	.get_engine_clock = &radeon_atom_get_engine_clock,
+-	.set_engine_clock = &radeon_atom_set_engine_clock,
+-	.get_memory_clock = &radeon_atom_get_memory_clock,
+-	.set_memory_clock = &radeon_atom_set_memory_clock,
+-	.get_pcie_lanes = &r600_get_pcie_lanes,
+-	.set_pcie_lanes = &r600_set_pcie_lanes,
+-	.set_clock_gating = &radeon_atom_set_clock_gating,
+-	.set_surface_reg = r600_set_surface_reg,
+-	.clear_surface_reg = r600_clear_surface_reg,
+-	.bandwidth_update = &rv515_bandwidth_update,
+-	.hpd_init = &r600_hpd_init,
+-	.hpd_fini = &r600_hpd_fini,
+-	.hpd_sense = &r600_hpd_sense,
+-	.hpd_set_polarity = &r600_hpd_set_polarity,
+ 	.ioctl_wait_idle = r600_ioctl_wait_idle,
+ 	.gui_idle = &r600_gui_idle,
+-	.pm_misc = &rv770_pm_misc,
+-	.pm_prepare = &rs600_pm_prepare,
+-	.pm_finish = &rs600_pm_finish,
+-	.pm_init_profile = &r600_pm_init_profile,
+-	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+-	.pre_page_flip = &rs600_pre_page_flip,
+-	.page_flip = &rv770_page_flip,
+-	.post_page_flip = &rs600_post_page_flip,
++	.mc_wait_for_idle = &r600_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &r600_pcie_gart_tlb_flush,
++		.set_page = &rs600_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &r600_ring_ib_execute,
++			.emit_fence = &r600_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = &r600_cs_parse,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		}
++	},
++	.irq = {
++		.set = &r600_irq_set,
++		.process = &r600_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &rv515_bandwidth_update,
++		.get_vblank_counter = &rs600_get_vblank_counter,
++		.wait_for_vblank = &avivo_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r600_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = NULL,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r600_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r600_set_surface_reg,
++		.clear_reg = r600_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &r600_hpd_init,
++		.fini = &r600_hpd_fini,
++		.sense = &r600_hpd_sense,
++		.set_polarity = &r600_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &rv770_pm_misc,
++		.prepare = &rs600_pm_prepare,
++		.finish = &rs600_pm_finish,
++		.init_profile = &r600_pm_init_profile,
++		.get_dynpm_state = &r600_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_atom_get_engine_clock,
++		.set_engine_clock = &radeon_atom_set_engine_clock,
++		.get_memory_clock = &radeon_atom_get_memory_clock,
++		.set_memory_clock = &radeon_atom_set_memory_clock,
++		.get_pcie_lanes = &r600_get_pcie_lanes,
++		.set_pcie_lanes = &r600_set_pcie_lanes,
++		.set_clock_gating = &radeon_atom_set_clock_gating,
++	},
++	.pflip = {
++		.pre_page_flip = &rs600_pre_page_flip,
++		.page_flip = &rv770_page_flip,
++		.post_page_flip = &rs600_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic evergreen_asic = {
+@@ -752,46 +1092,72 @@ static struct radeon_asic evergreen_asic = {
+ 	.fini = &evergreen_fini,
+ 	.suspend = &evergreen_suspend,
+ 	.resume = &evergreen_resume,
+-	.cp_commit = &r600_cp_commit,
+ 	.gpu_is_lockup = &evergreen_gpu_is_lockup,
+ 	.asic_reset = &evergreen_asic_reset,
+ 	.vga_set_state = &r600_vga_set_state,
+-	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+-	.gart_set_page = &rs600_gart_set_page,
+-	.ring_test = &r600_ring_test,
+-	.ring_ib_execute = &evergreen_ring_ib_execute,
+-	.irq_set = &evergreen_irq_set,
+-	.irq_process = &evergreen_irq_process,
+-	.get_vblank_counter = &evergreen_get_vblank_counter,
+-	.fence_ring_emit = &r600_fence_ring_emit,
+-	.cs_parse = &evergreen_cs_parse,
+-	.copy_blit = &r600_copy_blit,
+-	.copy_dma = NULL,
+-	.copy = &r600_copy_blit,
+-	.get_engine_clock = &radeon_atom_get_engine_clock,
+-	.set_engine_clock = &radeon_atom_set_engine_clock,
+-	.get_memory_clock = &radeon_atom_get_memory_clock,
+-	.set_memory_clock = &radeon_atom_set_memory_clock,
+-	.get_pcie_lanes = &r600_get_pcie_lanes,
+-	.set_pcie_lanes = &r600_set_pcie_lanes,
+-	.set_clock_gating = NULL,
+-	.set_surface_reg = r600_set_surface_reg,
+-	.clear_surface_reg = r600_clear_surface_reg,
+-	.bandwidth_update = &evergreen_bandwidth_update,
+-	.hpd_init = &evergreen_hpd_init,
+-	.hpd_fini = &evergreen_hpd_fini,
+-	.hpd_sense = &evergreen_hpd_sense,
+-	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+ 	.ioctl_wait_idle = r600_ioctl_wait_idle,
+ 	.gui_idle = &r600_gui_idle,
+-	.pm_misc = &evergreen_pm_misc,
+-	.pm_prepare = &evergreen_pm_prepare,
+-	.pm_finish = &evergreen_pm_finish,
+-	.pm_init_profile = &r600_pm_init_profile,
+-	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+-	.pre_page_flip = &evergreen_pre_page_flip,
+-	.page_flip = &evergreen_page_flip,
+-	.post_page_flip = &evergreen_post_page_flip,
++	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
++		.set_page = &rs600_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &evergreen_ring_ib_execute,
++			.emit_fence = &r600_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = &evergreen_cs_parse,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		}
++	},
++	.irq = {
++		.set = &evergreen_irq_set,
++		.process = &evergreen_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &evergreen_bandwidth_update,
++		.get_vblank_counter = &evergreen_get_vblank_counter,
++		.wait_for_vblank = &dce4_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r600_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = NULL,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r600_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r600_set_surface_reg,
++		.clear_reg = r600_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &evergreen_hpd_init,
++		.fini = &evergreen_hpd_fini,
++		.sense = &evergreen_hpd_sense,
++		.set_polarity = &evergreen_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &evergreen_pm_misc,
++		.prepare = &evergreen_pm_prepare,
++		.finish = &evergreen_pm_finish,
++		.init_profile = &r600_pm_init_profile,
++		.get_dynpm_state = &r600_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_atom_get_engine_clock,
++		.set_engine_clock = &radeon_atom_set_engine_clock,
++		.get_memory_clock = &radeon_atom_get_memory_clock,
++		.set_memory_clock = &radeon_atom_set_memory_clock,
++		.get_pcie_lanes = &r600_get_pcie_lanes,
++		.set_pcie_lanes = &r600_set_pcie_lanes,
++		.set_clock_gating = NULL,
++	},
++	.pflip = {
++		.pre_page_flip = &evergreen_pre_page_flip,
++		.page_flip = &evergreen_page_flip,
++		.post_page_flip = &evergreen_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic sumo_asic = {
+@@ -799,46 +1165,72 @@ static struct radeon_asic sumo_asic = {
+ 	.fini = &evergreen_fini,
+ 	.suspend = &evergreen_suspend,
+ 	.resume = &evergreen_resume,
+-	.cp_commit = &r600_cp_commit,
+ 	.gpu_is_lockup = &evergreen_gpu_is_lockup,
+ 	.asic_reset = &evergreen_asic_reset,
+ 	.vga_set_state = &r600_vga_set_state,
+-	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+-	.gart_set_page = &rs600_gart_set_page,
+-	.ring_test = &r600_ring_test,
+-	.ring_ib_execute = &evergreen_ring_ib_execute,
+-	.irq_set = &evergreen_irq_set,
+-	.irq_process = &evergreen_irq_process,
+-	.get_vblank_counter = &evergreen_get_vblank_counter,
+-	.fence_ring_emit = &r600_fence_ring_emit,
+-	.cs_parse = &evergreen_cs_parse,
+-	.copy_blit = &r600_copy_blit,
+-	.copy_dma = NULL,
+-	.copy = &r600_copy_blit,
+-	.get_engine_clock = &radeon_atom_get_engine_clock,
+-	.set_engine_clock = &radeon_atom_set_engine_clock,
+-	.get_memory_clock = NULL,
+-	.set_memory_clock = NULL,
+-	.get_pcie_lanes = NULL,
+-	.set_pcie_lanes = NULL,
+-	.set_clock_gating = NULL,
+-	.set_surface_reg = r600_set_surface_reg,
+-	.clear_surface_reg = r600_clear_surface_reg,
+-	.bandwidth_update = &evergreen_bandwidth_update,
+-	.hpd_init = &evergreen_hpd_init,
+-	.hpd_fini = &evergreen_hpd_fini,
+-	.hpd_sense = &evergreen_hpd_sense,
+-	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+ 	.ioctl_wait_idle = r600_ioctl_wait_idle,
+ 	.gui_idle = &r600_gui_idle,
+-	.pm_misc = &evergreen_pm_misc,
+-	.pm_prepare = &evergreen_pm_prepare,
+-	.pm_finish = &evergreen_pm_finish,
+-	.pm_init_profile = &sumo_pm_init_profile,
+-	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+-	.pre_page_flip = &evergreen_pre_page_flip,
+-	.page_flip = &evergreen_page_flip,
+-	.post_page_flip = &evergreen_post_page_flip,
++	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
++		.set_page = &rs600_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &evergreen_ring_ib_execute,
++			.emit_fence = &r600_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = &evergreen_cs_parse,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		},
++	},
++	.irq = {
++		.set = &evergreen_irq_set,
++		.process = &evergreen_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &evergreen_bandwidth_update,
++		.get_vblank_counter = &evergreen_get_vblank_counter,
++		.wait_for_vblank = &dce4_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r600_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = NULL,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r600_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r600_set_surface_reg,
++		.clear_reg = r600_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &evergreen_hpd_init,
++		.fini = &evergreen_hpd_fini,
++		.sense = &evergreen_hpd_sense,
++		.set_polarity = &evergreen_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &evergreen_pm_misc,
++		.prepare = &evergreen_pm_prepare,
++		.finish = &evergreen_pm_finish,
++		.init_profile = &sumo_pm_init_profile,
++		.get_dynpm_state = &r600_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_atom_get_engine_clock,
++		.set_engine_clock = &radeon_atom_set_engine_clock,
++		.get_memory_clock = NULL,
++		.set_memory_clock = NULL,
++		.get_pcie_lanes = NULL,
++		.set_pcie_lanes = NULL,
++		.set_clock_gating = NULL,
++	},
++	.pflip = {
++		.pre_page_flip = &evergreen_pre_page_flip,
++		.page_flip = &evergreen_page_flip,
++		.post_page_flip = &evergreen_post_page_flip,
++	},
+ };
+ 
+ static struct radeon_asic btc_asic = {
+@@ -846,46 +1238,82 @@ static struct radeon_asic btc_asic = {
+ 	.fini = &evergreen_fini,
+ 	.suspend = &evergreen_suspend,
+ 	.resume = &evergreen_resume,
+-	.cp_commit = &r600_cp_commit,
+ 	.gpu_is_lockup = &evergreen_gpu_is_lockup,
+ 	.asic_reset = &evergreen_asic_reset,
+ 	.vga_set_state = &r600_vga_set_state,
+-	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+-	.gart_set_page = &rs600_gart_set_page,
+-	.ring_test = &r600_ring_test,
+-	.ring_ib_execute = &evergreen_ring_ib_execute,
+-	.irq_set = &evergreen_irq_set,
+-	.irq_process = &evergreen_irq_process,
+-	.get_vblank_counter = &evergreen_get_vblank_counter,
+-	.fence_ring_emit = &r600_fence_ring_emit,
+-	.cs_parse = &evergreen_cs_parse,
+-	.copy_blit = &r600_copy_blit,
+-	.copy_dma = NULL,
+-	.copy = &r600_copy_blit,
+-	.get_engine_clock = &radeon_atom_get_engine_clock,
+-	.set_engine_clock = &radeon_atom_set_engine_clock,
+-	.get_memory_clock = &radeon_atom_get_memory_clock,
+-	.set_memory_clock = &radeon_atom_set_memory_clock,
+-	.get_pcie_lanes = NULL,
+-	.set_pcie_lanes = NULL,
+-	.set_clock_gating = NULL,
+-	.set_surface_reg = r600_set_surface_reg,
+-	.clear_surface_reg = r600_clear_surface_reg,
+-	.bandwidth_update = &evergreen_bandwidth_update,
+-	.hpd_init = &evergreen_hpd_init,
+-	.hpd_fini = &evergreen_hpd_fini,
+-	.hpd_sense = &evergreen_hpd_sense,
+-	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+ 	.ioctl_wait_idle = r600_ioctl_wait_idle,
+ 	.gui_idle = &r600_gui_idle,
+-	.pm_misc = &evergreen_pm_misc,
+-	.pm_prepare = &evergreen_pm_prepare,
+-	.pm_finish = &evergreen_pm_finish,
+-	.pm_init_profile = &r600_pm_init_profile,
+-	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+-	.pre_page_flip = &evergreen_pre_page_flip,
+-	.page_flip = &evergreen_page_flip,
+-	.post_page_flip = &evergreen_post_page_flip,
++	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &evergreen_pcie_gart_tlb_flush,
++		.set_page = &rs600_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &evergreen_ring_ib_execute,
++			.emit_fence = &r600_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = &evergreen_cs_parse,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		}
++	},
++	.irq = {
++		.set = &evergreen_irq_set,
++		.process = &evergreen_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &evergreen_bandwidth_update,
++		.get_vblank_counter = &evergreen_get_vblank_counter,
++		.wait_for_vblank = &dce4_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r600_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = NULL,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r600_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r600_set_surface_reg,
++		.clear_reg = r600_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &evergreen_hpd_init,
++		.fini = &evergreen_hpd_fini,
++		.sense = &evergreen_hpd_sense,
++		.set_polarity = &evergreen_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &evergreen_pm_misc,
++		.prepare = &evergreen_pm_prepare,
++		.finish = &evergreen_pm_finish,
++		.init_profile = &r600_pm_init_profile,
++		.get_dynpm_state = &r600_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_atom_get_engine_clock,
++		.set_engine_clock = &radeon_atom_set_engine_clock,
++		.get_memory_clock = &radeon_atom_get_memory_clock,
++		.set_memory_clock = &radeon_atom_set_memory_clock,
++		.get_pcie_lanes = NULL,
++		.set_pcie_lanes = NULL,
++		.set_clock_gating = NULL,
++	},
++	.pflip = {
++		.pre_page_flip = &evergreen_pre_page_flip,
++		.page_flip = &evergreen_page_flip,
++		.post_page_flip = &evergreen_post_page_flip,
++	},
++};
++
++static const struct radeon_vm_funcs cayman_vm_funcs = {
++	.init = &cayman_vm_init,
++	.fini = &cayman_vm_fini,
++	.bind = &cayman_vm_bind,
++	.unbind = &cayman_vm_unbind,
++	.tlb_flush = &cayman_vm_tlb_flush,
++	.page_flags = &cayman_vm_page_flags,
++	.set_page = &cayman_vm_set_page,
+ };
+ 
+ static struct radeon_asic cayman_asic = {
+@@ -893,46 +1321,285 @@ static struct radeon_asic cayman_asic = {
+ 	.fini = &cayman_fini,
+ 	.suspend = &cayman_suspend,
+ 	.resume = &cayman_resume,
+-	.cp_commit = &r600_cp_commit,
+ 	.gpu_is_lockup = &cayman_gpu_is_lockup,
+ 	.asic_reset = &cayman_asic_reset,
+ 	.vga_set_state = &r600_vga_set_state,
+-	.gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
+-	.gart_set_page = &rs600_gart_set_page,
+-	.ring_test = &r600_ring_test,
+-	.ring_ib_execute = &evergreen_ring_ib_execute,
+-	.irq_set = &evergreen_irq_set,
+-	.irq_process = &evergreen_irq_process,
+-	.get_vblank_counter = &evergreen_get_vblank_counter,
+-	.fence_ring_emit = &r600_fence_ring_emit,
+-	.cs_parse = &evergreen_cs_parse,
+-	.copy_blit = &r600_copy_blit,
+-	.copy_dma = NULL,
+-	.copy = &r600_copy_blit,
+-	.get_engine_clock = &radeon_atom_get_engine_clock,
+-	.set_engine_clock = &radeon_atom_set_engine_clock,
+-	.get_memory_clock = &radeon_atom_get_memory_clock,
+-	.set_memory_clock = &radeon_atom_set_memory_clock,
+-	.get_pcie_lanes = NULL,
+-	.set_pcie_lanes = NULL,
+-	.set_clock_gating = NULL,
+-	.set_surface_reg = r600_set_surface_reg,
+-	.clear_surface_reg = r600_clear_surface_reg,
+-	.bandwidth_update = &evergreen_bandwidth_update,
+-	.hpd_init = &evergreen_hpd_init,
+-	.hpd_fini = &evergreen_hpd_fini,
+-	.hpd_sense = &evergreen_hpd_sense,
+-	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+ 	.ioctl_wait_idle = r600_ioctl_wait_idle,
+ 	.gui_idle = &r600_gui_idle,
+-	.pm_misc = &evergreen_pm_misc,
+-	.pm_prepare = &evergreen_pm_prepare,
+-	.pm_finish = &evergreen_pm_finish,
+-	.pm_init_profile = &r600_pm_init_profile,
+-	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+-	.pre_page_flip = &evergreen_pre_page_flip,
+-	.page_flip = &evergreen_page_flip,
+-	.post_page_flip = &evergreen_post_page_flip,
++	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &cayman_pcie_gart_tlb_flush,
++		.set_page = &rs600_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &cayman_ring_ib_execute,
++			.ib_parse = &evergreen_ib_parse,
++			.emit_fence = &cayman_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = &evergreen_cs_parse,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		},
++		[CAYMAN_RING_TYPE_CP1_INDEX] = {
++			.ib_execute = &cayman_ring_ib_execute,
++			.ib_parse = &evergreen_ib_parse,
++			.emit_fence = &cayman_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = &evergreen_cs_parse,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		},
++		[CAYMAN_RING_TYPE_CP2_INDEX] = {
++			.ib_execute = &cayman_ring_ib_execute,
++			.ib_parse = &evergreen_ib_parse,
++			.emit_fence = &cayman_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = &evergreen_cs_parse,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		}
++	},
++	.irq = {
++		.set = &evergreen_irq_set,
++		.process = &evergreen_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &evergreen_bandwidth_update,
++		.get_vblank_counter = &evergreen_get_vblank_counter,
++		.wait_for_vblank = &dce4_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r600_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = NULL,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r600_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r600_set_surface_reg,
++		.clear_reg = r600_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &evergreen_hpd_init,
++		.fini = &evergreen_hpd_fini,
++		.sense = &evergreen_hpd_sense,
++		.set_polarity = &evergreen_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &evergreen_pm_misc,
++		.prepare = &evergreen_pm_prepare,
++		.finish = &evergreen_pm_finish,
++		.init_profile = &r600_pm_init_profile,
++		.get_dynpm_state = &r600_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_atom_get_engine_clock,
++		.set_engine_clock = &radeon_atom_set_engine_clock,
++		.get_memory_clock = &radeon_atom_get_memory_clock,
++		.set_memory_clock = &radeon_atom_set_memory_clock,
++		.get_pcie_lanes = NULL,
++		.set_pcie_lanes = NULL,
++		.set_clock_gating = NULL,
++	},
++	.pflip = {
++		.pre_page_flip = &evergreen_pre_page_flip,
++		.page_flip = &evergreen_page_flip,
++		.post_page_flip = &evergreen_post_page_flip,
++	},
++};
++
++static struct radeon_asic trinity_asic = {
++	.init = &cayman_init,
++	.fini = &cayman_fini,
++	.suspend = &cayman_suspend,
++	.resume = &cayman_resume,
++	.gpu_is_lockup = &cayman_gpu_is_lockup,
++	.asic_reset = &cayman_asic_reset,
++	.vga_set_state = &r600_vga_set_state,
++	.ioctl_wait_idle = r600_ioctl_wait_idle,
++	.gui_idle = &r600_gui_idle,
++	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &cayman_pcie_gart_tlb_flush,
++		.set_page = &rs600_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &cayman_ring_ib_execute,
++			.ib_parse = &evergreen_ib_parse,
++			.emit_fence = &cayman_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = &evergreen_cs_parse,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		},
++		[CAYMAN_RING_TYPE_CP1_INDEX] = {
++			.ib_execute = &cayman_ring_ib_execute,
++			.ib_parse = &evergreen_ib_parse,
++			.emit_fence = &cayman_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = &evergreen_cs_parse,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		},
++		[CAYMAN_RING_TYPE_CP2_INDEX] = {
++			.ib_execute = &cayman_ring_ib_execute,
++			.ib_parse = &evergreen_ib_parse,
++			.emit_fence = &cayman_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = &evergreen_cs_parse,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		}
++	},
++	.irq = {
++		.set = &evergreen_irq_set,
++		.process = &evergreen_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &dce6_bandwidth_update,
++		.get_vblank_counter = &evergreen_get_vblank_counter,
++		.wait_for_vblank = &dce4_wait_for_vblank,
++	},
++	.copy = {
++		.blit = &r600_copy_blit,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = NULL,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = &r600_copy_blit,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r600_set_surface_reg,
++		.clear_reg = r600_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &evergreen_hpd_init,
++		.fini = &evergreen_hpd_fini,
++		.sense = &evergreen_hpd_sense,
++		.set_polarity = &evergreen_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &evergreen_pm_misc,
++		.prepare = &evergreen_pm_prepare,
++		.finish = &evergreen_pm_finish,
++		.init_profile = &sumo_pm_init_profile,
++		.get_dynpm_state = &r600_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_atom_get_engine_clock,
++		.set_engine_clock = &radeon_atom_set_engine_clock,
++		.get_memory_clock = NULL,
++		.set_memory_clock = NULL,
++		.get_pcie_lanes = NULL,
++		.set_pcie_lanes = NULL,
++		.set_clock_gating = NULL,
++	},
++	.pflip = {
++		.pre_page_flip = &evergreen_pre_page_flip,
++		.page_flip = &evergreen_page_flip,
++		.post_page_flip = &evergreen_post_page_flip,
++	},
++};
++
++static const struct radeon_vm_funcs si_vm_funcs = {
++	.init = &si_vm_init,
++	.fini = &si_vm_fini,
++	.bind = &si_vm_bind,
++	.unbind = &si_vm_unbind,
++	.tlb_flush = &si_vm_tlb_flush,
++	.page_flags = &cayman_vm_page_flags,
++	.set_page = &cayman_vm_set_page,
++};
++
++static struct radeon_asic si_asic = {
++	.init = &si_init,
++	.fini = &si_fini,
++	.suspend = &si_suspend,
++	.resume = &si_resume,
++	.gpu_is_lockup = &si_gpu_is_lockup,
++	.asic_reset = &si_asic_reset,
++	.vga_set_state = &r600_vga_set_state,
++	.ioctl_wait_idle = r600_ioctl_wait_idle,
++	.gui_idle = &r600_gui_idle,
++	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
++	.gart = {
++		.tlb_flush = &si_pcie_gart_tlb_flush,
++		.set_page = &rs600_gart_set_page,
++	},
++	.ring = {
++		[RADEON_RING_TYPE_GFX_INDEX] = {
++			.ib_execute = &si_ring_ib_execute,
++			.ib_parse = &si_ib_parse,
++			.emit_fence = &si_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = NULL,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		},
++		[CAYMAN_RING_TYPE_CP1_INDEX] = {
++			.ib_execute = &si_ring_ib_execute,
++			.ib_parse = &si_ib_parse,
++			.emit_fence = &si_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = NULL,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		},
++		[CAYMAN_RING_TYPE_CP2_INDEX] = {
++			.ib_execute = &si_ring_ib_execute,
++			.ib_parse = &si_ib_parse,
++			.emit_fence = &si_fence_ring_emit,
++			.emit_semaphore = &r600_semaphore_ring_emit,
++			.cs_parse = NULL,
++			.ring_test = &r600_ring_test,
++			.ib_test = &r600_ib_test,
++		}
++	},
++	.irq = {
++		.set = &si_irq_set,
++		.process = &si_irq_process,
++	},
++	.display = {
++		.bandwidth_update = &dce6_bandwidth_update,
++		.get_vblank_counter = &evergreen_get_vblank_counter,
++		.wait_for_vblank = &dce4_wait_for_vblank,
++	},
++	.copy = {
++		.blit = NULL,
++		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.dma = NULL,
++		.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++		.copy = NULL,
++		.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
++	},
++	.surface = {
++		.set_reg = r600_set_surface_reg,
++		.clear_reg = r600_clear_surface_reg,
++	},
++	.hpd = {
++		.init = &evergreen_hpd_init,
++		.fini = &evergreen_hpd_fini,
++		.sense = &evergreen_hpd_sense,
++		.set_polarity = &evergreen_hpd_set_polarity,
++	},
++	.pm = {
++		.misc = &evergreen_pm_misc,
++		.prepare = &evergreen_pm_prepare,
++		.finish = &evergreen_pm_finish,
++		.init_profile = &sumo_pm_init_profile,
++		.get_dynpm_state = &r600_pm_get_dynpm_state,
++		.get_engine_clock = &radeon_atom_get_engine_clock,
++		.set_engine_clock = &radeon_atom_set_engine_clock,
++		.get_memory_clock = &radeon_atom_get_memory_clock,
++		.set_memory_clock = &radeon_atom_set_memory_clock,
++		.get_pcie_lanes = NULL,
++		.set_pcie_lanes = NULL,
++		.set_clock_gating = NULL,
++	},
++	.pflip = {
++		.pre_page_flip = &evergreen_pre_page_flip,
++		.page_flip = &evergreen_page_flip,
++		.post_page_flip = &evergreen_post_page_flip,
++	},
+ };
+ 
+ int radeon_asic_init(struct radeon_device *rdev)
+@@ -974,10 +1641,10 @@ int radeon_asic_init(struct radeon_device *rdev)
+ 		rdev->asic = &r420_asic;
+ 		/* handle macs */
+ 		if (rdev->bios == NULL) {
+-			rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
+-			rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
+-			rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
+-			rdev->asic->set_memory_clock = NULL;
++			rdev->asic->pm.get_engine_clock = &radeon_legacy_get_engine_clock;
++			rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
++			rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
++			rdev->asic->pm.set_memory_clock = NULL;
+ 		}
+ 		break;
+ 	case CHIP_RS400:
+@@ -1050,6 +1717,21 @@ int radeon_asic_init(struct radeon_device *rdev)
+ 		rdev->asic = &cayman_asic;
+ 		/* set num crtcs */
+ 		rdev->num_crtc = 6;
++		rdev->vm_manager.funcs = &cayman_vm_funcs;
++		break;
++	case CHIP_ARUBA:
++		rdev->asic = &trinity_asic;
++		/* set num crtcs */
++		rdev->num_crtc = 4;
++		rdev->vm_manager.funcs = &cayman_vm_funcs;
++		break;
++	case CHIP_TAHITI:
++	case CHIP_PITCAIRN:
++	case CHIP_VERDE:
++		rdev->asic = &si_asic;
++		/* set num crtcs */
++		rdev->num_crtc = 6;
++		rdev->vm_manager.funcs = &si_vm_funcs;
+ 		break;
+ 	default:
+ 		/* FIXME: not supported yet */
+@@ -1057,8 +1739,8 @@ int radeon_asic_init(struct radeon_device *rdev)
+ 	}
+ 
+ 	if (rdev->flags & RADEON_IS_IGP) {
+-		rdev->asic->get_memory_clock = NULL;
+-		rdev->asic->set_memory_clock = NULL;
++		rdev->asic->pm.get_memory_clock = NULL;
++		rdev->asic->pm.set_memory_clock = NULL;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
+index 5aa6670..917e49c 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -58,17 +58,20 @@ void r100_fini(struct radeon_device *rdev);
+ int r100_suspend(struct radeon_device *rdev);
+ int r100_resume(struct radeon_device *rdev);
+ void r100_vga_set_state(struct radeon_device *rdev, bool state);
+-bool r100_gpu_is_lockup(struct radeon_device *rdev);
++bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+ int r100_asic_reset(struct radeon_device *rdev);
+ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
+ void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+-void r100_cp_commit(struct radeon_device *rdev);
+-void r100_ring_start(struct radeon_device *rdev);
++void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+ int r100_irq_set(struct radeon_device *rdev);
+ int r100_irq_process(struct radeon_device *rdev);
+ void r100_fence_ring_emit(struct radeon_device *rdev,
+ 			  struct radeon_fence *fence);
++void r100_semaphore_ring_emit(struct radeon_device *rdev,
++			      struct radeon_ring *cp,
++			      struct radeon_semaphore *semaphore,
++			      bool emit_wait);
+ int r100_cs_parse(struct radeon_cs_parser *p);
+ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
+@@ -83,7 +86,7 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
+ void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
+ void r100_bandwidth_update(struct radeon_device *rdev);
+ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+-int r100_ring_test(struct radeon_device *rdev);
++int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+ void r100_hpd_init(struct radeon_device *rdev);
+ void r100_hpd_fini(struct radeon_device *rdev);
+ bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+@@ -101,12 +104,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev);
+ int r100_debugfs_mc_info_init(struct radeon_device *rdev);
+ int r100_gui_wait_for_idle(struct radeon_device *rdev);
+ void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
+-			    struct radeon_cp *cp);
++			    struct radeon_ring *cp);
+ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
+ 			   struct r100_gpu_lockup *lockup,
+-			   struct radeon_cp *cp);
++			   struct radeon_ring *cp);
+ void r100_ib_fini(struct radeon_device *rdev);
+-int r100_ib_init(struct radeon_device *rdev);
++int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+ void r100_irq_disable(struct radeon_device *rdev);
+ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
+ void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
+@@ -136,6 +139,8 @@ extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
+ extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
+ extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
++extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
++extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
+ 
+ /*
+  * r200,rv250,rs300,rv280
+@@ -154,9 +159,9 @@ extern int r300_init(struct radeon_device *rdev);
+ extern void r300_fini(struct radeon_device *rdev);
+ extern int r300_suspend(struct radeon_device *rdev);
+ extern int r300_resume(struct radeon_device *rdev);
+-extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
++extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+ extern int r300_asic_reset(struct radeon_device *rdev);
+-extern void r300_ring_start(struct radeon_device *rdev);
++extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+ extern void r300_fence_ring_emit(struct radeon_device *rdev,
+ 				struct radeon_fence *fence);
+ extern int r300_cs_parse(struct radeon_cs_parser *p);
+@@ -173,6 +178,7 @@ extern int rv370_pcie_gart_init(struct radeon_device *rdev);
+ extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
+ extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
+ extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
++extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
+ 
+ /*
+  * r420,r423,rv410
+@@ -203,6 +209,7 @@ int rs400_gart_enable(struct radeon_device *rdev);
+ void rs400_gart_adjust_size(struct radeon_device *rdev);
+ void rs400_gart_disable(struct radeon_device *rdev);
+ void rs400_gart_fini(struct radeon_device *rdev);
++extern int rs400_mc_wait_for_idle(struct radeon_device *rdev);
+ 
+ /*
+  * rs600.
+@@ -233,7 +240,8 @@ extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
+ extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
+ void rs600_set_safe_registers(struct radeon_device *rdev);
+-
++extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
++extern int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+ 
+ /*
+  * rs690,rs740
+@@ -248,6 +256,7 @@ void rs690_bandwidth_update(struct radeon_device *rdev);
+ void rs690_line_buffer_adjust(struct radeon_device *rdev,
+ 					struct drm_display_mode *mode1,
+ 					struct drm_display_mode *mode2);
++extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
+ 
+ /*
+  * rv515
+@@ -261,7 +270,7 @@ int rv515_init(struct radeon_device *rdev);
+ void rv515_fini(struct radeon_device *rdev);
+ uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+-void rv515_ring_start(struct radeon_device *rdev);
++void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
+ void rv515_bandwidth_update(struct radeon_device *rdev);
+ int rv515_resume(struct radeon_device *rdev);
+ int rv515_suspend(struct radeon_device *rdev);
+@@ -272,13 +281,14 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
+ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
+ void rv515_clock_startup(struct radeon_device *rdev);
+ void rv515_debugfs(struct radeon_device *rdev);
+-
++int rv515_mc_wait_for_idle(struct radeon_device *rdev);
+ 
+ /*
+  * r520,rv530,rv560,rv570,r580
+  */
+ int r520_init(struct radeon_device *rdev);
+ int r520_resume(struct radeon_device *rdev);
++int r520_mc_wait_for_idle(struct radeon_device *rdev);
+ 
+ /*
+  * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
+@@ -290,22 +300,25 @@ int r600_resume(struct radeon_device *rdev);
+ void r600_vga_set_state(struct radeon_device *rdev, bool state);
+ int r600_wb_init(struct radeon_device *rdev);
+ void r600_wb_fini(struct radeon_device *rdev);
+-void r600_cp_commit(struct radeon_device *rdev);
+ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
+ uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
+ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+ int r600_cs_parse(struct radeon_cs_parser *p);
+ void r600_fence_ring_emit(struct radeon_device *rdev,
+ 			  struct radeon_fence *fence);
+-bool r600_gpu_is_lockup(struct radeon_device *rdev);
++void r600_semaphore_ring_emit(struct radeon_device *rdev,
++			      struct radeon_ring *cp,
++			      struct radeon_semaphore *semaphore,
++			      bool emit_wait);
++bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+ int r600_asic_reset(struct radeon_device *rdev);
+ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
+ 			 uint32_t tiling_flags, uint32_t pitch,
+ 			 uint32_t offset, uint32_t obj_size);
+ void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
+-int r600_ib_test(struct radeon_device *rdev);
++int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+-int r600_ring_test(struct radeon_device *rdev);
++int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+ int r600_copy_blit(struct radeon_device *rdev,
+ 		   uint64_t src_offset, uint64_t dst_offset,
+ 		   unsigned num_gpu_pages, struct radeon_fence *fence);
+@@ -325,7 +338,7 @@ extern int r600_get_pcie_lanes(struct radeon_device *rdev);
+ bool r600_card_posted(struct radeon_device *rdev);
+ void r600_cp_stop(struct radeon_device *rdev);
+ int r600_cp_start(struct radeon_device *rdev);
+-void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
++void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
+ int r600_cp_resume(struct radeon_device *rdev);
+ void r600_cp_fini(struct radeon_device *rdev);
+ int r600_count_pipe_bits(uint32_t val);
+@@ -366,6 +379,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
+ void r600_kms_blit_copy(struct radeon_device *rdev,
+ 			u64 src_gpu_addr, u64 dst_gpu_addr,
+ 			unsigned num_gpu_pages);
++int r600_mc_wait_for_idle(struct radeon_device *rdev);
+ 
+ /*
+  * rv770,rv730,rv710,rv740
+@@ -394,7 +408,7 @@ int evergreen_init(struct radeon_device *rdev);
+ void evergreen_fini(struct radeon_device *rdev);
+ int evergreen_suspend(struct radeon_device *rdev);
+ int evergreen_resume(struct radeon_device *rdev);
+-bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
++bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+ int evergreen_asic_reset(struct radeon_device *rdev);
+ void evergreen_bandwidth_update(struct radeon_device *rdev);
+ void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+@@ -414,18 +428,59 @@ extern void sumo_pm_init_profile(struct radeon_device *rdev);
+ extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
+ extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
++extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
+ void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+ int evergreen_blit_init(struct radeon_device *rdev);
++int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+ 
+ /*
+  * cayman
+  */
++void cayman_fence_ring_emit(struct radeon_device *rdev,
++			    struct radeon_fence *fence);
+ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
+ int cayman_init(struct radeon_device *rdev);
+ void cayman_fini(struct radeon_device *rdev);
+ int cayman_suspend(struct radeon_device *rdev);
+ int cayman_resume(struct radeon_device *rdev);
+-bool cayman_gpu_is_lockup(struct radeon_device *rdev);
++bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+ int cayman_asic_reset(struct radeon_device *rdev);
++void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
++int cayman_vm_init(struct radeon_device *rdev);
++void cayman_vm_fini(struct radeon_device *rdev);
++int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
++void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
++void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
++uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
++			      struct radeon_vm *vm,
++			      uint32_t flags);
++void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
++			unsigned pfn, uint64_t addr, uint32_t flags);
++int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
++
++/* DCE6 - SI */
++void dce6_bandwidth_update(struct radeon_device *rdev);
++
++/*
++ * si
++ */
++void si_fence_ring_emit(struct radeon_device *rdev,
++			struct radeon_fence *fence);
++void si_pcie_gart_tlb_flush(struct radeon_device *rdev);
++int si_init(struct radeon_device *rdev);
++void si_fini(struct radeon_device *rdev);
++int si_suspend(struct radeon_device *rdev);
++int si_resume(struct radeon_device *rdev);
++bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
++int si_asic_reset(struct radeon_device *rdev);
++void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
++int si_irq_set(struct radeon_device *rdev);
++int si_irq_process(struct radeon_device *rdev);
++int si_vm_init(struct radeon_device *rdev);
++void si_vm_fini(struct radeon_device *rdev);
++int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
++void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
++void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
++int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+ 
+ #endif
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 38585c5..5e30e12 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -56,6 +56,10 @@ extern void
+ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
+ 			  uint32_t supported_device);
+ 
++/* local */
++static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
++				    u16 voltage_id, u16 *voltage);
++
+ union atom_supported_devices {
+ 	struct _ATOM_SUPPORTED_DEVICES_INFO info;
+ 	struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
+@@ -253,7 +257,9 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
+ 
+ 	memset(&hpd, 0, sizeof(struct radeon_hpd));
+ 
+-	if (ASIC_IS_DCE4(rdev))
++	if (ASIC_IS_DCE6(rdev))
++		reg = SI_DC_GPIO_HPD_A;
++	else if (ASIC_IS_DCE4(rdev))
+ 		reg = EVERGREEN_DC_GPIO_HPD_A;
+ 	else
+ 		reg = AVIVO_DC_GPIO_HPD_A;
+@@ -1890,6 +1896,8 @@ static const char *pp_lib_thermal_controller_names[] = {
+ 	"emc2103",
+ 	"Sumo",
+ 	"Northern Islands",
++	"Southern Islands",
++	"lm96163",
+ };
+ 
+ union power_info {
+@@ -1906,6 +1914,7 @@ union pplib_clock_info {
+ 	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ 	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ 	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
++	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+ };
+ 
+ union pplib_power_state {
+@@ -2163,6 +2172,11 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
+ 				 (controller->ucFanParameters &
+ 				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ 			rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
++		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
++			DRM_INFO("Internal thermal controller %s fan control\n",
++				 (controller->ucFanParameters &
++				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
++			rdev->pm.int_thermal_type = THERMAL_TYPE_SI;
+ 		} else if ((controller->ucType ==
+ 			    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+ 			   (controller->ucType ==
+@@ -2283,6 +2297,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
+ 						   union pplib_clock_info *clock_info)
+ {
+ 	u32 sclk, mclk;
++	u16 vddc;
+ 
+ 	if (rdev->flags & RADEON_IS_IGP) {
+ 		if (rdev->family >= CHIP_PALM) {
+@@ -2294,6 +2309,19 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
+ 			sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
+ 			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ 		}
++	} else if (ASIC_IS_DCE6(rdev)) {
++		sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
++		sclk |= clock_info->si.ucEngineClockHigh << 16;
++		mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
++		mclk |= clock_info->si.ucMemoryClockHigh << 16;
++		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
++		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
++		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
++			VOLTAGE_SW;
++		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
++			le16_to_cpu(clock_info->si.usVDDC);
++		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
++			le16_to_cpu(clock_info->si.usVDDCI);
+ 	} else if (ASIC_IS_DCE4(rdev)) {
+ 		sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
+ 		sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
+@@ -2321,11 +2349,18 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
+ 	}
+ 
+ 	/* patch up vddc if necessary */
+-	if (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) {
+-		u16 vddc;
+-
+-		if (radeon_atom_get_max_vddc(rdev, &vddc) == 0)
++	switch (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage) {
++	case ATOM_VIRTUAL_VOLTAGE_ID0:
++	case ATOM_VIRTUAL_VOLTAGE_ID1:
++	case ATOM_VIRTUAL_VOLTAGE_ID2:
++	case ATOM_VIRTUAL_VOLTAGE_ID3:
++		if (radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC,
++					     rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage,
++					     &vddc) == 0)
+ 			rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
++		break;
++	default:
++		break;
+ 	}
+ 
+ 	if (rdev->flags & RADEON_IS_IGP) {
+@@ -2435,9 +2470,9 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ 	int i, j, non_clock_array_index, clock_array_index;
+ 	int state_index = 0, mode_index = 0;
+ 	union pplib_clock_info *clock_info;
+-	struct StateArray *state_array;
+-	struct ClockInfoArray *clock_info_array;
+-	struct NonClockInfoArray *non_clock_info_array;
++	struct _StateArray *state_array;
++	struct _ClockInfoArray *clock_info_array;
++	struct _NonClockInfoArray *non_clock_info_array;
+ 	bool valid;
+ 	union power_info *power_info;
+ 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+@@ -2450,13 +2485,13 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+ 
+ 	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+-	state_array = (struct StateArray *)
++	state_array = (struct _StateArray *)
+ 		(mode_info->atom_context->bios + data_offset +
+ 		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
+-	clock_info_array = (struct ClockInfoArray *)
++	clock_info_array = (struct _ClockInfoArray *)
+ 		(mode_info->atom_context->bios + data_offset +
+ 		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+-	non_clock_info_array = (struct NonClockInfoArray *)
++	non_clock_info_array = (struct _NonClockInfoArray *)
+ 		(mode_info->atom_context->bios + data_offset +
+ 		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+ 	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+@@ -2483,7 +2518,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+ 				if (clock_array_index >= clock_info_array->ucNumEntries)
+ 					continue;
+ 				clock_info = (union pplib_clock_info *)
+-					&clock_info_array->clockInfo[clock_array_index];
++					&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ 				valid = radeon_atombios_parse_pplib_clock_info(rdev,
+ 									       state_index, mode_index,
+ 									       clock_info);
+@@ -2640,6 +2675,7 @@ union set_voltage {
+ 	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
+ 	struct _SET_VOLTAGE_PARAMETERS v1;
+ 	struct _SET_VOLTAGE_PARAMETERS_V2 v2;
++	struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
+ };
+ 
+ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
+@@ -2666,6 +2702,11 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
+ 		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
+ 		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
+ 		break;
++	case 3:
++		args.v3.ucVoltageType = voltage_type;
++		args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
++		args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
++		break;
+ 	default:
+ 		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ 		return;
+@@ -2674,8 +2715,8 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
+ 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ 
+-int radeon_atom_get_max_vddc(struct radeon_device *rdev,
+-			     u16 *voltage)
++static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
++				    u16 voltage_id, u16 *voltage)
+ {
+ 	union set_voltage args;
+ 	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+@@ -2696,6 +2737,15 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev,
+ 
+ 		*voltage = le16_to_cpu(args.v2.usVoltageLevel);
+ 		break;
++	case 3:
++		args.v3.ucVoltageType = voltage_type;
++		args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
++		args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
++
++		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
++
++		*voltage = le16_to_cpu(args.v3.usVoltageLevel);
++		break;
+ 	default:
+ 		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ 		return -EINVAL;
+@@ -2947,6 +2997,20 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+ 			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
+ 		}
+ 	}
++	if ((radeon_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) &&
++	    (radeon_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) {
++		if (connected) {
++			DRM_DEBUG_KMS("DFP6 connected\n");
++			bios_0_scratch |= ATOM_S0_DFP6;
++			bios_3_scratch |= ATOM_S3_DFP6_ACTIVE;
++			bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6;
++		} else {
++			DRM_DEBUG_KMS("DFP6 disconnected\n");
++			bios_0_scratch &= ~ATOM_S0_DFP6;
++			bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE;
++			bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6;
++		}
++	}
+ 
+ 	if (rdev->family >= CHIP_R600) {
+ 		WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
+@@ -2967,6 +3031,9 @@ radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	uint32_t bios_3_scratch;
+ 
++	if (ASIC_IS_DCE4(rdev))
++		return;
++
+ 	if (rdev->family >= CHIP_R600)
+ 		bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
+ 	else
+@@ -3019,6 +3086,9 @@ radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	uint32_t bios_2_scratch;
+ 
++	if (ASIC_IS_DCE4(rdev))
++		return;
++
+ 	if (rdev->family >= CHIP_R600)
+ 		bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
+ 	else
+diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
+index 17e1a9b..fef7b72 100644
+--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
++++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
+@@ -43,17 +43,19 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
+ 
+ 	start_jiffies = jiffies;
+ 	for (i = 0; i < n; i++) {
+-		r = radeon_fence_create(rdev, &fence);
+-		if (r)
+-			return r;
+-
+ 		switch (flag) {
+ 		case RADEON_BENCHMARK_COPY_DMA:
++			r = radeon_fence_create(rdev, &fence, radeon_copy_dma_ring_index(rdev));
++			if (r)
++				return r;
+ 			r = radeon_copy_dma(rdev, saddr, daddr,
+ 					    size / RADEON_GPU_PAGE_SIZE,
+ 					    fence);
+ 			break;
+ 		case RADEON_BENCHMARK_COPY_BLIT:
++			r = radeon_fence_create(rdev, &fence, radeon_copy_blit_ring_index(rdev));
++			if (r)
++				return r;
+ 			r = radeon_copy_blit(rdev, saddr, daddr,
+ 					     size / RADEON_GPU_PAGE_SIZE,
+ 					     fence);
+@@ -129,7 +131,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
+ 	/* r100 doesn't have dma engine so skip the test */
+ 	/* also, VRAM-to-VRAM test doesn't make much sense for DMA */
+ 	/* skip it as well if domains are the same */
+-	if ((rdev->asic->copy_dma) && (sdomain != ddomain)) {
++	if ((rdev->asic->copy.dma) && (sdomain != ddomain)) {
+ 		time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+ 						RADEON_BENCHMARK_COPY_DMA, n);
+ 		if (time < 0)
+@@ -208,42 +210,42 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number)
+ 		break;
+ 	case 3:
+ 		/* GTT to VRAM, buffer size sweep, powers of 2 */
+-		for (i = 1; i <= 65536; i <<= 1)
+-			radeon_benchmark_move(rdev, i*1024,
++		for (i = 1; i <= 16384; i <<= 1)
++			radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+ 					      RADEON_GEM_DOMAIN_GTT,
+ 					      RADEON_GEM_DOMAIN_VRAM);
+ 		break;
+ 	case 4:
+ 		/* VRAM to GTT, buffer size sweep, powers of 2 */
+-		for (i = 1; i <= 65536; i <<= 1)
+-			radeon_benchmark_move(rdev, i*1024,
++		for (i = 1; i <= 16384; i <<= 1)
++			radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+ 					      RADEON_GEM_DOMAIN_VRAM,
+ 					      RADEON_GEM_DOMAIN_GTT);
+ 		break;
+ 	case 5:
+ 		/* VRAM to VRAM, buffer size sweep, powers of 2 */
+-		for (i = 1; i <= 65536; i <<= 1)
+-			radeon_benchmark_move(rdev, i*1024,
++		for (i = 1; i <= 16384; i <<= 1)
++			radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
+ 					      RADEON_GEM_DOMAIN_VRAM,
+ 					      RADEON_GEM_DOMAIN_VRAM);
+ 		break;
+ 	case 6:
+ 		/* GTT to VRAM, buffer size sweep, common modes */
+-		for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
++		for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ 			radeon_benchmark_move(rdev, common_modes[i],
+ 					      RADEON_GEM_DOMAIN_GTT,
+ 					      RADEON_GEM_DOMAIN_VRAM);
+ 		break;
+ 	case 7:
+ 		/* VRAM to GTT, buffer size sweep, common modes */
+-		for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
++		for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ 			radeon_benchmark_move(rdev, common_modes[i],
+ 					      RADEON_GEM_DOMAIN_VRAM,
+ 					      RADEON_GEM_DOMAIN_GTT);
+ 		break;
+ 	case 8:
+ 		/* VRAM to VRAM, buffer size sweep, common modes */
+-		for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
++		for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ 			radeon_benchmark_move(rdev, common_modes[i],
+ 					      RADEON_GEM_DOMAIN_VRAM,
+ 					      RADEON_GEM_DOMAIN_VRAM);
+diff --git a/drivers/gpu/drm/radeon/radeon_blit_common.h b/drivers/gpu/drm/radeon/radeon_blit_common.h
+new file mode 100644
+index 0000000..4ecbe72
+--- /dev/null
++++ b/drivers/gpu/drm/radeon/radeon_blit_common.h
+@@ -0,0 +1,44 @@
++/*
++ * Copyright 2009 Advanced Micro Devices, Inc.
++ * Copyright 2009 Red Hat Inc.
++ * Copyright 2012 Alcatel-Lucent, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __RADEON_BLIT_COMMON_H__
++
++#define DI_PT_RECTLIST        0x11
++#define DI_INDEX_SIZE_16_BIT  0x0
++#define DI_SRC_SEL_AUTO_INDEX 0x2
++
++#define FMT_8                 0x1
++#define FMT_5_6_5             0x8
++#define FMT_8_8_8_8           0x1a
++#define COLOR_8               0x1
++#define COLOR_5_6_5           0x8
++#define COLOR_8_8_8_8         0x1a
++
++#define RECT_UNIT_H           32
++#define RECT_UNIT_W           (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
++
++#define __RADEON_BLIT_COMMON_H__
++#endif
+diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
+index b6e18c8..9c6b29a 100644
+--- a/drivers/gpu/drm/radeon/radeon_clocks.c
++++ b/drivers/gpu/drm/radeon/radeon_clocks.c
+@@ -334,7 +334,7 @@ void radeon_get_clock_info(struct drm_device *dev)
+ 
+ 	if (!rdev->clock.default_sclk)
+ 		rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
+-	if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
++	if ((!rdev->clock.default_mclk) && rdev->asic->pm.get_memory_clock)
+ 		rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
+ 
+ 	rdev->pm.current_sclk = rdev->clock.default_sclk;
+@@ -633,7 +633,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+ 				tmp &= ~(R300_SCLK_FORCE_VAP);
+ 				tmp |= RADEON_SCLK_FORCE_CP;
+ 				WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+-				udelay(15000);
++				mdelay(15);
+ 
+ 				tmp = RREG32_PLL(R300_SCLK_CNTL2);
+ 				tmp &= ~(R300_SCLK_FORCE_TCL |
+@@ -651,12 +651,12 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+ 			tmp |= (RADEON_ENGIN_DYNCLK_MODE |
+ 				(0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
+ 			WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
+-			udelay(15000);
++			mdelay(15);
+ 
+ 			tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
+ 			tmp |= RADEON_SCLK_DYN_START_CNTL;
+ 			WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
+-			udelay(15000);
++			mdelay(15);
+ 
+ 			/* When DRI is enabled, setting DYN_STOP_LAT to zero can cause some R200
+ 			   to lockup randomly, leave them as set by BIOS.
+@@ -696,7 +696,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+ 					tmp |= RADEON_SCLK_MORE_FORCEON;
+ 				}
+ 				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+-				udelay(15000);
++				mdelay(15);
+ 			}
+ 
+ 			/* RV200::A11 A12, RV250::A11 A12 */
+@@ -709,7 +709,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+ 				tmp |= RADEON_TCL_BYPASS_DISABLE;
+ 				WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
+ 			}
+-			udelay(15000);
++			mdelay(15);
+ 
+ 			/*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
+ 			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+@@ -722,14 +722,14 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+ 				RADEON_PIXCLK_TMDS_ALWAYS_ONb);
+ 
+ 			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+-			udelay(15000);
++			mdelay(15);
+ 
+ 			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ 			tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
+ 				RADEON_PIXCLK_DAC_ALWAYS_ONb);
+ 
+ 			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
+-			udelay(15000);
++			mdelay(15);
+ 		}
+ 	} else {
+ 		/* Turn everything OFF (ForceON to everything) */
+@@ -861,7 +861,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+ 			}
+ 			WREG32_PLL(RADEON_SCLK_CNTL, tmp);
+ 
+-			udelay(16000);
++			mdelay(16);
+ 
+ 			if ((rdev->family == CHIP_R300) ||
+ 			    (rdev->family == CHIP_R350)) {
+@@ -870,7 +870,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+ 					R300_SCLK_FORCE_GA |
+ 					R300_SCLK_FORCE_CBA);
+ 				WREG32_PLL(R300_SCLK_CNTL2, tmp);
+-				udelay(16000);
++				mdelay(16);
+ 			}
+ 
+ 			if (rdev->flags & RADEON_IS_IGP) {
+@@ -878,7 +878,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+ 				tmp &= ~(RADEON_FORCEON_MCLKA |
+ 					 RADEON_FORCEON_YCLKA);
+ 				WREG32_PLL(RADEON_MCLK_CNTL, tmp);
+-				udelay(16000);
++				mdelay(16);
+ 			}
+ 
+ 			if ((rdev->family == CHIP_RV200) ||
+@@ -887,7 +887,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+ 				tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
+ 				tmp |= RADEON_SCLK_MORE_FORCEON;
+ 				WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
+-				udelay(16000);
++				mdelay(16);
+ 			}
+ 
+ 			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
+@@ -900,7 +900,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
+ 				 RADEON_PIXCLK_TMDS_ALWAYS_ONb);
+ 
+ 			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
+-			udelay(16000);
++			mdelay(16);
+ 
+ 			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ 			tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index 1b98338..2cad9fd 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -1536,9 +1536,6 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 			   of_machine_is_compatible("PowerBook6,7")) {
+ 			/* ibook */
+ 			rdev->mode_info.connector_table = CT_IBOOK;
+-		} else if (of_machine_is_compatible("PowerMac3,5")) {
+-			/* PowerMac G4 Silver radeon 7500 */
+-			rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
+ 		} else if (of_machine_is_compatible("PowerMac4,4")) {
+ 			/* emac */
+ 			rdev->mode_info.connector_table = CT_EMAC;
+@@ -1564,11 +1561,6 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 			   (rdev->pdev->subsystem_device == 0x4150)) {
+ 			/* Mac G5 tower 9600 */
+ 			rdev->mode_info.connector_table = CT_MAC_G5_9600;
+-		} else if ((rdev->pdev->device == 0x4c66) &&
+-			   (rdev->pdev->subsystem_vendor == 0x1002) &&
+-			   (rdev->pdev->subsystem_device == 0x4c66)) {
+-			/* SAM440ep RV250 embedded board */
+-			rdev->mode_info.connector_table = CT_SAM440EP;
+ 		} else
+ #endif /* CONFIG_PPC_PMAC */
+ #ifdef CONFIG_PPC64
+@@ -2142,115 +2134,6 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					    CONNECTOR_OBJECT_ID_SVIDEO,
+ 					    &hpd);
+ 		break;
+-	case CT_SAM440EP:
+-		DRM_INFO("Connector Table: %d (SAM440ep embedded board)\n",
+-			 rdev->mode_info.connector_table);
+-		/* LVDS */
+-		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
+-		hpd.hpd = RADEON_HPD_NONE;
+-		radeon_add_legacy_encoder(dev,
+-					  radeon_get_encoder_enum(dev,
+-								ATOM_DEVICE_LCD1_SUPPORT,
+-								0),
+-					  ATOM_DEVICE_LCD1_SUPPORT);
+-		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+-					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_LVDS,
+-					    &hpd);
+-		/* DVI-I - secondary dac, int tmds */
+-		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+-		hpd.hpd = RADEON_HPD_1; /* ??? */
+-		radeon_add_legacy_encoder(dev,
+-					  radeon_get_encoder_enum(dev,
+-								ATOM_DEVICE_DFP1_SUPPORT,
+-								0),
+-					  ATOM_DEVICE_DFP1_SUPPORT);
+-		radeon_add_legacy_encoder(dev,
+-					  radeon_get_encoder_enum(dev,
+-								ATOM_DEVICE_CRT2_SUPPORT,
+-								2),
+-					  ATOM_DEVICE_CRT2_SUPPORT);
+-		radeon_add_legacy_connector(dev, 1,
+-					    ATOM_DEVICE_DFP1_SUPPORT |
+-					    ATOM_DEVICE_CRT2_SUPPORT,
+-					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+-					    &hpd);
+-		/* VGA - primary dac */
+-		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+-		hpd.hpd = RADEON_HPD_NONE;
+-		radeon_add_legacy_encoder(dev,
+-					  radeon_get_encoder_enum(dev,
+-								ATOM_DEVICE_CRT1_SUPPORT,
+-								1),
+-					  ATOM_DEVICE_CRT1_SUPPORT);
+-		radeon_add_legacy_connector(dev, 2,
+-					    ATOM_DEVICE_CRT1_SUPPORT,
+-					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_VGA,
+-					    &hpd);
+-		/* TV - TV DAC */
+-		ddc_i2c.valid = false;
+-		hpd.hpd = RADEON_HPD_NONE;
+-		radeon_add_legacy_encoder(dev,
+-					  radeon_get_encoder_enum(dev,
+-								ATOM_DEVICE_TV1_SUPPORT,
+-								2),
+-					  ATOM_DEVICE_TV1_SUPPORT);
+-		radeon_add_legacy_connector(dev, 3, ATOM_DEVICE_TV1_SUPPORT,
+-					    DRM_MODE_CONNECTOR_SVIDEO,
+-					    &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SVIDEO,
+-					    &hpd);
+-		break;
+-	case CT_MAC_G4_SILVER:
+-		DRM_INFO("Connector Table: %d (mac g4 silver)\n",
+-			 rdev->mode_info.connector_table);
+-		/* DVI-I - tv dac, int tmds */
+-		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+-		hpd.hpd = RADEON_HPD_1; /* ??? */
+-		radeon_add_legacy_encoder(dev,
+-					  radeon_get_encoder_enum(dev,
+-								ATOM_DEVICE_DFP1_SUPPORT,
+-								0),
+-					  ATOM_DEVICE_DFP1_SUPPORT);
+-		radeon_add_legacy_encoder(dev,
+-					  radeon_get_encoder_enum(dev,
+-								ATOM_DEVICE_CRT2_SUPPORT,
+-								2),
+-					  ATOM_DEVICE_CRT2_SUPPORT);
+-		radeon_add_legacy_connector(dev, 0,
+-					    ATOM_DEVICE_DFP1_SUPPORT |
+-					    ATOM_DEVICE_CRT2_SUPPORT,
+-					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+-					    &hpd);
+-		/* VGA - primary dac */
+-		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+-		hpd.hpd = RADEON_HPD_NONE;
+-		radeon_add_legacy_encoder(dev,
+-					  radeon_get_encoder_enum(dev,
+-								ATOM_DEVICE_CRT1_SUPPORT,
+-								1),
+-					  ATOM_DEVICE_CRT1_SUPPORT);
+-		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+-					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_VGA,
+-					    &hpd);
+-		/* TV - TV DAC */
+-		ddc_i2c.valid = false;
+-		hpd.hpd = RADEON_HPD_NONE;
+-		radeon_add_legacy_encoder(dev,
+-					  radeon_get_encoder_enum(dev,
+-								ATOM_DEVICE_TV1_SUPPORT,
+-								2),
+-					  ATOM_DEVICE_TV1_SUPPORT);
+-		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+-					    DRM_MODE_CONNECTOR_SVIDEO,
+-					    &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SVIDEO,
+-					    &hpd);
+-		break;
+ 	default:
+ 		DRM_INFO("Connector table: %d (invalid)\n",
+ 			 rdev->mode_info.connector_table);
+@@ -2962,7 +2845,7 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
+ 					case 4:
+ 						val = RBIOS16(index);
+ 						index += 2;
+-						udelay(val * 1000);
++						mdelay(val);
+ 						break;
+ 					case 6:
+ 						slave_addr = id & 0xff;
+@@ -3161,7 +3044,7 @@ static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
+ 					udelay(150);
+ 					break;
+ 				case 2:
+-					udelay(1000);
++					mdelay(1);
+ 					break;
+ 				case 3:
+ 					while (tmp--) {
+@@ -3192,13 +3075,13 @@ static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
+ 						/*mclk_cntl |= 0x00001111;*//* ??? */
+ 						WREG32_PLL(RADEON_MCLK_CNTL,
+ 							   mclk_cntl);
+-						udelay(10000);
++						mdelay(10);
+ #endif
+ 						WREG32_PLL
+ 						    (RADEON_CLK_PWRMGT_CNTL,
+ 						     tmp &
+ 						     ~RADEON_CG_NO1_DEBUG_0);
+-						udelay(10000);
++						mdelay(10);
+ 					}
+ 					break;
+ 				default:
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 6fd53b6..ab63bcd 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -846,6 +846,27 @@ static int radeon_dvi_get_modes(struct drm_connector *connector)
+ 	return ret;
+ }
+ 
++static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++	enum drm_connector_status status;
++
++	/* We only trust HPD on R600 and newer ASICS. */
++	if (rdev->family >= CHIP_R600
++	  && radeon_connector->hpd.hpd != RADEON_HPD_NONE) {
++		if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
++			status = connector_status_connected;
++		else
++			status = connector_status_disconnected;
++		if (connector->status == status)
++			return true;
++	}
++
++	return false;
++}
++
+ /*
+  * DVI is complicated
+  * Do a DDC probe, if DDC probe passes, get the full EDID so
+@@ -870,6 +891,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
+ 	enum drm_connector_status ret = connector_status_disconnected;
+ 	bool dret = false;
+ 
++	if (!force && radeon_check_hpd_status_unchanged(connector))
++		return connector->status;
++
+ 	if (radeon_connector->ddc_bus)
+ 		dret = radeon_ddc_probe(radeon_connector, false);
+ 	if (dret) {
+@@ -1080,7 +1104,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
+ 		    (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
+ 			return MODE_OK;
+ 		else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
+-			if (ASIC_IS_DCE3(rdev)) {
++			if (ASIC_IS_DCE6(rdev)) {
+ 				/* HDMI 1.3+ supports max clock of 340 Mhz */
+ 				if (mode->clock > 340000)
+ 					return MODE_CLOCK_HIGH;
+@@ -1140,13 +1164,23 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
+ 	    (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
+ 		struct drm_display_mode *mode;
+ 
+-		if (!radeon_dig_connector->edp_on)
+-			atombios_set_edp_panel_power(connector,
+-						     ATOM_TRANSMITTER_ACTION_POWER_ON);
+-		ret = radeon_ddc_get_modes(radeon_connector);
+-		if (!radeon_dig_connector->edp_on)
+-			atombios_set_edp_panel_power(connector,
+-						     ATOM_TRANSMITTER_ACTION_POWER_OFF);
++		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
++			if (!radeon_dig_connector->edp_on)
++				atombios_set_edp_panel_power(connector,
++							     ATOM_TRANSMITTER_ACTION_POWER_ON);
++			ret = radeon_ddc_get_modes(radeon_connector);
++			if (!radeon_dig_connector->edp_on)
++				atombios_set_edp_panel_power(connector,
++							     ATOM_TRANSMITTER_ACTION_POWER_OFF);
++		} else {
++			/* need to setup ddc on the bridge */
++			if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
++			    ENCODER_OBJECT_ID_NONE) {
++				if (encoder)
++					radeon_atom_ext_encoder_setup_ddc(encoder);
++			}
++			ret = radeon_ddc_get_modes(radeon_connector);
++		}
+ 
+ 		if (ret > 0) {
+ 			if (encoder) {
+@@ -1157,7 +1191,6 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
+ 			return ret;
+ 		}
+ 
+-		encoder = radeon_best_single_encoder(connector);
+ 		if (!encoder)
+ 			return 0;
+ 
+@@ -1264,6 +1297,9 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
+ 	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+ 	struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ 
++	if (!force && radeon_check_hpd_status_unchanged(connector))
++		return connector->status;
++
+ 	if (radeon_connector->edid) {
+ 		kfree(radeon_connector->edid);
+ 		radeon_connector->edid = NULL;
+diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
+index 72ae826..0ebb7d4 100644
+--- a/drivers/gpu/drm/radeon/radeon_cp.c
++++ b/drivers/gpu/drm/radeon/radeon_cp.c
+@@ -2115,6 +2115,8 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
+ 		break;
+ 	}
+ 
++	pci_set_master(dev->pdev);
++
+ 	if (drm_pci_device_is_agp(dev))
+ 		dev_priv->flags |= RADEON_IS_AGP;
+ 	else if (pci_is_pcie(dev->pdev))
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index 29afd71..cf723c4 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -58,7 +58,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+ 
+ 		duplicate = false;
+ 		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
+-		for (j = 0; j < p->nrelocs; j++) {
++		for (j = 0; j < i; j++) {
+ 			if (r->handle == p->relocs[j].handle) {
+ 				p->relocs_ptr[i] = &p->relocs[j];
+ 				duplicate = true;
+@@ -84,16 +84,88 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+ 			p->relocs[i].flags = r->flags;
+ 			radeon_bo_list_add_object(&p->relocs[i].lobj,
+ 						  &p->validated);
+-		}
++
++		} else
++			p->relocs[i].handle = 0;
+ 	}
+ 	return radeon_bo_list_validate(&p->validated);
+ }
+ 
++static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
++{
++	p->priority = priority;
++
++	switch (ring) {
++	default:
++		DRM_ERROR("unknown ring id: %d\n", ring);
++		return -EINVAL;
++	case RADEON_CS_RING_GFX:
++		p->ring = RADEON_RING_TYPE_GFX_INDEX;
++		break;
++	case RADEON_CS_RING_COMPUTE:
++		if (p->rdev->family >= CHIP_TAHITI) {
++			if (p->priority > 0)
++				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
++			else
++				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
++		} else
++			p->ring = RADEON_RING_TYPE_GFX_INDEX;
++		break;
++	}
++	return 0;
++}
++
++static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
++{
++	bool sync_to_ring[RADEON_NUM_RINGS] = { };
++	int i, r;
++
++	for (i = 0; i < p->nrelocs; i++) {
++		if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
++			continue;
++
++		if (!(p->relocs[i].flags & RADEON_RELOC_DONT_SYNC)) {
++			struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
++			if (!radeon_fence_signaled(fence)) {
++				sync_to_ring[fence->ring] = true;
++			}
++		}
++	}
++
++	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
++		/* no need to sync to our own or unused rings */
++		if (i == p->ring || !sync_to_ring[i] || !p->rdev->ring[i].ready)
++			continue;
++
++		if (!p->ib->fence->semaphore) {
++			r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
++			if (r)
++				return r;
++		}
++
++		r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
++		if (r)
++			return r;
++		radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
++		radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);
++
++		r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3);
++		if (r)
++			return r;
++		radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
++		radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
++	}
++	return 0;
++}
++
++/* XXX: note that this is called from the legacy UMS CS ioctl as well */
+ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+ {
+ 	struct drm_radeon_cs *cs = data;
+ 	uint64_t *chunk_array_ptr;
+-	unsigned size, i, flags = 0;
++	unsigned size, i;
++	u32 ring = RADEON_CS_RING_GFX;
++	s32 priority = 0;
+ 
+ 	if (!cs->num_chunks) {
+ 		return 0;
+@@ -103,6 +175,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+ 	p->idx = 0;
+ 	p->chunk_ib_idx = -1;
+ 	p->chunk_relocs_idx = -1;
++	p->chunk_flags_idx = -1;
++	p->chunk_const_ib_idx = -1;
+ 	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
+ 	if (p->chunks_array == NULL) {
+ 		return -ENOMEM;
+@@ -112,6 +186,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+ 			       sizeof(uint64_t)*cs->num_chunks)) {
+ 		return -EFAULT;
+ 	}
++	p->cs_flags = 0;
+ 	p->nchunks = cs->num_chunks;
+ 	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
+ 	if (p->chunks == NULL) {
+@@ -140,16 +215,25 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+ 			if (p->chunks[i].length_dw == 0)
+ 				return -EINVAL;
+ 		}
+-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
+-		    !p->chunks[i].length_dw) {
+-			return -EINVAL;
++		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
++			p->chunk_const_ib_idx = i;
++			/* zero length CONST IB isn't useful */
++			if (p->chunks[i].length_dw == 0)
++				return -EINVAL;
++		}
++		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
++			p->chunk_flags_idx = i;
++			/* zero length flags aren't useful */
++			if (p->chunks[i].length_dw == 0)
++				return -EINVAL;
+ 		}
+ 
+ 		p->chunks[i].length_dw = user_chunk.length_dw;
+ 		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
+ 
+ 		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
+-		if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
++		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
++		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
+ 			size = p->chunks[i].length_dw * sizeof(uint32_t);
+ 			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
+ 			if (p->chunks[i].kdata == NULL) {
+@@ -160,29 +244,55 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+ 				return -EFAULT;
+ 			}
+ 			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+-				flags = p->chunks[i].kdata[0];
+-			}
+-		} else {
+-			p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+-			p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+-			if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
+-				kfree(p->chunks[i].kpage[0]);
+-				kfree(p->chunks[i].kpage[1]);
+-				return -ENOMEM;
++				p->cs_flags = p->chunks[i].kdata[0];
++				if (p->chunks[i].length_dw > 1)
++					ring = p->chunks[i].kdata[1];
++				if (p->chunks[i].length_dw > 2)
++					priority = (s32)p->chunks[i].kdata[2];
+ 			}
+-			p->chunks[i].kpage_idx[0] = -1;
+-			p->chunks[i].kpage_idx[1] = -1;
+-			p->chunks[i].last_copied_page = -1;
+-			p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
+ 		}
+ 	}
+-	if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
+-		DRM_ERROR("cs IB too big: %d\n",
+-			  p->chunks[p->chunk_ib_idx].length_dw);
+-		return -EINVAL;
++
++	/* these are KMS only */
++	if (p->rdev) {
++		if ((p->cs_flags & RADEON_CS_USE_VM) &&
++		    !p->rdev->vm_manager.enabled) {
++			DRM_ERROR("VM not active on asic!\n");
++			return -EINVAL;
++		}
++
++		/* we only support VM on SI+ */
++		if ((p->rdev->family >= CHIP_TAHITI) &&
++		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
++			DRM_ERROR("VM required on SI+!\n");
++			return -EINVAL;
++		}
++
++		if (radeon_cs_get_ring(p, ring, priority))
++			return -EINVAL;
++	}
++
++	/* deal with non-vm */
++	if ((p->chunk_ib_idx != -1) &&
++	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
++	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
++		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
++			DRM_ERROR("cs IB too big: %d\n",
++				  p->chunks[p->chunk_ib_idx].length_dw);
++			return -EINVAL;
++		}
++		p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
++		p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
++		if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
++		    p->chunks[p->chunk_ib_idx].kpage[1] == NULL)
++			return -ENOMEM;
++		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
++		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
++		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
++		p->chunks[p->chunk_ib_idx].last_page_index =
++			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
+ 	}
+ 
+-	p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
+ 	return 0;
+ }
+ 
+@@ -224,14 +334,186 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
+ 	radeon_ib_free(parser->rdev, &parser->ib);
+ }
+ 
++static int radeon_cs_ib_chunk(struct radeon_device *rdev,
++			      struct radeon_cs_parser *parser)
++{
++	struct radeon_cs_chunk *ib_chunk;
++	int r;
++
++	if (parser->chunk_ib_idx == -1)
++		return 0;
++
++	if (parser->cs_flags & RADEON_CS_USE_VM)
++		return 0;
++
++	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
++	/* Copy the packet into the IB, the parser will read from the
++	 * input memory (cached) and write to the IB (which can be
++	 * uncached).
++	 */
++	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
++			   ib_chunk->length_dw * 4);
++	if (r) {
++		DRM_ERROR("Failed to get ib !\n");
++		return r;
++	}
++	parser->ib->length_dw = ib_chunk->length_dw;
++	r = radeon_cs_parse(rdev, parser->ring, parser);
++	if (r || parser->parser_error) {
++		DRM_ERROR("Invalid command stream !\n");
++		return r;
++	}
++	r = radeon_cs_finish_pages(parser);
++	if (r) {
++		DRM_ERROR("Invalid command stream !\n");
++		return r;
++	}
++	r = radeon_cs_sync_rings(parser);
++	if (r) {
++		DRM_ERROR("Failed to synchronize rings !\n");
++	}
++	parser->ib->vm_id = 0;
++	r = radeon_ib_schedule(rdev, parser->ib);
++	if (r) {
++		DRM_ERROR("Failed to schedule IB !\n");
++	}
++	return r;
++}
++
++static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
++				   struct radeon_vm *vm)
++{
++	struct radeon_bo_list *lobj;
++	struct radeon_bo *bo;
++	int r;
++
++	list_for_each_entry(lobj, &parser->validated, tv.head) {
++		bo = lobj->bo;
++		r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
++		if (r) {
++			return r;
++		}
++	}
++	return 0;
++}
++
++static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
++				 struct radeon_cs_parser *parser)
++{
++	struct radeon_cs_chunk *ib_chunk;
++	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
++	struct radeon_vm *vm = &fpriv->vm;
++	int r;
++
++	if (parser->chunk_ib_idx == -1)
++		return 0;
++
++	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
++		return 0;
++
++	if ((rdev->family >= CHIP_TAHITI) &&
++	    (parser->chunk_const_ib_idx != -1)) {
++		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
++		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
++			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
++			return -EINVAL;
++		}
++		r =  radeon_ib_get(rdev, parser->ring, &parser->const_ib,
++				   ib_chunk->length_dw * 4);
++		if (r) {
++			DRM_ERROR("Failed to get const ib !\n");
++			return r;
++		}
++		parser->const_ib->is_const_ib = true;
++		parser->const_ib->length_dw = ib_chunk->length_dw;
++		/* Copy the packet into the IB */
++		if (DRM_COPY_FROM_USER(parser->const_ib->ptr, ib_chunk->user_ptr,
++				       ib_chunk->length_dw * 4)) {
++			return -EFAULT;
++		}
++		r = radeon_ring_ib_parse(rdev, parser->ring, parser->const_ib);
++		if (r) {
++			return r;
++		}
++	}
++
++	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
++	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
++		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
++		return -EINVAL;
++	}
++	r =  radeon_ib_get(rdev, parser->ring, &parser->ib,
++			   ib_chunk->length_dw * 4);
++	if (r) {
++		DRM_ERROR("Failed to get ib !\n");
++		return r;
++	}
++	parser->ib->length_dw = ib_chunk->length_dw;
++	/* Copy the packet into the IB */
++	if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
++			       ib_chunk->length_dw * 4)) {
++		return -EFAULT;
++	}
++	r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
++	if (r) {
++		return r;
++	}
++
++	mutex_lock(&vm->mutex);
++	r = radeon_vm_bind(rdev, vm);
++	if (r) {
++		goto out;
++	}
++	r = radeon_bo_vm_update_pte(parser, vm);
++	if (r) {
++		goto out;
++	}
++	r = radeon_cs_sync_rings(parser);
++	if (r) {
++		DRM_ERROR("Failed to synchronize rings !\n");
++	}
++
++	if ((rdev->family >= CHIP_TAHITI) &&
++	    (parser->chunk_const_ib_idx != -1)) {
++		parser->const_ib->vm_id = vm->id;
++		/* ib pool is bind at 0 in virtual address space to gpu_addr is the
++		 * offset inside the pool bo
++		 */
++		parser->const_ib->gpu_addr = parser->const_ib->sa_bo.offset;
++		r = radeon_ib_schedule(rdev, parser->const_ib);
++		if (r)
++			goto out;
++	}
++
++	parser->ib->vm_id = vm->id;
++	/* ib pool is bind at 0 in virtual address space to gpu_addr is the
++	 * offset inside the pool bo
++	 */
++	parser->ib->gpu_addr = parser->ib->sa_bo.offset;
++	parser->ib->is_const_ib = false;
++	r = radeon_ib_schedule(rdev, parser->ib);
++out:
++	if (!r) {
++		if (vm->fence) {
++			radeon_fence_unref(&vm->fence);
++		}
++		vm->fence = radeon_fence_ref(parser->ib->fence);
++	}
++	mutex_unlock(&fpriv->vm.mutex);
++	return r;
++}
++
+ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ {
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	struct radeon_cs_parser parser;
+-	struct radeon_cs_chunk *ib_chunk;
+ 	int r;
+ 
+ 	radeon_mutex_lock(&rdev->cs_mutex);
++	if (!rdev->accel_working) {
++		radeon_mutex_unlock(&rdev->cs_mutex);
++		return -EBUSY;
++	}
+ 	/* initialize parser */
+ 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
+ 	parser.filp = filp;
+@@ -245,13 +527,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		radeon_mutex_unlock(&rdev->cs_mutex);
+ 		return r;
+ 	}
+-	r =  radeon_ib_get(rdev, &parser.ib);
+-	if (r) {
+-		DRM_ERROR("Failed to get ib !\n");
+-		radeon_cs_parser_fini(&parser, r);
+-		radeon_mutex_unlock(&rdev->cs_mutex);
+-		return r;
+-	}
+ 	r = radeon_cs_parser_relocs(&parser);
+ 	if (r) {
+ 		if (r != -ERESTARTSYS)
+@@ -260,29 +535,15 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		radeon_mutex_unlock(&rdev->cs_mutex);
+ 		return r;
+ 	}
+-	/* Copy the packet into the IB, the parser will read from the
+-	 * input memory (cached) and write to the IB (which can be
+-	 * uncached). */
+-	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
+-	parser.ib->length_dw = ib_chunk->length_dw;
+-	r = radeon_cs_parse(&parser);
+-	if (r || parser.parser_error) {
+-		DRM_ERROR("Invalid command stream !\n");
+-		radeon_cs_parser_fini(&parser, r);
+-		radeon_mutex_unlock(&rdev->cs_mutex);
+-		return r;
+-	}
+-	r = radeon_cs_finish_pages(&parser);
++	r = radeon_cs_ib_chunk(rdev, &parser);
+ 	if (r) {
+-		DRM_ERROR("Invalid command stream !\n");
+-		radeon_cs_parser_fini(&parser, r);
+-		radeon_mutex_unlock(&rdev->cs_mutex);
+-		return r;
++		goto out;
+ 	}
+-	r = radeon_ib_schedule(rdev, parser.ib);
++	r = radeon_cs_ib_vm_chunk(rdev, &parser);
+ 	if (r) {
+-		DRM_ERROR("Failed to schedule IB !\n");
++		goto out;
+ 	}
++out:
+ 	radeon_cs_parser_fini(&parser, r);
+ 	radeon_mutex_unlock(&rdev->cs_mutex);
+ 	return r;
+diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
+index 2132109..8fb6f41 100644
+--- a/drivers/gpu/drm/radeon/radeon_cursor.c
++++ b/drivers/gpu/drm/radeon/radeon_cursor.c
+@@ -197,7 +197,12 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+ 
+ unpin:
+ 	if (radeon_crtc->cursor_bo) {
+-		radeon_gem_object_unpin(radeon_crtc->cursor_bo);
++		robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
++		ret = radeon_bo_reserve(robj, false);
++		if (likely(ret == 0)) {
++			radeon_bo_unpin(robj);
++			radeon_bo_unreserve(robj);
++		}
+ 		drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+ 	}
+ 
+@@ -233,7 +238,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ 		y = 0;
+ 	}
+ 
+-	if (ASIC_IS_AVIVO(rdev)) {
++	/* fixed on DCE6 and newer */
++	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
+ 		int i = 0;
+ 		struct drm_crtc *crtc_p;
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index c5762e3..de5e0b5 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -89,6 +89,10 @@ static const char radeon_family_name[][16] = {
+ 	"TURKS",
+ 	"CAICOS",
+ 	"CAYMAN",
++	"ARUBA",
++	"TAHITI",
++	"PITCAIRN",
++	"VERDE",
+ 	"LAST",
+ };
+ 
+@@ -237,8 +241,8 @@ int radeon_wb_init(struct radeon_device *rdev)
+ 				rdev->wb.use_event = true;
+ 		}
+ 	}
+-	/* always use writeback/events on NI */
+-	if (ASIC_IS_DCE5(rdev)) {
++	/* always use writeback/events on NI, APUs */
++	if (rdev->family >= CHIP_PALM) {
+ 		rdev->wb.enabled = true;
+ 		rdev->wb.use_event = true;
+ 	}
+@@ -720,18 +724,25 @@ int radeon_device_init(struct radeon_device *rdev,
+ 	/* mutex initialization are all done here so we
+ 	 * can recall function without having locking issues */
+ 	radeon_mutex_init(&rdev->cs_mutex);
+-	mutex_init(&rdev->ib_pool.mutex);
+-	mutex_init(&rdev->cp.mutex);
++	radeon_mutex_init(&rdev->ib_pool.mutex);
++	for (i = 0; i < RADEON_NUM_RINGS; ++i)
++		mutex_init(&rdev->ring[i].mutex);
+ 	mutex_init(&rdev->dc_hw_i2c_mutex);
+ 	if (rdev->family >= CHIP_R600)
+ 		spin_lock_init(&rdev->ih.lock);
+ 	mutex_init(&rdev->gem.mutex);
+ 	mutex_init(&rdev->pm.mutex);
+ 	mutex_init(&rdev->vram_mutex);
+-	rwlock_init(&rdev->fence_drv.lock);
++	rwlock_init(&rdev->fence_lock);
++	rwlock_init(&rdev->semaphore_drv.lock);
+ 	INIT_LIST_HEAD(&rdev->gem.objects);
+ 	init_waitqueue_head(&rdev->irq.vblank_queue);
+ 	init_waitqueue_head(&rdev->irq.idle_queue);
++	INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
++	/* initialize vm here */
++	rdev->vm_manager.use_bitmap = 1;
++	rdev->vm_manager.max_pfn = 1 << 20;
++	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
+ 
+ 	/* Set asic functions */
+ 	r = radeon_asic_init(rdev);
+@@ -768,8 +779,14 @@ int radeon_device_init(struct radeon_device *rdev,
+ 	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+ 	if (r) {
+ 		rdev->need_dma32 = true;
++		dma_bits = 32;
+ 		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
+ 	}
++	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
++	if (r) {
++		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
++		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
++	}
+ 
+ 	/* Registers mapping */
+ 	/* TODO: block userspace mapping of io register */
+@@ -817,15 +834,20 @@ int radeon_device_init(struct radeon_device *rdev,
+ 		if (r)
+ 			return r;
+ 	}
+-	if (radeon_testing) {
++	if ((radeon_testing & 1)) {
+ 		radeon_test_moves(rdev);
+ 	}
++	if ((radeon_testing & 2)) {
++		radeon_test_syncing(rdev);
++	}
+ 	if (radeon_benchmarking) {
+ 		radeon_benchmark(rdev, radeon_benchmarking);
+ 	}
+ 	return 0;
+ }
+ 
++static void radeon_debugfs_remove_files(struct radeon_device *rdev);
++
+ void radeon_device_fini(struct radeon_device *rdev)
+ {
+ 	DRM_INFO("radeon: finishing device.\n");
+@@ -840,6 +862,7 @@ void radeon_device_fini(struct radeon_device *rdev)
+ 	rdev->rio_mem = NULL;
+ 	iounmap(rdev->rmmio);
+ 	rdev->rmmio = NULL;
++	radeon_debugfs_remove_files(rdev);
+ }
+ 
+ 
+@@ -851,7 +874,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
+ 	struct radeon_device *rdev;
+ 	struct drm_crtc *crtc;
+ 	struct drm_connector *connector;
+-	int r;
++	int i, r;
+ 
+ 	if (dev == NULL || dev->dev_private == NULL) {
+ 		return -ENODEV;
+@@ -892,7 +915,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
+ 	/* evict vram memory */
+ 	radeon_bo_evict_vram(rdev);
+ 	/* wait for gpu to finish processing current batch */
+-	radeon_fence_wait_last(rdev);
++	for (i = 0; i < RADEON_NUM_RINGS; i++)
++		radeon_fence_wait_last(rdev, i);
+ 
+ 	radeon_save_bios_scratch_regs(rdev);
+ 
+@@ -941,9 +965,11 @@ int radeon_resume_kms(struct drm_device *dev)
+ 	radeon_fbdev_set_suspend(rdev, 0);
+ 	console_unlock();
+ 
+-	/* init dig PHYs */
+-	if (rdev->is_atom_bios)
++	/* init dig PHYs, disp eng pll */
++	if (rdev->is_atom_bios) {
+ 		radeon_atom_encoder_init(rdev);
++		radeon_atom_disp_eng_pll_init(rdev);
++	}
+ 	/* reset hpd state */
+ 	radeon_hpd_init(rdev);
+ 	/* blat the mode back in */
+@@ -993,36 +1019,29 @@ int radeon_gpu_reset(struct radeon_device *rdev)
+ /*
+  * Debugfs
+  */
+-struct radeon_debugfs {
+-	struct drm_info_list	*files;
+-	unsigned		num_files;
+-};
+-static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
+-static unsigned _radeon_debugfs_count = 0;
+-
+ int radeon_debugfs_add_files(struct radeon_device *rdev,
+ 			     struct drm_info_list *files,
+ 			     unsigned nfiles)
+ {
+ 	unsigned i;
+ 
+-	for (i = 0; i < _radeon_debugfs_count; i++) {
+-		if (_radeon_debugfs[i].files == files) {
++	for (i = 0; i < rdev->debugfs_count; i++) {
++		if (rdev->debugfs[i].files == files) {
+ 			/* Already registered */
+ 			return 0;
+ 		}
+ 	}
+ 
+-	i = _radeon_debugfs_count + 1;
++	i = rdev->debugfs_count + 1;
+ 	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
+ 		DRM_ERROR("Reached maximum number of debugfs components.\n");
+ 		DRM_ERROR("Report so we increase "
+ 		          "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
+ 		return -EINVAL;
+ 	}
+-	_radeon_debugfs[_radeon_debugfs_count].files = files;
+-	_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
+-	_radeon_debugfs_count = i;
++	rdev->debugfs[rdev->debugfs_count].files = files;
++	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
++	rdev->debugfs_count = i;
+ #if defined(CONFIG_DEBUG_FS)
+ 	drm_debugfs_create_files(files, nfiles,
+ 				 rdev->ddev->control->debugfs_root,
+@@ -1034,6 +1053,22 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
+ 	return 0;
+ }
+ 
++static void radeon_debugfs_remove_files(struct radeon_device *rdev)
++{
++#if defined(CONFIG_DEBUG_FS)
++	unsigned i;
++
++	for (i = 0; i < rdev->debugfs_count; i++) {
++		drm_debugfs_remove_files(rdev->debugfs[i].files,
++					 rdev->debugfs[i].num_files,
++					 rdev->ddev->control);
++		drm_debugfs_remove_files(rdev->debugfs[i].files,
++					 rdev->debugfs[i].num_files,
++					 rdev->ddev->primary);
++	}
++#endif
++}
++
+ #if defined(CONFIG_DEBUG_FS)
+ int radeon_debugfs_init(struct drm_minor *minor)
+ {
+@@ -1042,11 +1077,5 @@ int radeon_debugfs_init(struct drm_minor *minor)
+ 
+ void radeon_debugfs_cleanup(struct drm_minor *minor)
+ {
+-	unsigned i;
+-
+-	for (i = 0; i < _radeon_debugfs_count; i++) {
+-		drm_debugfs_remove_files(_radeon_debugfs[i].files,
+-					 _radeon_debugfs[i].num_files, minor);
+-	}
+ }
+ #endif
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index aec8e0c..1f50727 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -303,8 +303,17 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+ 	if (update_pending &&
+ 	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+ 							       &vpos, &hpos)) &&
+-	    (vpos >=0) &&
+-	    (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) {
++	    ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
++	     (vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
++		/* crtc didn't flip in this target vblank interval,
++		 * but flip is pending in crtc. Based on the current
++		 * scanout position we know that the current frame is
++		 * (nearly) complete and the flip will (likely)
++		 * complete before the start of the next frame.
++		 */
++		update_pending = 0;
++	}
++	if (update_pending) {
+ 		/* crtc didn't flip in this target vblank interval,
+ 		 * but flip is pending in crtc. It will complete it
+ 		 * in next vblank interval, so complete the flip at
+@@ -393,7 +402,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+ 		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+ 		goto pflip_cleanup;
+ 	}
+-	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
++	/* Only 27 bit offset for legacy CRTC */
++	r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
++				     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
+ 	if (unlikely(r != 0)) {
+ 		radeon_bo_unreserve(rbo);
+ 		r = -EINVAL;
+@@ -406,7 +417,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+ 	if (!ASIC_IS_AVIVO(rdev)) {
+ 		/* crtc offset is from display base addr not FB location */
+ 		base -= radeon_crtc->legacy_display_base_addr;
+-		pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
++		pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);
+ 
+ 		if (tiling_flags & RADEON_TILING_MACRO) {
+ 			if (ASIC_IS_R300(rdev)) {
+@@ -522,7 +533,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
+ 		radeon_legacy_init_crtc(dev, radeon_crtc);
+ }
+ 
+-static const char *encoder_names[36] = {
++static const char *encoder_names[37] = {
+ 	"NONE",
+ 	"INTERNAL_LVDS",
+ 	"INTERNAL_TMDS1",
+@@ -559,6 +570,7 @@ static const char *encoder_names[36] = {
+ 	"INTERNAL_UNIPHY2",
+ 	"NUTMEG",
+ 	"TRAVIS",
++	"INTERNAL_VCE"
+ };
+ 
+ static const char *connector_names[15] = {
+@@ -1083,29 +1095,36 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
+ 	.create_handle = radeon_user_framebuffer_create_handle,
+ };
+ 
+-void
++int
+ radeon_framebuffer_init(struct drm_device *dev,
+ 			struct radeon_framebuffer *rfb,
+-			struct drm_mode_fb_cmd *mode_cmd,
++			struct drm_mode_fb_cmd2 *mode_cmd,
+ 			struct drm_gem_object *obj)
+ {
++	int ret;
+ 	rfb->obj = obj;
+-	drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
++	ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
++	if (ret) {
++		rfb->obj = NULL;
++		return ret;
++	}
+ 	drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
++	return 0;
+ }
+ 
+ static struct drm_framebuffer *
+ radeon_user_framebuffer_create(struct drm_device *dev,
+ 			       struct drm_file *file_priv,
+-			       struct drm_mode_fb_cmd *mode_cmd)
++			       struct drm_mode_fb_cmd2 *mode_cmd)
+ {
+ 	struct drm_gem_object *obj;
+ 	struct radeon_framebuffer *radeon_fb;
++	int ret;
+ 
+-	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
++	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ 	if (obj ==  NULL) {
+ 		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
+-			"can't create framebuffer\n", mode_cmd->handle);
++			"can't create framebuffer\n", mode_cmd->handles[0]);
+ 		return ERR_PTR(-ENOENT);
+ 	}
+ 
+@@ -1113,7 +1132,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
+ 	if (radeon_fb == NULL)
+ 		return ERR_PTR(-ENOMEM);
+ 
+-	radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
++	ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
++	if (ret) {
++		kfree(radeon_fb);
++		drm_gem_object_unreference_unlocked(obj);
++		return ERR_PTR(ret);
++	}
+ 
+ 	return &radeon_fb->base;
+ }
+@@ -1129,11 +1153,6 @@ static const struct drm_mode_config_funcs radeon_mode_funcs = {
+ 	.output_poll_changed = radeon_output_poll_changed
+ };
+ 
+-struct drm_prop_enum_list {
+-	int type;
+-	char *name;
+-};
+-
+ static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
+ {	{ 0, "driver" },
+ 	{ 1, "bios" },
+@@ -1158,86 +1177,53 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] =
+ 
+ static int radeon_modeset_create_props(struct radeon_device *rdev)
+ {
+-	int i, sz;
++	int sz;
+ 
+ 	if (rdev->is_atom_bios) {
+ 		rdev->mode_info.coherent_mode_property =
+-			drm_property_create(rdev->ddev,
+-					    DRM_MODE_PROP_RANGE,
+-					    "coherent", 2);
++			drm_property_create_range(rdev->ddev, 0 , "coherent", 0, 1);
+ 		if (!rdev->mode_info.coherent_mode_property)
+ 			return -ENOMEM;
+-
+-		rdev->mode_info.coherent_mode_property->values[0] = 0;
+-		rdev->mode_info.coherent_mode_property->values[1] = 1;
+ 	}
+ 
+ 	if (!ASIC_IS_AVIVO(rdev)) {
+ 		sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
+ 		rdev->mode_info.tmds_pll_property =
+-			drm_property_create(rdev->ddev,
+-					    DRM_MODE_PROP_ENUM,
+-					    "tmds_pll", sz);
+-		for (i = 0; i < sz; i++) {
+-			drm_property_add_enum(rdev->mode_info.tmds_pll_property,
+-					      i,
+-					      radeon_tmds_pll_enum_list[i].type,
+-					      radeon_tmds_pll_enum_list[i].name);
+-		}
++			drm_property_create_enum(rdev->ddev, 0,
++					    "tmds_pll",
++					    radeon_tmds_pll_enum_list, sz);
+ 	}
+ 
+ 	rdev->mode_info.load_detect_property =
+-		drm_property_create(rdev->ddev,
+-				    DRM_MODE_PROP_RANGE,
+-				    "load detection", 2);
++		drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
+ 	if (!rdev->mode_info.load_detect_property)
+ 		return -ENOMEM;
+-	rdev->mode_info.load_detect_property->values[0] = 0;
+-	rdev->mode_info.load_detect_property->values[1] = 1;
+ 
+ 	drm_mode_create_scaling_mode_property(rdev->ddev);
+ 
+ 	sz = ARRAY_SIZE(radeon_tv_std_enum_list);
+ 	rdev->mode_info.tv_std_property =
+-		drm_property_create(rdev->ddev,
+-				    DRM_MODE_PROP_ENUM,
+-				    "tv standard", sz);
+-	for (i = 0; i < sz; i++) {
+-		drm_property_add_enum(rdev->mode_info.tv_std_property,
+-				      i,
+-				      radeon_tv_std_enum_list[i].type,
+-				      radeon_tv_std_enum_list[i].name);
+-	}
++		drm_property_create_enum(rdev->ddev, 0,
++				    "tv standard",
++				    radeon_tv_std_enum_list, sz);
+ 
+ 	sz = ARRAY_SIZE(radeon_underscan_enum_list);
+ 	rdev->mode_info.underscan_property =
+-		drm_property_create(rdev->ddev,
+-				    DRM_MODE_PROP_ENUM,
+-				    "underscan", sz);
+-	for (i = 0; i < sz; i++) {
+-		drm_property_add_enum(rdev->mode_info.underscan_property,
+-				      i,
+-				      radeon_underscan_enum_list[i].type,
+-				      radeon_underscan_enum_list[i].name);
+-	}
++		drm_property_create_enum(rdev->ddev, 0,
++				    "underscan",
++				    radeon_underscan_enum_list, sz);
+ 
+ 	rdev->mode_info.underscan_hborder_property =
+-		drm_property_create(rdev->ddev,
+-					DRM_MODE_PROP_RANGE,
+-					"underscan hborder", 2);
++		drm_property_create_range(rdev->ddev, 0,
++					"underscan hborder", 0, 128);
+ 	if (!rdev->mode_info.underscan_hborder_property)
+ 		return -ENOMEM;
+-	rdev->mode_info.underscan_hborder_property->values[0] = 0;
+-	rdev->mode_info.underscan_hborder_property->values[1] = 128;
+ 
+ 	rdev->mode_info.underscan_vborder_property =
+-		drm_property_create(rdev->ddev,
+-					DRM_MODE_PROP_RANGE,
+-					"underscan vborder", 2);
++		drm_property_create_range(rdev->ddev, 0,
++					"underscan vborder", 0, 128);
+ 	if (!rdev->mode_info.underscan_vborder_property)
+ 		return -ENOMEM;
+-	rdev->mode_info.underscan_vborder_property->values[0] = 0;
+-	rdev->mode_info.underscan_vborder_property->values[1] = 128;
+ 
+ 	return 0;
+ }
+@@ -1283,6 +1269,9 @@ int radeon_modeset_init(struct radeon_device *rdev)
+ 		rdev->ddev->mode_config.max_height = 4096;
+ 	}
+ 
++	rdev->ddev->mode_config.preferred_depth = 24;
++	rdev->ddev->mode_config.prefer_shadow = 1;
++
+ 	rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
+ 
+ 	ret = radeon_modeset_create_props(rdev);
+@@ -1310,9 +1299,11 @@ int radeon_modeset_init(struct radeon_device *rdev)
+ 		return ret;
+ 	}
+ 
+-	/* init dig PHYs */
+-	if (rdev->is_atom_bios)
++	/* init dig PHYs, disp eng pll */
++	if (rdev->is_atom_bios) {
+ 		radeon_atom_encoder_init(rdev);
++		radeon_atom_disp_eng_pll_init(rdev);
++	}
+ 
+ 	/* initialize hpd */
+ 	radeon_hpd_init(rdev);
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
+index 71499fc..15250fb 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.c
++++ b/drivers/gpu/drm/radeon/radeon_drv.c
+@@ -54,9 +54,13 @@
+  *   2.10.0 - fusion 2D tiling
+  *   2.11.0 - backend map, initial compute support for the CS checker
+  *   2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
++ *   2.13.0 - virtual memory support, streamout
++ *   2.14.0 - add evergreen tiling informations
++ *   2.15.0 - add max_pipes query
++ *   2.16.0 - fix evergreen 2D tiled surface calculation
+  */
+ #define KMS_DRIVER_MAJOR	2
+-#define KMS_DRIVER_MINOR	12
++#define KMS_DRIVER_MINOR	16
+ #define KMS_DRIVER_PATCHLEVEL	0
+ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
+ int radeon_driver_unload_kms(struct drm_device *dev);
+@@ -84,6 +88,10 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
+ 			 struct drm_file *file_priv);
+ int radeon_gem_object_init(struct drm_gem_object *obj);
+ void radeon_gem_object_free(struct drm_gem_object *obj);
++int radeon_gem_object_open(struct drm_gem_object *obj,
++				struct drm_file *file_priv);
++void radeon_gem_object_close(struct drm_gem_object *obj,
++				struct drm_file *file_priv);
+ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+ 				      int *vpos, int *hpos);
+ extern struct drm_ioctl_desc radeon_ioctls_kms[];
+@@ -140,7 +148,7 @@ module_param_named(vramlimit, radeon_vram_limit, int, 0600);
+ MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
+ module_param_named(agpmode, radeon_agpmode, int, 0444);
+ 
+-MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32,64, etc)\n");
++MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc)");
+ module_param_named(gartsize, radeon_gart_size, int, 0600);
+ 
+ MODULE_PARM_DESC(benchmark, "Run benchmark");
+@@ -206,6 +214,21 @@ static struct pci_device_id pciidlist[] = {
+ MODULE_DEVICE_TABLE(pci, pciidlist);
+ #endif
+ 
++static const struct file_operations radeon_driver_old_fops = {
++	.owner = THIS_MODULE,
++	.open = drm_open,
++	.release = drm_release,
++	.unlocked_ioctl = drm_ioctl,
++	.mmap = drm_mmap,
++	.poll = drm_poll,
++	.fasync = drm_fasync,
++	.read = drm_read,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl = radeon_compat_ioctl,
++#endif
++	.llseek = noop_llseek,
++};
++
+ static struct drm_driver driver_old = {
+ 	.driver_features =
+ 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+@@ -232,21 +255,7 @@ static struct drm_driver driver_old = {
+ 	.reclaim_buffers = drm_core_reclaim_buffers,
+ 	.ioctls = radeon_ioctls,
+ 	.dma_ioctl = radeon_cp_buffers,
+-	.fops = {
+-		 .owner = THIS_MODULE,
+-		 .open = drm_open,
+-		 .release = drm_release,
+-		 .unlocked_ioctl = drm_ioctl,
+-		 .mmap = drm_mmap,
+-		 .poll = drm_poll,
+-		 .fasync = drm_fasync,
+-		 .read = drm_read,
+-#ifdef CONFIG_COMPAT
+-		 .compat_ioctl = radeon_compat_ioctl,
+-#endif
+-		 .llseek = noop_llseek,
+-	},
+-
++	.fops = &radeon_driver_old_fops,
+ 	.name = DRIVER_NAME,
+ 	.desc = DRIVER_DESC,
+ 	.date = DRIVER_DATE,
+@@ -304,6 +313,20 @@ radeon_pci_resume(struct pci_dev *pdev)
+ 	return radeon_resume_kms(dev);
+ }
+ 
++static const struct file_operations radeon_driver_kms_fops = {
++	.owner = THIS_MODULE,
++	.open = drm_open,
++	.release = drm_release,
++	.unlocked_ioctl = drm_ioctl,
++	.mmap = radeon_mmap,
++	.poll = drm_poll,
++	.fasync = drm_fasync,
++	.read = drm_read,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl = radeon_kms_compat_ioctl,
++#endif
++};
++
+ static struct drm_driver kms_driver = {
+ 	.driver_features =
+ 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+@@ -335,24 +358,13 @@ static struct drm_driver kms_driver = {
+ 	.ioctls = radeon_ioctls_kms,
+ 	.gem_init_object = radeon_gem_object_init,
+ 	.gem_free_object = radeon_gem_object_free,
++	.gem_open_object = radeon_gem_object_open,
++	.gem_close_object = radeon_gem_object_close,
+ 	.dma_ioctl = radeon_dma_ioctl_kms,
+ 	.dumb_create = radeon_mode_dumb_create,
+ 	.dumb_map_offset = radeon_mode_dumb_mmap,
+ 	.dumb_destroy = radeon_mode_dumb_destroy,
+-	.fops = {
+-		 .owner = THIS_MODULE,
+-		 .open = drm_open,
+-		 .release = drm_release,
+-		 .unlocked_ioctl = drm_ioctl,
+-		 .mmap = radeon_mmap,
+-		 .poll = drm_poll,
+-		 .fasync = drm_fasync,
+-		 .read = drm_read,
+-#ifdef CONFIG_COMPAT
+-		 .compat_ioctl = radeon_kms_compat_ioctl,
+-#endif
+-	},
+-
++	.fops = &radeon_driver_kms_fops,
+ 	.name = DRIVER_NAME,
+ 	.desc = DRIVER_DESC,
+ 	.date = DRIVER_DATE,
+diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
+index 4b27efa..7467069 100644
+--- a/drivers/gpu/drm/radeon/radeon_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_encoders.c
+@@ -202,6 +202,22 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
+ 	return NULL;
+ }
+ 
++struct drm_connector *
++radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++	struct drm_connector *connector;
++	struct radeon_connector *radeon_connector;
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		radeon_connector = to_radeon_connector(connector);
++		if (radeon_encoder->devices & radeon_connector->devices)
++			return connector;
++	}
++	return NULL;
++}
++
+ struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
+ {
+ 	struct drm_device *dev = encoder->dev;
+@@ -288,3 +304,64 @@ void radeon_panel_mode_fixup(struct drm_encoder *encoder,
+ 
+ }
+ 
++bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
++				    u32 pixel_clock)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct drm_connector *connector;
++	struct radeon_connector *radeon_connector;
++	struct radeon_connector_atom_dig *dig_connector;
++
++	connector = radeon_get_connector_for_encoder(encoder);
++	/* if we don't have an active device yet, just use one of
++	 * the connectors tied to the encoder.
++	 */
++	if (!connector)
++		connector = radeon_get_connector_for_encoder_init(encoder);
++	radeon_connector = to_radeon_connector(connector);
++
++	switch (connector->connector_type) {
++	case DRM_MODE_CONNECTOR_DVII:
++	case DRM_MODE_CONNECTOR_HDMIB:
++		if (radeon_connector->use_digital) {
++			/* HDMI 1.3 supports up to 340 Mhz over single link */
++			if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
++				if (pixel_clock > 340000)
++					return true;
++				else
++					return false;
++			} else {
++				if (pixel_clock > 165000)
++					return true;
++				else
++					return false;
++			}
++		} else
++			return false;
++	case DRM_MODE_CONNECTOR_DVID:
++	case DRM_MODE_CONNECTOR_HDMIA:
++	case DRM_MODE_CONNECTOR_DisplayPort:
++		dig_connector = radeon_connector->con_priv;
++		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
++		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
++			return false;
++		else {
++			/* HDMI 1.3 supports up to 340 Mhz over single link */
++			if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
++				if (pixel_clock > 340000)
++					return true;
++				else
++					return false;
++			} else {
++				if (pixel_clock > 165000)
++					return true;
++				else
++					return false;
++			}
++		}
++	default:
++		return false;
++	}
++}
++
+diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
+index ec2f1ea..d1fafea 100644
+--- a/drivers/gpu/drm/radeon/radeon_family.h
++++ b/drivers/gpu/drm/radeon/radeon_family.h
+@@ -87,6 +87,10 @@ enum radeon_family {
+ 	CHIP_TURKS,
+ 	CHIP_CAICOS,
+ 	CHIP_CAYMAN,
++	CHIP_ARUBA,
++	CHIP_TAHITI,
++	CHIP_PITCAIRN,
++	CHIP_VERDE,
+ 	CHIP_LAST,
+ };
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
+index 0b7b486..5906914 100644
+--- a/drivers/gpu/drm/radeon/radeon_fb.c
++++ b/drivers/gpu/drm/radeon/radeon_fb.c
+@@ -103,7 +103,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
+ }
+ 
+ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+-					 struct drm_mode_fb_cmd *mode_cmd,
++					 struct drm_mode_fb_cmd2 *mode_cmd,
+ 					 struct drm_gem_object **gobj_p)
+ {
+ 	struct radeon_device *rdev = rfbdev->rdev;
+@@ -114,13 +114,17 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+ 	int ret;
+ 	int aligned_size, size;
+ 	int height = mode_cmd->height;
++	u32 bpp, depth;
++
++	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+ 
+ 	/* need to align pitch with crtc limits */
+-	mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
++	mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
++						  fb_tiled) * ((bpp + 1) / 8);
+ 
+ 	if (rdev->family >= CHIP_R600)
+ 		height = ALIGN(mode_cmd->height, 8);
+-	size = mode_cmd->pitch * height;
++	size = mode_cmd->pitches[0] * height;
+ 	aligned_size = ALIGN(size, PAGE_SIZE);
+ 	ret = radeon_gem_object_create(rdev, aligned_size, 0,
+ 				       RADEON_GEM_DOMAIN_VRAM,
+@@ -137,7 +141,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+ 		tiling_flags = RADEON_TILING_MACRO;
+ 
+ #ifdef __BIG_ENDIAN
+-	switch (mode_cmd->bpp) {
++	switch (bpp) {
+ 	case 32:
+ 		tiling_flags |= RADEON_TILING_SWAP_32BIT;
+ 		break;
+@@ -151,7 +155,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+ 	if (tiling_flags) {
+ 		ret = radeon_bo_set_tiling_flags(rbo,
+ 						 tiling_flags | RADEON_TILING_SURFACE,
+-						 mode_cmd->pitch);
++						 mode_cmd->pitches[0]);
+ 		if (ret)
+ 			dev_err(rdev->dev, "FB failed to set tiling flags\n");
+ 	}
+@@ -160,7 +164,10 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
+ 	ret = radeon_bo_reserve(rbo, false);
+ 	if (unlikely(ret != 0))
+ 		goto out_unref;
+-	ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL);
++	/* Only 27 bit offset for legacy CRTC */
++	ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
++				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
++				       NULL);
+ 	if (ret) {
+ 		radeon_bo_unreserve(rbo);
+ 		goto out_unref;
+@@ -187,7 +194,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
+ 	struct radeon_device *rdev = rfbdev->rdev;
+ 	struct fb_info *info;
+ 	struct drm_framebuffer *fb = NULL;
+-	struct drm_mode_fb_cmd mode_cmd;
++	struct drm_mode_fb_cmd2 mode_cmd;
+ 	struct drm_gem_object *gobj = NULL;
+ 	struct radeon_bo *rbo = NULL;
+ 	struct device *device = &rdev->pdev->dev;
+@@ -201,10 +208,15 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
+ 	if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
+ 		sizes->surface_bpp = 32;
+ 
+-	mode_cmd.bpp = sizes->surface_bpp;
+-	mode_cmd.depth = sizes->surface_depth;
++	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
++							  sizes->surface_depth);
+ 
+ 	ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
++	if (ret) {
++		DRM_ERROR("failed to create fbcon object %d\n", ret);
++		return ret;
++	}
++
+ 	rbo = gem_to_radeon_bo(gobj);
+ 
+ 	/* okay we have an object now allocate the framebuffer */
+@@ -216,7 +228,11 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
+ 
+ 	info->par = rfbdev;
+ 
+-	radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
++	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
++	if (ret) {
++		DRM_ERROR("failed to initalise framebuffer %d\n", ret);
++		goto out_unref;
++	}
+ 
+ 	fb = &rfbdev->rfb.base;
+ 
+@@ -228,7 +244,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
+ 
+ 	strcpy(info->fix.id, "radeondrmfb");
+ 
+-	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
++	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ 
+ 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+ 	info->fbops = &radeonfb_ops;
+@@ -250,11 +266,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
+ 	info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
+ 	info->apertures->ranges[0].size = rdev->mc.aper_size;
+ 
+-	info->pixmap.size = 64*1024;
+-	info->pixmap.buf_align = 8;
+-	info->pixmap.access_align = 32;
+-	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+-	info->pixmap.scan_align = 1;
++	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+ 
+ 	if (info->screen_base == NULL) {
+ 		ret = -ENOSPC;
+@@ -271,7 +283,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
+ 	DRM_INFO("vram apper at 0x%lX\n",  (unsigned long)rdev->mc.aper_base);
+ 	DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
+ 	DRM_INFO("fb depth is %d\n", fb->depth);
+-	DRM_INFO("   pitch is %d\n", fb->pitch);
++	DRM_INFO("   pitch is %d\n", fb->pitches[0]);
+ 
+ 	vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
+ 	return 0;
+diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
+index 76ec0e9..4bd36a3 100644
+--- a/drivers/gpu/drm/radeon/radeon_fence.c
++++ b/drivers/gpu/drm/radeon/radeon_fence.c
+@@ -40,32 +40,24 @@
+ #include "radeon.h"
+ #include "radeon_trace.h"
+ 
+-static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
++static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
+ {
+ 	if (rdev->wb.enabled) {
+-		u32 scratch_index;
+-		if (rdev->wb.use_event)
+-			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+-		else
+-			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+-		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
+-	} else
+-		WREG32(rdev->fence_drv.scratch_reg, seq);
++		*rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
++	} else {
++		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
++	}
+ }
+ 
+-static u32 radeon_fence_read(struct radeon_device *rdev)
++static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
+ {
+-	u32 seq;
++	u32 seq = 0;
+ 
+ 	if (rdev->wb.enabled) {
+-		u32 scratch_index;
+-		if (rdev->wb.use_event)
+-			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+-		else
+-			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+-		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
+-	} else
+-		seq = RREG32(rdev->fence_drv.scratch_reg);
++		seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
++	} else {
++		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
++	}
+ 	return seq;
+ }
+ 
+@@ -73,28 +65,28 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
+ {
+ 	unsigned long irq_flags;
+ 
+-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+-	if (fence->emited) {
+-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
++	write_lock_irqsave(&rdev->fence_lock, irq_flags);
++	if (fence->emitted) {
++		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ 		return 0;
+ 	}
+-	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
+-	if (!rdev->cp.ready)
++	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
++	if (!rdev->ring[fence->ring].ready)
+ 		/* FIXME: cp is not running assume everythings is done right
+ 		 * away
+ 		 */
+-		radeon_fence_write(rdev, fence->seq);
++		radeon_fence_write(rdev, fence->seq, fence->ring);
+ 	else
+-		radeon_fence_ring_emit(rdev, fence);
++		radeon_fence_ring_emit(rdev, fence->ring, fence);
+ 
+ 	trace_radeon_fence_emit(rdev->ddev, fence->seq);
+-	fence->emited = true;
+-	list_move_tail(&fence->list, &rdev->fence_drv.emited);
+-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
++	fence->emitted = true;
++	list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
++	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ 	return 0;
+ }
+ 
+-static bool radeon_fence_poll_locked(struct radeon_device *rdev)
++static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
+ {
+ 	struct radeon_fence *fence;
+ 	struct list_head *i, *n;
+@@ -102,34 +94,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
+ 	bool wake = false;
+ 	unsigned long cjiffies;
+ 
+-	seq = radeon_fence_read(rdev);
+-	if (seq != rdev->fence_drv.last_seq) {
+-		rdev->fence_drv.last_seq = seq;
+-		rdev->fence_drv.last_jiffies = jiffies;
+-		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
++	seq = radeon_fence_read(rdev, ring);
++	if (seq != rdev->fence_drv[ring].last_seq) {
++		rdev->fence_drv[ring].last_seq = seq;
++		rdev->fence_drv[ring].last_jiffies = jiffies;
++		rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ 	} else {
+ 		cjiffies = jiffies;
+-		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
+-			cjiffies -= rdev->fence_drv.last_jiffies;
+-			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
++		if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
++			cjiffies -= rdev->fence_drv[ring].last_jiffies;
++			if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
+ 				/* update the timeout */
+-				rdev->fence_drv.last_timeout -= cjiffies;
++				rdev->fence_drv[ring].last_timeout -= cjiffies;
+ 			} else {
+ 				/* the 500ms timeout is elapsed we should test
+ 				 * for GPU lockup
+ 				 */
+-				rdev->fence_drv.last_timeout = 1;
++				rdev->fence_drv[ring].last_timeout = 1;
+ 			}
+ 		} else {
+ 			/* wrap around update last jiffies, we will just wait
+ 			 * a little longer
+ 			 */
+-			rdev->fence_drv.last_jiffies = cjiffies;
++			rdev->fence_drv[ring].last_jiffies = cjiffies;
+ 		}
+ 		return false;
+ 	}
+ 	n = NULL;
+-	list_for_each(i, &rdev->fence_drv.emited) {
++	list_for_each(i, &rdev->fence_drv[ring].emitted) {
+ 		fence = list_entry(i, struct radeon_fence, list);
+ 		if (fence->seq == seq) {
+ 			n = i;
+@@ -141,11 +133,11 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
+ 		i = n;
+ 		do {
+ 			n = i->prev;
+-			list_move_tail(i, &rdev->fence_drv.signaled);
++			list_move_tail(i, &rdev->fence_drv[ring].signaled);
+ 			fence = list_entry(i, struct radeon_fence, list);
+ 			fence->signaled = true;
+ 			i = n;
+-		} while (i != &rdev->fence_drv.emited);
++		} while (i != &rdev->fence_drv[ring].emitted);
+ 		wake = true;
+ 	}
+ 	return wake;
+@@ -157,14 +149,18 @@ static void radeon_fence_destroy(struct kref *kref)
+         struct radeon_fence *fence;
+ 
+ 	fence = container_of(kref, struct radeon_fence, kref);
+-	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
++	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
+ 	list_del(&fence->list);
+-	fence->emited = false;
+-	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
++	fence->emitted = false;
++	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
++	if (fence->semaphore)
++		radeon_semaphore_free(fence->rdev, fence->semaphore);
+ 	kfree(fence);
+ }
+ 
+-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
++int radeon_fence_create(struct radeon_device *rdev,
++			struct radeon_fence **fence,
++			int ring)
+ {
+ 	unsigned long irq_flags;
+ 
+@@ -174,18 +170,19 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
+ 	}
+ 	kref_init(&((*fence)->kref));
+ 	(*fence)->rdev = rdev;
+-	(*fence)->emited = false;
++	(*fence)->emitted = false;
+ 	(*fence)->signaled = false;
+ 	(*fence)->seq = 0;
++	(*fence)->ring = ring;
++	(*fence)->semaphore = NULL;
+ 	INIT_LIST_HEAD(&(*fence)->list);
+ 
+-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+-	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
+-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
++	write_lock_irqsave(&rdev->fence_lock, irq_flags);
++	list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
++	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ 	return 0;
+ }
+ 
+-
+ bool radeon_fence_signaled(struct radeon_fence *fence)
+ {
+ 	unsigned long irq_flags;
+@@ -197,21 +194,21 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
+ 	if (fence->rdev->gpu_lockup)
+ 		return true;
+ 
+-	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
++	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
+ 	signaled = fence->signaled;
+ 	/* if we are shuting down report all fence as signaled */
+ 	if (fence->rdev->shutdown) {
+ 		signaled = true;
+ 	}
+-	if (!fence->emited) {
+-		WARN(1, "Querying an unemited fence : %p !\n", fence);
++	if (!fence->emitted) {
++		WARN(1, "Querying an unemitted fence : %p !\n", fence);
+ 		signaled = true;
+ 	}
+ 	if (!signaled) {
+-		radeon_fence_poll_locked(fence->rdev);
++		radeon_fence_poll_locked(fence->rdev, fence->ring);
+ 		signaled = fence->signaled;
+ 	}
+-	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
++	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
+ 	return signaled;
+ }
+ 
+@@ -230,24 +227,24 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
+ 	if (radeon_fence_signaled(fence)) {
+ 		return 0;
+ 	}
+-	timeout = rdev->fence_drv.last_timeout;
++	timeout = rdev->fence_drv[fence->ring].last_timeout;
+ retry:
+ 	/* save current sequence used to check for GPU lockup */
+-	seq = rdev->fence_drv.last_seq;
++	seq = rdev->fence_drv[fence->ring].last_seq;
+ 	trace_radeon_fence_wait_begin(rdev->ddev, seq);
+ 	if (intr) {
+-		radeon_irq_kms_sw_irq_get(rdev);
+-		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
++		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
++		r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
+ 				radeon_fence_signaled(fence), timeout);
+-		radeon_irq_kms_sw_irq_put(rdev);
++		radeon_irq_kms_sw_irq_put(rdev, fence->ring);
+ 		if (unlikely(r < 0)) {
+ 			return r;
+ 		}
+ 	} else {
+-		radeon_irq_kms_sw_irq_get(rdev);
+-		r = wait_event_timeout(rdev->fence_drv.queue,
++		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
++		r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
+ 			 radeon_fence_signaled(fence), timeout);
+-		radeon_irq_kms_sw_irq_put(rdev);
++		radeon_irq_kms_sw_irq_put(rdev, fence->ring);
+ 	}
+ 	trace_radeon_fence_wait_end(rdev->ddev, seq);
+ 	if (unlikely(!radeon_fence_signaled(fence))) {
+@@ -258,10 +255,11 @@ retry:
+ 			timeout = r;
+ 			goto retry;
+ 		}
+-		/* don't protect read access to rdev->fence_drv.last_seq
++		/* don't protect read access to rdev->fence_drv[t].last_seq
+ 		 * if we experiencing a lockup the value doesn't change
+ 		 */
+-		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
++		if (seq == rdev->fence_drv[fence->ring].last_seq &&
++		    radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
+ 			/* good news we believe it's a lockup */
+ 			printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
+ 			     fence->seq, seq);
+@@ -272,20 +270,20 @@ retry:
+ 			r = radeon_gpu_reset(rdev);
+ 			if (r)
+ 				return r;
+-			radeon_fence_write(rdev, fence->seq);
++			radeon_fence_write(rdev, fence->seq, fence->ring);
+ 			rdev->gpu_lockup = false;
+ 		}
+ 		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+-		write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+-		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+-		rdev->fence_drv.last_jiffies = jiffies;
+-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
++		write_lock_irqsave(&rdev->fence_lock, irq_flags);
++		rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
++		rdev->fence_drv[fence->ring].last_jiffies = jiffies;
++		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ 		goto retry;
+ 	}
+ 	return 0;
+ }
+ 
+-int radeon_fence_wait_next(struct radeon_device *rdev)
++int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
+ {
+ 	unsigned long irq_flags;
+ 	struct radeon_fence *fence;
+@@ -294,21 +292,21 @@ int radeon_fence_wait_next(struct radeon_device *rdev)
+ 	if (rdev->gpu_lockup) {
+ 		return 0;
+ 	}
+-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+-	if (list_empty(&rdev->fence_drv.emited)) {
+-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
++	write_lock_irqsave(&rdev->fence_lock, irq_flags);
++	if (list_empty(&rdev->fence_drv[ring].emitted)) {
++		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ 		return 0;
+ 	}
+-	fence = list_entry(rdev->fence_drv.emited.next,
++	fence = list_entry(rdev->fence_drv[ring].emitted.next,
+ 			   struct radeon_fence, list);
+ 	radeon_fence_ref(fence);
+-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
++	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ 	r = radeon_fence_wait(fence, false);
+ 	radeon_fence_unref(&fence);
+ 	return r;
+ }
+ 
+-int radeon_fence_wait_last(struct radeon_device *rdev)
++int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
+ {
+ 	unsigned long irq_flags;
+ 	struct radeon_fence *fence;
+@@ -317,15 +315,15 @@ int radeon_fence_wait_last(struct radeon_device *rdev)
+ 	if (rdev->gpu_lockup) {
+ 		return 0;
+ 	}
+-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+-	if (list_empty(&rdev->fence_drv.emited)) {
+-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
++	write_lock_irqsave(&rdev->fence_lock, irq_flags);
++	if (list_empty(&rdev->fence_drv[ring].emitted)) {
++		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ 		return 0;
+ 	}
+-	fence = list_entry(rdev->fence_drv.emited.prev,
++	fence = list_entry(rdev->fence_drv[ring].emitted.prev,
+ 			   struct radeon_fence, list);
+ 	radeon_fence_ref(fence);
+-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
++	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ 	r = radeon_fence_wait(fence, false);
+ 	radeon_fence_unref(&fence);
+ 	return r;
+@@ -347,39 +345,97 @@ void radeon_fence_unref(struct radeon_fence **fence)
+ 	}
+ }
+ 
+-void radeon_fence_process(struct radeon_device *rdev)
++void radeon_fence_process(struct radeon_device *rdev, int ring)
+ {
+ 	unsigned long irq_flags;
+ 	bool wake;
+ 
+-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+-	wake = radeon_fence_poll_locked(rdev);
+-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
++	write_lock_irqsave(&rdev->fence_lock, irq_flags);
++	wake = radeon_fence_poll_locked(rdev, ring);
++	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ 	if (wake) {
+-		wake_up_all(&rdev->fence_drv.queue);
++		wake_up_all(&rdev->fence_drv[ring].queue);
+ 	}
+ }
+ 
+-int radeon_fence_driver_init(struct radeon_device *rdev)
++int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
++{
++	unsigned long irq_flags;
++	int not_processed = 0;
++
++	read_lock_irqsave(&rdev->fence_lock, irq_flags);
++	if (!rdev->fence_drv[ring].initialized) {
++		read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
++		return 0;
++	}
++
++	if (!list_empty(&rdev->fence_drv[ring].emitted)) {
++		struct list_head *ptr;
++		list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
++			/* count up to 3, that's enought info */
++			if (++not_processed >= 3)
++				break;
++		}
++	}
++	read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
++	return not_processed;
++}
++
++int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
+ {
+ 	unsigned long irq_flags;
++	uint64_t index;
+ 	int r;
+ 
+-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+-	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
+-	if (r) {
+-		dev_err(rdev->dev, "fence failed to get scratch register\n");
+-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+-		return r;
++	write_lock_irqsave(&rdev->fence_lock, irq_flags);
++	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
++	if (rdev->wb.use_event) {
++		rdev->fence_drv[ring].scratch_reg = 0;
++		index = R600_WB_EVENT_OFFSET + ring * 4;
++	} else {
++		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
++		if (r) {
++			dev_err(rdev->dev, "fence failed to get scratch register\n");
++			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
++			return r;
++		}
++		index = RADEON_WB_SCRATCH_OFFSET +
++			rdev->fence_drv[ring].scratch_reg -
++			rdev->scratch.reg_base;
++	}
++	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
++	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
++	radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
++	rdev->fence_drv[ring].initialized = true;
++	DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
++		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
++	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
++	return 0;
++}
++
++static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
++{
++	rdev->fence_drv[ring].scratch_reg = -1;
++	rdev->fence_drv[ring].cpu_addr = NULL;
++	rdev->fence_drv[ring].gpu_addr = 0;
++	atomic_set(&rdev->fence_drv[ring].seq, 0);
++	INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
++	INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
++	INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
++	init_waitqueue_head(&rdev->fence_drv[ring].queue);
++	rdev->fence_drv[ring].initialized = false;
++}
++
++int radeon_fence_driver_init(struct radeon_device *rdev)
++{
++	unsigned long irq_flags;
++	int ring;
++
++	write_lock_irqsave(&rdev->fence_lock, irq_flags);
++	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
++		radeon_fence_driver_init_ring(rdev, ring);
+ 	}
+-	radeon_fence_write(rdev, 0);
+-	atomic_set(&rdev->fence_drv.seq, 0);
+-	INIT_LIST_HEAD(&rdev->fence_drv.created);
+-	INIT_LIST_HEAD(&rdev->fence_drv.emited);
+-	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
+-	init_waitqueue_head(&rdev->fence_drv.queue);
+-	rdev->fence_drv.initialized = true;
+-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
++	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ 	if (radeon_debugfs_fence_init(rdev)) {
+ 		dev_err(rdev->dev, "fence debugfs file creation failed\n");
+ 	}
+@@ -389,14 +445,18 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
+ void radeon_fence_driver_fini(struct radeon_device *rdev)
+ {
+ 	unsigned long irq_flags;
+-
+-	if (!rdev->fence_drv.initialized)
+-		return;
+-	wake_up_all(&rdev->fence_drv.queue);
+-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+-	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
+-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+-	rdev->fence_drv.initialized = false;
++	int ring;
++
++	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
++		if (!rdev->fence_drv[ring].initialized)
++			continue;
++		radeon_fence_wait_last(rdev, ring);
++		wake_up_all(&rdev->fence_drv[ring].queue);
++		write_lock_irqsave(&rdev->fence_lock, irq_flags);
++		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
++		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
++		rdev->fence_drv[ring].initialized = false;
++	}
+ }
+ 
+ 
+@@ -410,14 +470,21 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
+ 	struct drm_device *dev = node->minor->dev;
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	struct radeon_fence *fence;
+-
+-	seq_printf(m, "Last signaled fence 0x%08X\n",
+-		   radeon_fence_read(rdev));
+-	if (!list_empty(&rdev->fence_drv.emited)) {
+-		   fence = list_entry(rdev->fence_drv.emited.prev,
+-				      struct radeon_fence, list);
+-		   seq_printf(m, "Last emited fence %p with 0x%08X\n",
+-			      fence,  fence->seq);
++	int i;
++
++	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
++		if (!rdev->fence_drv[i].initialized)
++			continue;
++
++		seq_printf(m, "--- ring %d ---\n", i);
++		seq_printf(m, "Last signaled fence 0x%08X\n",
++			   radeon_fence_read(rdev, i));
++		if (!list_empty(&rdev->fence_drv[i].emitted)) {
++			fence = list_entry(rdev->fence_drv[i].emitted.prev,
++					   struct radeon_fence, list);
++			seq_printf(m, "Last emitted fence %p with 0x%08X\n",
++				   fence,  fence->seq);
++		}
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
+index ba7ab79..2a4c592 100644
+--- a/drivers/gpu/drm/radeon/radeon_gart.c
++++ b/drivers/gpu/drm/radeon/radeon_gart.c
+@@ -157,9 +157,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+ 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+ 	for (i = 0; i < pages; i++, p++) {
+ 		if (rdev->gart.pages[p]) {
+-			if (!rdev->gart.ttm_alloced[p])
+-				pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
+-						PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ 			rdev->gart.pages[p] = NULL;
+ 			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
+ 			page_base = rdev->gart.pages_addr[p];
+@@ -191,23 +188,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+ 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+ 
+ 	for (i = 0; i < pages; i++, p++) {
+-		/* we reverted the patch using dma_addr in TTM for now but this
+-		 * code stops building on alpha so just comment it out for now */
+-		if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
+-			rdev->gart.ttm_alloced[p] = true;
+-			rdev->gart.pages_addr[p] = dma_addr[i];
+-		} else {
+-			/* we need to support large memory configurations */
+-			/* assume that unbind have already been call on the range */
+-			rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
+-							0, PAGE_SIZE,
+-							PCI_DMA_BIDIRECTIONAL);
+-			if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
+-				/* FIXME: failed to map page (return -ENOMEM?) */
+-				radeon_gart_unbind(rdev, offset, pages);
+-				return -ENOMEM;
+-			}
+-		}
++		rdev->gart.pages_addr[p] = dma_addr[i];
+ 		rdev->gart.pages[p] = pagelist[i];
+ 		if (rdev->gart.ptr) {
+ 			page_base = rdev->gart.pages_addr[p];
+@@ -274,12 +255,6 @@ int radeon_gart_init(struct radeon_device *rdev)
+ 		radeon_gart_fini(rdev);
+ 		return -ENOMEM;
+ 	}
+-	rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
+-					 rdev->gart.num_cpu_pages, GFP_KERNEL);
+-	if (rdev->gart.ttm_alloced == NULL) {
+-		radeon_gart_fini(rdev);
+-		return -ENOMEM;
+-	}
+ 	/* set GART entry to point to the dummy page by default */
+ 	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
+ 		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
+@@ -296,10 +271,418 @@ void radeon_gart_fini(struct radeon_device *rdev)
+ 	rdev->gart.ready = false;
+ 	kfree(rdev->gart.pages);
+ 	kfree(rdev->gart.pages_addr);
+-	kfree(rdev->gart.ttm_alloced);
+ 	rdev->gart.pages = NULL;
+ 	rdev->gart.pages_addr = NULL;
+-	rdev->gart.ttm_alloced = NULL;
+ 
+ 	radeon_dummy_page_fini(rdev);
+ }
++
++/*
++ * vm helpers
++ *
++ * TODO bind a default page at vm initialization for default address
++ */
++int radeon_vm_manager_init(struct radeon_device *rdev)
++{
++	int r;
++
++	rdev->vm_manager.enabled = false;
++
++	/* mark first vm as always in use, it's the system one */
++	/* allocate enough for 2 full VM pts */
++	r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
++				      rdev->vm_manager.max_pfn * 8 * 2,
++				      RADEON_GEM_DOMAIN_VRAM);
++	if (r) {
++		dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
++			(rdev->vm_manager.max_pfn * 8) >> 10);
++		return r;
++	}
++
++	r = rdev->vm_manager.funcs->init(rdev);
++	if (r == 0)
++		rdev->vm_manager.enabled = true;
++
++	return r;
++}
++
++/* cs mutex must be lock */
++static void radeon_vm_unbind_locked(struct radeon_device *rdev,
++				    struct radeon_vm *vm)
++{
++	struct radeon_bo_va *bo_va;
++
++	if (vm->id == -1) {
++		return;
++	}
++
++	/* wait for vm use to end */
++	if (vm->fence) {
++		radeon_fence_wait(vm->fence, false);
++		radeon_fence_unref(&vm->fence);
++	}
++
++	/* hw unbind */
++	rdev->vm_manager.funcs->unbind(rdev, vm);
++	rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
++	list_del_init(&vm->list);
++	vm->id = -1;
++	radeon_sa_bo_free(rdev, &vm->sa_bo);
++	vm->pt = NULL;
++
++	list_for_each_entry(bo_va, &vm->va, vm_list) {
++		bo_va->valid = false;
++	}
++}
++
++void radeon_vm_manager_fini(struct radeon_device *rdev)
++{
++	if (rdev->vm_manager.sa_manager.bo == NULL)
++		return;
++	radeon_vm_manager_suspend(rdev);
++	rdev->vm_manager.funcs->fini(rdev);
++	radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
++	rdev->vm_manager.enabled = false;
++}
++
++int radeon_vm_manager_start(struct radeon_device *rdev)
++{
++	if (rdev->vm_manager.sa_manager.bo == NULL) {
++		return -EINVAL;
++	}
++	return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
++}
++
++int radeon_vm_manager_suspend(struct radeon_device *rdev)
++{
++	struct radeon_vm *vm, *tmp;
++
++	radeon_mutex_lock(&rdev->cs_mutex);
++	/* unbind all active vm */
++	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
++		radeon_vm_unbind_locked(rdev, vm);
++	}
++	rdev->vm_manager.funcs->fini(rdev);
++	radeon_mutex_unlock(&rdev->cs_mutex);
++	return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
++}
++
++/* cs mutex must be lock */
++void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
++{
++	mutex_lock(&vm->mutex);
++	radeon_vm_unbind_locked(rdev, vm);
++	mutex_unlock(&vm->mutex);
++}
++
++/* cs mutex must be lock & vm mutex must be lock */
++int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
++{
++	struct radeon_vm *vm_evict;
++	unsigned i;
++	int id = -1, r;
++
++	if (vm == NULL) {
++		return -EINVAL;
++	}
++
++	if (vm->id != -1) {
++		/* update lru */
++		list_del_init(&vm->list);
++		list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
++		return 0;
++	}
++
++retry:
++	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
++			     RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
++			     RADEON_GPU_PAGE_SIZE);
++	if (r) {
++		if (list_empty(&rdev->vm_manager.lru_vm)) {
++			return r;
++		}
++		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
++		radeon_vm_unbind(rdev, vm_evict);
++		goto retry;
++	}
++	vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
++	vm->pt += (vm->sa_bo.offset >> 3);
++	vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
++	vm->pt_gpu_addr += vm->sa_bo.offset;
++	memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
++
++retry_id:
++	/* search for free vm */
++	for (i = 0; i < rdev->vm_manager.nvm; i++) {
++		if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
++			id = i;
++			break;
++		}
++	}
++	/* evict vm if necessary */
++	if (id == -1) {
++		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
++		radeon_vm_unbind(rdev, vm_evict);
++		goto retry_id;
++	}
++
++	/* do hw bind */
++	r = rdev->vm_manager.funcs->bind(rdev, vm, id);
++	if (r) {
++		radeon_sa_bo_free(rdev, &vm->sa_bo);
++		return r;
++	}
++	rdev->vm_manager.use_bitmap |= 1 << id;
++	vm->id = id;
++	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
++	return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
++				       &rdev->ib_pool.sa_manager.bo->tbo.mem);
++}
++
++/* object have to be reserved */
++int radeon_vm_bo_add(struct radeon_device *rdev,
++		     struct radeon_vm *vm,
++		     struct radeon_bo *bo,
++		     uint64_t offset,
++		     uint32_t flags)
++{
++	struct radeon_bo_va *bo_va, *tmp;
++	struct list_head *head;
++	uint64_t size = radeon_bo_size(bo), last_offset = 0;
++	unsigned last_pfn;
++
++	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
++	if (bo_va == NULL) {
++		return -ENOMEM;
++	}
++	bo_va->vm = vm;
++	bo_va->bo = bo;
++	bo_va->soffset = offset;
++	bo_va->eoffset = offset + size;
++	bo_va->flags = flags;
++	bo_va->valid = false;
++	INIT_LIST_HEAD(&bo_va->bo_list);
++	INIT_LIST_HEAD(&bo_va->vm_list);
++	/* make sure object fit at this offset */
++	if (bo_va->soffset >= bo_va->eoffset) {
++		kfree(bo_va);
++		return -EINVAL;
++	}
++
++	last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
++	if (last_pfn > rdev->vm_manager.max_pfn) {
++		kfree(bo_va);
++		dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
++			last_pfn, rdev->vm_manager.max_pfn);
++		return -EINVAL;
++	}
++
++	mutex_lock(&vm->mutex);
++	if (last_pfn > vm->last_pfn) {
++		/* release mutex and lock in right order */
++		mutex_unlock(&vm->mutex);
++		radeon_mutex_lock(&rdev->cs_mutex);
++		mutex_lock(&vm->mutex);
++		/* and check again */
++		if (last_pfn > vm->last_pfn) {
++			/* grow va space 32M by 32M */
++			unsigned align = ((32 << 20) >> 12) - 1;
++			radeon_vm_unbind_locked(rdev, vm);
++			vm->last_pfn = (last_pfn + align) & ~align;
++		}
++		radeon_mutex_unlock(&rdev->cs_mutex);
++	}
++	head = &vm->va;
++	last_offset = 0;
++	list_for_each_entry(tmp, &vm->va, vm_list) {
++		if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
++			/* bo can be added before this one */
++			break;
++		}
++		if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
++			/* bo and tmp overlap, invalid offset */
++			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
++				bo, (unsigned)bo_va->soffset, tmp->bo,
++				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
++			kfree(bo_va);
++			mutex_unlock(&vm->mutex);
++			return -EINVAL;
++		}
++		last_offset = tmp->eoffset;
++		head = &tmp->vm_list;
++	}
++	list_add(&bo_va->vm_list, head);
++	list_add_tail(&bo_va->bo_list, &bo->va);
++	mutex_unlock(&vm->mutex);
++	return 0;
++}
++
++static u64 radeon_vm_get_addr(struct radeon_device *rdev,
++			      struct ttm_mem_reg *mem,
++			      unsigned pfn)
++{
++	u64 addr = 0;
++
++	switch (mem->mem_type) {
++	case TTM_PL_VRAM:
++		addr = (mem->start << PAGE_SHIFT);
++		addr += pfn * RADEON_GPU_PAGE_SIZE;
++		addr += rdev->vm_manager.vram_base_offset;
++		break;
++	case TTM_PL_TT:
++		/* offset inside page table */
++		addr = mem->start << PAGE_SHIFT;
++		addr += pfn * RADEON_GPU_PAGE_SIZE;
++		addr = addr >> PAGE_SHIFT;
++		/* page table offset */
++		addr = rdev->gart.pages_addr[addr];
++		/* in case cpu page size != gpu page size*/
++		addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
++		break;
++	default:
++		break;
++	}
++	return addr;
++}
++
++/* object have to be reserved & cs mutex took & vm mutex took */
++int radeon_vm_bo_update_pte(struct radeon_device *rdev,
++			    struct radeon_vm *vm,
++			    struct radeon_bo *bo,
++			    struct ttm_mem_reg *mem)
++{
++	struct radeon_bo_va *bo_va;
++	unsigned ngpu_pages, i;
++	uint64_t addr = 0, pfn;
++	uint32_t flags;
++
++	/* nothing to do if vm isn't bound */
++	if (vm->id == -1)
++		return 0;;
++
++	bo_va = radeon_bo_va(bo, vm);
++	if (bo_va == NULL) {
++		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
++		return -EINVAL;
++	}
++
++	if (bo_va->valid)
++		return 0;
++
++	ngpu_pages = radeon_bo_ngpu_pages(bo);
++	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
++	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
++	if (mem) {
++		if (mem->mem_type != TTM_PL_SYSTEM) {
++			bo_va->flags |= RADEON_VM_PAGE_VALID;
++			bo_va->valid = true;
++		}
++		if (mem->mem_type == TTM_PL_TT) {
++			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
++		}
++	}
++	pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
++	flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
++	for (i = 0, addr = 0; i < ngpu_pages; i++) {
++		if (mem && bo_va->valid) {
++			addr = radeon_vm_get_addr(rdev, mem, i);
++		}
++		rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
++	}
++	rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
++	return 0;
++}
++
++/* object has to be reserved */
++int radeon_vm_bo_rmv(struct radeon_device *rdev,
++		     struct radeon_vm *vm,
++		     struct radeon_bo *bo)
++{
++	struct radeon_bo_va *bo_va;
++
++	bo_va = radeon_bo_va(bo, vm);
++	if (bo_va == NULL)
++		return 0;
++
++	radeon_mutex_lock(&rdev->cs_mutex);
++	mutex_lock(&vm->mutex);
++	radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
++	radeon_mutex_unlock(&rdev->cs_mutex);
++	list_del(&bo_va->vm_list);
++	mutex_unlock(&vm->mutex);
++	list_del(&bo_va->bo_list);
++
++	kfree(bo_va);
++	return 0;
++}
++
++void radeon_vm_bo_invalidate(struct radeon_device *rdev,
++			     struct radeon_bo *bo)
++{
++	struct radeon_bo_va *bo_va;
++
++	BUG_ON(!atomic_read(&bo->tbo.reserved));
++	list_for_each_entry(bo_va, &bo->va, bo_list) {
++		bo_va->valid = false;
++	}
++}
++
++int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
++{
++	int r;
++
++	vm->id = -1;
++	vm->fence = NULL;
++	mutex_init(&vm->mutex);
++	INIT_LIST_HEAD(&vm->list);
++	INIT_LIST_HEAD(&vm->va);
++	/* SI requires equal sized PTs for all VMs, so always set
++	 * last_pfn to max_pfn.  cayman allows variable sized
++	 * pts so we can grow them as needed.  Once we switch
++	 * to two level pts we can unify this again.
++	 */
++	if (rdev->family >= CHIP_TAHITI)
++		vm->last_pfn = rdev->vm_manager.max_pfn;
++	else
++		vm->last_pfn = 0;
++	/* map the ib pool buffer at 0 in virtual address space, set
++	 * read only
++	 */
++	r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
++			     RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
++	return r;
++}
++
++void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
++{
++	struct radeon_bo_va *bo_va, *tmp;
++	int r;
++
++	radeon_mutex_lock(&rdev->cs_mutex);
++	mutex_lock(&vm->mutex);
++	radeon_vm_unbind_locked(rdev, vm);
++	radeon_mutex_unlock(&rdev->cs_mutex);
++
++	/* remove all bo */
++	r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
++	if (!r) {
++		bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
++		list_del_init(&bo_va->bo_list);
++		list_del_init(&bo_va->vm_list);
++		radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
++		kfree(bo_va);
++	}
++	if (!list_empty(&vm->va)) {
++		dev_err(rdev->dev, "still active bo inside vm\n");
++	}
++	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
++		list_del_init(&bo_va->vm_list);
++		r = radeon_bo_reserve(bo_va->bo, false);
++		if (!r) {
++			list_del_init(&bo_va->bo_list);
++			radeon_bo_unreserve(bo_va->bo);
++			kfree(bo_va);
++		}
++	}
++	mutex_unlock(&vm->mutex);
++}
+diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
+index aa1ca2d..c7008b5 100644
+--- a/drivers/gpu/drm/radeon/radeon_gem.c
++++ b/drivers/gpu/drm/radeon/radeon_gem.c
+@@ -75,32 +75,6 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
+ 	return 0;
+ }
+ 
+-int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+-			  uint64_t *gpu_addr)
+-{
+-	struct radeon_bo *robj = gem_to_radeon_bo(obj);
+-	int r;
+-
+-	r = radeon_bo_reserve(robj, false);
+-	if (unlikely(r != 0))
+-		return r;
+-	r = radeon_bo_pin(robj, pin_domain, gpu_addr);
+-	radeon_bo_unreserve(robj);
+-	return r;
+-}
+-
+-void radeon_gem_object_unpin(struct drm_gem_object *obj)
+-{
+-	struct radeon_bo *robj = gem_to_radeon_bo(obj);
+-	int r;
+-
+-	r = radeon_bo_reserve(robj, false);
+-	if (likely(r == 0)) {
+-		radeon_bo_unpin(robj);
+-		radeon_bo_unreserve(robj);
+-	}
+-}
+-
+ int radeon_gem_set_domain(struct drm_gem_object *gobj,
+ 			  uint32_t rdomain, uint32_t wdomain)
+ {
+@@ -142,6 +116,44 @@ void radeon_gem_fini(struct radeon_device *rdev)
+ 	radeon_bo_force_delete(rdev);
+ }
+ 
++/*
++ * Called from drm_gem_handle_create, which applies in both the new and the
++ * open ioctl case.
++ */
++int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
++{
++	return 0;
++}
++
++void radeon_gem_object_close(struct drm_gem_object *obj,
++			     struct drm_file *file_priv)
++{
++	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
++	struct radeon_device *rdev = rbo->rdev;
++	struct radeon_fpriv *fpriv = file_priv->driver_priv;
++	struct radeon_vm *vm = &fpriv->vm;
++	struct radeon_bo_va *bo_va, *tmp;
++
++	if (rdev->family < CHIP_CAYMAN) {
++		return;
++	}
++
++	if (radeon_bo_reserve(rbo, false)) {
++		return;
++	}
++	list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
++		if (bo_va->vm == vm) {
++			/* remove from this vm address space */
++			mutex_lock(&vm->mutex);
++			list_del(&bo_va->vm_list);
++			mutex_unlock(&vm->mutex);
++			list_del(&bo_va->bo_list);
++			kfree(bo_va);
++		}
++	}
++	radeon_bo_unreserve(rbo);
++}
++
+ 
+ /*
+  * GEM ioctls.
+@@ -152,6 +164,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	struct drm_radeon_gem_info *args = data;
+ 	struct ttm_mem_type_manager *man;
++	unsigned i;
+ 
+ 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+ 
+@@ -160,8 +173,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+ 	if (rdev->stollen_vga_memory)
+ 		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
+ 	args->vram_visible -= radeon_fbdev_total_size(rdev);
+-	args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
+-		RADEON_IB_POOL_SIZE*64*1024;
++	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
++	for(i = 0; i < RADEON_NUM_RINGS; ++i)
++		args->gart_size -= rdev->ring[i].ring_size;
+ 	return 0;
+ }
+ 
+@@ -352,6 +366,109 @@ out:
+ 	return r;
+ }
+ 
++int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
++			  struct drm_file *filp)
++{
++	struct drm_radeon_gem_va *args = data;
++	struct drm_gem_object *gobj;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_fpriv *fpriv = filp->driver_priv;
++	struct radeon_bo *rbo;
++	struct radeon_bo_va *bo_va;
++	u32 invalid_flags;
++	int r = 0;
++
++	if (!rdev->vm_manager.enabled) {
++		args->operation = RADEON_VA_RESULT_ERROR;
++		return -ENOTTY;
++	}
++
++	/* !! DONT REMOVE !!
++	 * We don't support vm_id yet; to be sure we don't have broken
++	 * userspace, reject anyone trying to use a non-zero value. Thus, moving
++	 * forward, we can use those fields without breaking existing userspace.
++	 */
++	if (args->vm_id) {
++		args->operation = RADEON_VA_RESULT_ERROR;
++		return -EINVAL;
++	}
++
++	if (args->offset < RADEON_VA_RESERVED_SIZE) {
++		dev_err(&dev->pdev->dev,
++			"offset 0x%lX is in reserved area 0x%X\n",
++			(unsigned long)args->offset,
++			RADEON_VA_RESERVED_SIZE);
++		args->operation = RADEON_VA_RESULT_ERROR;
++		return -EINVAL;
++	}
++
++	/* don't remove, we need to enforce userspace to set the snooped flag,
++	 * otherwise we will end up with broken userspace and we won't be able
++	 * to enable this feature without adding a new interface
++	 */
++	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
++	if ((args->flags & invalid_flags)) {
++		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
++			args->flags, invalid_flags);
++		args->operation = RADEON_VA_RESULT_ERROR;
++		return -EINVAL;
++	}
++	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
++		dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
++		args->operation = RADEON_VA_RESULT_ERROR;
++		return -EINVAL;
++	}
++
++	switch (args->operation) {
++	case RADEON_VA_MAP:
++	case RADEON_VA_UNMAP:
++		break;
++	default:
++		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
++			args->operation);
++		args->operation = RADEON_VA_RESULT_ERROR;
++		return -EINVAL;
++	}
++
++	gobj = drm_gem_object_lookup(dev, filp, args->handle);
++	if (gobj == NULL) {
++		args->operation = RADEON_VA_RESULT_ERROR;
++		return -ENOENT;
++	}
++	rbo = gem_to_radeon_bo(gobj);
++	r = radeon_bo_reserve(rbo, false);
++	if (r) {
++		args->operation = RADEON_VA_RESULT_ERROR;
++		drm_gem_object_unreference_unlocked(gobj);
++		return r;
++	}
++	switch (args->operation) {
++	case RADEON_VA_MAP:
++		bo_va = radeon_bo_va(rbo, &fpriv->vm);
++		if (bo_va) {
++			args->operation = RADEON_VA_RESULT_VA_EXIST;
++			args->offset = bo_va->soffset;
++			goto out;
++		}
++		r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
++				     args->offset, args->flags);
++		break;
++	case RADEON_VA_UNMAP:
++		r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
++		break;
++	default:
++		break;
++	}
++	args->operation = RADEON_VA_RESULT_OK;
++	if (r) {
++		args->operation = RADEON_VA_RESULT_ERROR;
++	}
++out:
++	radeon_bo_unreserve(rbo);
++	drm_gem_object_unreference_unlocked(gobj);
++	return r;
++}
++
+ int radeon_mode_dumb_create(struct drm_file *file_priv,
+ 			    struct drm_device *dev,
+ 			    struct drm_mode_create_dumb *args)
+diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
+index cf20351..6076e85 100644
+--- a/drivers/gpu/drm/radeon/radeon_i2c.c
++++ b/drivers/gpu/drm/radeon/radeon_i2c.c
+@@ -26,10 +26,15 @@
+ #include <linux/export.h>
+ 
+ #include "drmP.h"
++#include "drm_edid.h"
+ #include "radeon_drm.h"
+ #include "radeon.h"
+ #include "atom.h"
+ 
++extern int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
++				   struct i2c_msg *msgs, int num);
++extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
++
+ /**
+  * radeon_ddc_probe
+  *
+@@ -41,13 +46,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
+ 	int ret;
+ 	struct i2c_msg msgs[] = {
+ 		{
+-			.addr = 0x50,
++			.addr = DDC_ADDR,
+ 			.flags = 0,
+ 			.len = 1,
+ 			.buf = &out,
+ 		},
+ 		{
+-			.addr = 0x50,
++			.addr = DDC_ADDR,
+ 			.flags = I2C_M_RD,
+ 			.len = 8,
+ 			.buf = buf,
+@@ -888,6 +893,11 @@ static const struct i2c_algorithm radeon_i2c_algo = {
+ 	.functionality = radeon_hw_i2c_func,
+ };
+ 
++static const struct i2c_algorithm radeon_atom_i2c_algo = {
++	.master_xfer = radeon_atom_hw_i2c_xfer,
++	.functionality = radeon_atom_hw_i2c_func,
++};
++
+ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+ 					  struct radeon_i2c_bus_rec *rec,
+ 					  const char *name)
+@@ -907,6 +917,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+ 	i2c->rec = *rec;
+ 	i2c->adapter.owner = THIS_MODULE;
+ 	i2c->adapter.class = I2C_CLASS_DDC;
++	i2c->adapter.dev.parent = &dev->pdev->dev;
+ 	i2c->dev = dev;
+ 	i2c_set_adapdata(&i2c->adapter, i2c);
+ 	if (rec->mm_i2c ||
+@@ -923,6 +934,18 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+ 			DRM_ERROR("Failed to register hw i2c %s\n", name);
+ 			goto out_free;
+ 		}
++	} else if (rec->hw_capable &&
++		   radeon_hw_i2c &&
++		   ASIC_IS_DCE3(rdev)) {
++		/* hw i2c using atom */
++		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
++			 "Radeon i2c hw bus %s", name);
++		i2c->adapter.algo = &radeon_atom_i2c_algo;
++		ret = i2c_add_adapter(&i2c->adapter);
++		if (ret) {
++			DRM_ERROR("Failed to register hw i2c %s\n", name);
++			goto out_free;
++		}
+ 	} else {
+ 		/* set the radeon bit adapter */
+ 		snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+@@ -934,10 +957,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+ 		i2c->algo.bit.setscl = set_clock;
+ 		i2c->algo.bit.getsda = get_data;
+ 		i2c->algo.bit.getscl = get_clock;
+-		i2c->algo.bit.udelay = 20;
+-		/* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
+-		 * make this, 2 jiffies is a lot more reliable */
+-		i2c->algo.bit.timeout = 2;
++		i2c->algo.bit.udelay = 10;
++		i2c->algo.bit.timeout = usecs_to_jiffies(2200);	/* from VESA */
+ 		i2c->algo.bit.data = i2c;
+ 		ret = i2c_bit_add_bus(&i2c->adapter);
+ 		if (ret) {
+@@ -967,6 +988,7 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+ 	i2c->rec = *rec;
+ 	i2c->adapter.owner = THIS_MODULE;
+ 	i2c->adapter.class = I2C_CLASS_DDC;
++	i2c->adapter.dev.parent = &dev->pdev->dev;
+ 	i2c->dev = dev;
+ 	snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
+ 		 "Radeon aux bus %s", name);
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index 4f9496e..645dcbf 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -65,7 +65,8 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
+ 	unsigned i;
+ 
+ 	/* Disable *all* interrupts */
+-	rdev->irq.sw_int = false;
++	for (i = 0; i < RADEON_NUM_RINGS; i++)
++		rdev->irq.sw_int[i] = false;
+ 	rdev->irq.gui_idle = false;
+ 	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
+ 		rdev->irq.hpd[i] = false;
+@@ -81,9 +82,11 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
+ int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
+ {
+ 	struct radeon_device *rdev = dev->dev_private;
++	unsigned i;
+ 
+ 	dev->max_vblank_count = 0x001fffff;
+-	rdev->irq.sw_int = true;
++	for (i = 0; i < RADEON_NUM_RINGS; i++)
++		rdev->irq.sw_int[i] = true;
+ 	radeon_irq_set(rdev);
+ 	return 0;
+ }
+@@ -97,7 +100,8 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
+ 		return;
+ 	}
+ 	/* Disable *all* interrupts */
+-	rdev->irq.sw_int = false;
++	for (i = 0; i < RADEON_NUM_RINGS; i++)
++		rdev->irq.sw_int[i] = false;
+ 	rdev->irq.gui_idle = false;
+ 	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
+ 		rdev->irq.hpd[i] = false;
+@@ -216,26 +220,26 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
+ 	flush_work_sync(&rdev->hotplug_work);
+ }
+ 
+-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
++void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
+ {
+ 	unsigned long irqflags;
+ 
+ 	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+-	if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
+-		rdev->irq.sw_int = true;
++	if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
++		rdev->irq.sw_int[ring] = true;
+ 		radeon_irq_set(rdev);
+ 	}
+ 	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+ }
+ 
+-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
++void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
+ {
+ 	unsigned long irqflags;
+ 
+ 	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+-	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
+-	if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
+-		rdev->irq.sw_int = false;
++	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
++	if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
++		rdev->irq.sw_int[ring] = false;
+ 		radeon_irq_set(rdev);
+ 	}
+ 	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index be2c122..3c2628b 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -57,6 +57,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
+ 	}
+ 	dev->dev_private = (void *)rdev;
+ 
++	pci_set_master(dev->pdev);
++
+ 	/* update BUS flag */
+ 	if (drm_pci_device_is_agp(dev)) {
+ 		flags |= RADEON_IS_AGP;
+@@ -169,7 +171,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		value = rdev->accel_working;
+ 		break;
+ 	case RADEON_INFO_TILING_CONFIG:
+-		if (rdev->family >= CHIP_CAYMAN)
++		if (rdev->family >= CHIP_TAHITI)
++			value = rdev->config.si.tile_config;
++		else if (rdev->family >= CHIP_CAYMAN)
+ 			value = rdev->config.cayman.tile_config;
+ 		else if (rdev->family >= CHIP_CEDAR)
+ 			value = rdev->config.evergreen.tile_config;
+@@ -208,7 +212,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		value = rdev->clock.spll.reference_freq * 10;
+ 		break;
+ 	case RADEON_INFO_NUM_BACKENDS:
+-		if (rdev->family >= CHIP_CAYMAN)
++		if (rdev->family >= CHIP_TAHITI)
++			value = rdev->config.si.max_backends_per_se *
++				rdev->config.si.max_shader_engines;
++		else if (rdev->family >= CHIP_CAYMAN)
+ 			value = rdev->config.cayman.max_backends_per_se *
+ 				rdev->config.cayman.max_shader_engines;
+ 		else if (rdev->family >= CHIP_CEDAR)
+@@ -222,7 +229,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		}
+ 		break;
+ 	case RADEON_INFO_NUM_TILE_PIPES:
+-		if (rdev->family >= CHIP_CAYMAN)
++		if (rdev->family >= CHIP_TAHITI)
++			value = rdev->config.si.max_tile_pipes;
++		else if (rdev->family >= CHIP_CAYMAN)
+ 			value = rdev->config.cayman.max_tile_pipes;
+ 		else if (rdev->family >= CHIP_CEDAR)
+ 			value = rdev->config.evergreen.max_tile_pipes;
+@@ -238,7 +247,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 		value = 1;
+ 		break;
+ 	case RADEON_INFO_BACKEND_MAP:
+-		if (rdev->family >= CHIP_CAYMAN)
++		if (rdev->family >= CHIP_TAHITI)
++			value = rdev->config.si.backend_map;
++		else if (rdev->family >= CHIP_CAYMAN)
+ 			value = rdev->config.cayman.backend_map;
+ 		else if (rdev->family >= CHIP_CEDAR)
+ 			value = rdev->config.evergreen.backend_map;
+@@ -250,6 +261,33 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 			return -EINVAL;
+ 		}
+ 		break;
++	case RADEON_INFO_VA_START:
++		/* this is where we report if vm is supported or not */
++		if (rdev->family < CHIP_CAYMAN)
++			return -EINVAL;
++		value = RADEON_VA_RESERVED_SIZE;
++		break;
++	case RADEON_INFO_IB_VM_MAX_SIZE:
++		/* this is where we report if vm is supported or not */
++		if (rdev->family < CHIP_CAYMAN)
++			return -EINVAL;
++		value = RADEON_IB_VM_MAX_SIZE;
++		break;
++	case RADEON_INFO_MAX_PIPES:
++		if (rdev->family >= CHIP_TAHITI)
++			value = rdev->config.si.max_pipes_per_simd;
++		else if (rdev->family >= CHIP_CAYMAN)
++			value = rdev->config.cayman.max_pipes_per_simd;
++		else if (rdev->family >= CHIP_CEDAR)
++			value = rdev->config.evergreen.max_pipes;
++		else if (rdev->family >= CHIP_RV770)
++			value = rdev->config.rv770.max_pipes;
++		else if (rdev->family >= CHIP_R600)
++			value = rdev->config.r600.max_pipes;
++		else {
++			return -EINVAL;
++		}
++		break;
+ 	default:
+ 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
+ 		return -EINVAL;
+@@ -270,7 +308,6 @@ int radeon_driver_firstopen_kms(struct drm_device *dev)
+ 	return 0;
+ }
+ 
+-
+ void radeon_driver_lastclose_kms(struct drm_device *dev)
+ {
+ 	vga_switcheroo_process_delayed_switch();
+@@ -278,12 +315,45 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
+ 
+ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
+ {
++	struct radeon_device *rdev = dev->dev_private;
++
++	file_priv->driver_priv = NULL;
++
++	/* new gpu have virtual address space support */
++	if (rdev->family >= CHIP_CAYMAN) {
++		struct radeon_fpriv *fpriv;
++		int r;
++
++		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
++		if (unlikely(!fpriv)) {
++			return -ENOMEM;
++		}
++
++		r = radeon_vm_init(rdev, &fpriv->vm);
++		if (r) {
++			radeon_vm_fini(rdev, &fpriv->vm);
++			kfree(fpriv);
++			return r;
++		}
++
++		file_priv->driver_priv = fpriv;
++	}
+ 	return 0;
+ }
+ 
+ void radeon_driver_postclose_kms(struct drm_device *dev,
+ 				 struct drm_file *file_priv)
+ {
++	struct radeon_device *rdev = dev->dev_private;
++
++	/* new gpu have virtual address space support */
++	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
++		struct radeon_fpriv *fpriv = file_priv->driver_priv;
++
++		radeon_vm_fini(rdev, &fpriv->vm);
++		kfree(fpriv);
++		file_priv->driver_priv = NULL;
++	}
+ }
+ 
+ void radeon_driver_preclose_kms(struct drm_device *dev,
+@@ -451,5 +521,6 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
+ 	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ 	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
++	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ };
+ int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+index daadf21..9760e5a 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+@@ -419,7 +419,9 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+ 	r = radeon_bo_reserve(rbo, false);
+ 	if (unlikely(r != 0))
+ 		return r;
+-	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
++	/* Only 27 bit offset for legacy CRTC */
++	r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM, 1 << 27,
++				     &base);
+ 	if (unlikely(r != 0)) {
+ 		radeon_bo_unreserve(rbo);
+ 		return -EINVAL;
+@@ -437,7 +439,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
+ 
+ 	crtc_offset_cntl = 0;
+ 
+-	pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
++	pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
+ 	crtc_pitch  = (((pitch_pixels * target_fb->bits_per_pixel) +
+ 			((target_fb->bits_per_pixel * 8) - 1)) /
+ 		       (target_fb->bits_per_pixel * 8));
+@@ -1023,9 +1025,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
+ 
+ static void radeon_crtc_prepare(struct drm_crtc *crtc)
+ {
++	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ 	struct drm_device *dev = crtc->dev;
+ 	struct drm_crtc *crtci;
+ 
++	radeon_crtc->in_mode_set = true;
+ 	/*
+ 	* The hardware wedges sometimes if you reconfigure one CRTC
+ 	* whilst another is running (see fdo bug #24611).
+@@ -1036,6 +1040,7 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc)
+ 
+ static void radeon_crtc_commit(struct drm_crtc *crtc)
+ {
++	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ 	struct drm_device *dev = crtc->dev;
+ 	struct drm_crtc *crtci;
+ 
+@@ -1046,6 +1051,7 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
+ 		if (crtci->enabled)
+ 			radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
+ 	}
++	radeon_crtc->in_mode_set = false;
+ }
+ 
+ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+index 8165953..53b07a3 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+@@ -88,7 +88,7 @@ static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
+ 		lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+ 		lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
+ 		WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
+-		udelay(1000);
++		mdelay(1);
+ 
+ 		lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
+ 		lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
+@@ -101,7 +101,7 @@ static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
+ 				  (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
+ 		if (is_mac)
+ 			lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
+-		udelay(panel_pwr_delay * 1000);
++		mdelay(panel_pwr_delay);
+ 		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ 		break;
+ 	case DRM_MODE_DPMS_STANDBY:
+@@ -118,10 +118,10 @@ static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
+ 			WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ 			lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
+ 		}
+-		udelay(panel_pwr_delay * 1000);
++		mdelay(panel_pwr_delay);
+ 		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ 		WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+-		udelay(panel_pwr_delay * 1000);
++		mdelay(panel_pwr_delay);
+ 		break;
+ 	}
+ 
+@@ -617,6 +617,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
+ 	enum drm_connector_status found = connector_status_disconnected;
+ 	bool color = true;
+ 
++	/* just don't bother on RN50: those chips are often connected to remote
++	 * console hw, and load detection often fails for them. So to make
++	 * everyone happy, report the encoder as always connected.
++	 */
++	if (ASIC_IS_RN50(rdev)) {
++		return connector_status_connected;
++	}
++
+ 	/* save the regs we need */
+ 	vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
+ 	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
+@@ -657,7 +665,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
+ 
+ 	WREG32(RADEON_DAC_MACRO_CNTL, tmp);
+ 
+-	udelay(2000);
++	mdelay(2);
+ 
+ 	if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
+ 		found = connector_status_connected;
+@@ -1496,7 +1504,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
+ 	tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
+ 	WREG32(RADEON_DAC_CNTL2, tmp);
+ 
+-	udelay(10000);
++	mdelay(10);
+ 
+ 	if (ASIC_IS_R300(rdev)) {
+ 		if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
+diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
+index bb42df4..dabfefd 100644
+--- a/drivers/gpu/drm/radeon/radeon_mode.h
++++ b/drivers/gpu/drm/radeon/radeon_mode.h
+@@ -210,8 +210,6 @@ enum radeon_connector_table {
+ 	CT_RN50_POWER,
+ 	CT_MAC_X800,
+ 	CT_MAC_G5_9600,
+-	CT_SAM440EP,
+-	CT_MAC_G4_SILVER
+ };
+ 
+ enum radeon_dvo_chip {
+@@ -268,6 +266,7 @@ struct radeon_crtc {
+ 	u16 lut_r[256], lut_g[256], lut_b[256];
+ 	bool enabled;
+ 	bool can_tile;
++	bool in_mode_set;
+ 	uint32_t crtc_offset;
+ 	struct drm_gem_object *cursor_bo;
+ 	uint64_t cursor_addr;
+@@ -469,6 +468,10 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev);
+ 
+ extern struct drm_connector *
+ radeon_get_connector_for_encoder(struct drm_encoder *encoder);
++extern struct drm_connector *
++radeon_get_connector_for_encoder_init(struct drm_encoder *encoder);
++extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
++				    u32 pixel_clock);
+ 
+ extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
+ extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
+@@ -489,6 +492,7 @@ extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+ 				    struct drm_connector *connector);
+ extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
+ extern void radeon_atom_encoder_init(struct radeon_device *rdev);
++extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev);
+ extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
+ 					   int action, uint8_t lane_num,
+ 					   uint8_t lane_set);
+@@ -646,9 +650,9 @@ extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ 				     u16 blue, int regno);
+ extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ 				     u16 *blue, int regno);
+-void radeon_framebuffer_init(struct drm_device *dev,
++int radeon_framebuffer_init(struct drm_device *dev,
+ 			     struct radeon_framebuffer *rfb,
+-			     struct drm_mode_fb_cmd *mode_cmd,
++			     struct drm_mode_fb_cmd2 *mode_cmd,
+ 			     struct drm_gem_object *obj);
+ 
+ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index f3ae607..df6a4db 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -46,6 +46,20 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
+  * function are calling it.
+  */
+ 
++void radeon_bo_clear_va(struct radeon_bo *bo)
++{
++	struct radeon_bo_va *bo_va, *tmp;
++
++	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
++		/* remove from all vm address space */
++		mutex_lock(&bo_va->vm->mutex);
++		list_del(&bo_va->vm_list);
++		mutex_unlock(&bo_va->vm->mutex);
++		list_del(&bo_va->bo_list);
++		kfree(bo_va);
++	}
++}
++
+ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+ {
+ 	struct radeon_bo *bo;
+@@ -55,6 +69,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+ 	list_del_init(&bo->list);
+ 	mutex_unlock(&bo->rdev->gem.mutex);
+ 	radeon_bo_clear_surface_reg(bo);
++	radeon_bo_clear_va(bo);
+ 	drm_gem_object_release(&bo->gem_base);
+ 	kfree(bo);
+ }
+@@ -95,6 +110,7 @@ int radeon_bo_create(struct radeon_device *rdev,
+ 	enum ttm_bo_type type;
+ 	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+ 	unsigned long max_size = 0;
++	size_t acc_size;
+ 	int r;
+ 
+ 	size = ALIGN(size, PAGE_SIZE);
+@@ -117,6 +133,9 @@ int radeon_bo_create(struct radeon_device *rdev,
+ 		return -ENOMEM;
+ 	}
+ 
++	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
++				       sizeof(struct radeon_bo));
++
+ retry:
+ 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+ 	if (bo == NULL)
+@@ -130,12 +149,13 @@ retry:
+ 	bo->gem_base.driver_private = NULL;
+ 	bo->surface_reg = -1;
+ 	INIT_LIST_HEAD(&bo->list);
++	INIT_LIST_HEAD(&bo->va);
+ 	radeon_ttm_placement_from_domain(bo, domain);
+ 	/* Kernel allocation are uninterruptible */
+ 	mutex_lock(&rdev->vram_mutex);
+ 	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
+-			&bo->placement, page_align, 0, !kernel, NULL, size,
+-			&radeon_ttm_bo_destroy);
++			&bo->placement, page_align, 0, !kernel, NULL,
++			acc_size, &radeon_ttm_bo_destroy);
+ 	mutex_unlock(&rdev->vram_mutex);
+ 	if (unlikely(r != 0)) {
+ 		if (r != -ERESTARTSYS) {
+@@ -213,7 +233,18 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
+ 		bo->pin_count++;
+ 		if (gpu_addr)
+ 			*gpu_addr = radeon_bo_gpu_offset(bo);
+-		WARN_ON_ONCE(max_offset != 0);
++
++		if (max_offset != 0) {
++			u64 domain_start;
++
++			if (domain == RADEON_GEM_DOMAIN_VRAM)
++				domain_start = bo->rdev->mc.vram_start;
++			else
++				domain_start = bo->rdev->mc.gtt_start;
++			WARN_ON_ONCE(max_offset <
++				     (radeon_bo_gpu_offset(bo) - domain_start));
++		}
++
+ 		return 0;
+ 	}
+ 	radeon_ttm_placement_from_domain(bo, domain);
+@@ -441,8 +472,54 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
+ int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+ 				uint32_t tiling_flags, uint32_t pitch)
+ {
++	struct radeon_device *rdev = bo->rdev;
+ 	int r;
+ 
++	if (rdev->family >= CHIP_CEDAR) {
++		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;
++
++		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
++		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
++		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
++		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
++		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
++		switch (bankw) {
++		case 0:
++		case 1:
++		case 2:
++		case 4:
++		case 8:
++			break;
++		default:
++			return -EINVAL;
++		}
++		switch (bankh) {
++		case 0:
++		case 1:
++		case 2:
++		case 4:
++		case 8:
++			break;
++		default:
++			return -EINVAL;
++		}
++		switch (mtaspect) {
++		case 0:
++		case 1:
++		case 2:
++		case 4:
++		case 8:
++			break;
++		default:
++			return -EINVAL;
++		}
++		if (tilesplit > 6) {
++			return -EINVAL;
++		}
++		if (stilesplit > 6) {
++			return -EINVAL;
++		}
++	}
+ 	r = radeon_bo_reserve(bo, false);
+ 	if (unlikely(r != 0))
+ 		return r;
+@@ -499,6 +576,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+ 		return;
+ 	rbo = container_of(bo, struct radeon_bo, tbo);
+ 	radeon_bo_check_tiling(rbo, 0, 1);
++	radeon_vm_bo_invalidate(rbo->rdev, rbo);
+ }
+ 
+ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+@@ -572,3 +650,16 @@ int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
+ 	}
+ 	return 0;
+ }
++
++/* object have to be reserved */
++struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
++{
++	struct radeon_bo_va *bo_va;
++
++	list_for_each_entry(bo_va, &rbo->va, bo_list) {
++		if (bo_va->vm == vm) {
++			return bo_va;
++		}
++	}
++	return NULL;
++}
+diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
+index fb3f433..f9104be 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.h
++++ b/drivers/gpu/drm/radeon/radeon_object.h
+@@ -83,6 +83,16 @@ static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
+ 	return !!atomic_read(&bo->tbo.reserved);
+ }
+ 
++static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
++{
++	return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
++}
++
++static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
++{
++	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
++}
++
+ /**
+  * radeon_bo_mmap_offset - return mmap offset of bo
+  * @bo:	radeon object for which we query the offset
+@@ -130,4 +140,26 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+ 					struct ttm_mem_reg *mem);
+ extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+ extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
++extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
++					 struct radeon_vm *vm);
++
++/*
++ * sub allocation
++ */
++extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
++				     struct radeon_sa_manager *sa_manager,
++				     unsigned size, u32 domain);
++extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
++				      struct radeon_sa_manager *sa_manager);
++extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
++				      struct radeon_sa_manager *sa_manager);
++extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
++					struct radeon_sa_manager *sa_manager);
++extern int radeon_sa_bo_new(struct radeon_device *rdev,
++			    struct radeon_sa_manager *sa_manager,
++			    struct radeon_sa_bo *sa_bo,
++			    unsigned size, unsigned align);
++extern void radeon_sa_bo_free(struct radeon_device *rdev,
++			      struct radeon_sa_bo *sa_bo);
++
+ #endif
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index ebd6c51..b8459bd 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -221,7 +221,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
+ 		}
+ 
+ 		/* set memory clock */
+-		if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
++		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
+ 			radeon_pm_debug_check_in_vbl(rdev, false);
+ 			radeon_set_memory_clock(rdev, mclk);
+ 			radeon_pm_debug_check_in_vbl(rdev, true);
+@@ -252,7 +252,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
+ 
+ 	mutex_lock(&rdev->ddev->struct_mutex);
+ 	mutex_lock(&rdev->vram_mutex);
+-	mutex_lock(&rdev->cp.mutex);
++	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
++		if (rdev->ring[i].ring_obj)
++			mutex_lock(&rdev->ring[i].mutex);
++	}
+ 
+ 	/* gui idle int has issues on older chips it seems */
+ 	if (rdev->family >= CHIP_R600) {
+@@ -268,12 +271,13 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
+ 			radeon_irq_set(rdev);
+ 		}
+ 	} else {
+-		if (rdev->cp.ready) {
++		struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
++		if (ring->ready) {
+ 			struct radeon_fence *fence;
+-			radeon_ring_alloc(rdev, 64);
+-			radeon_fence_create(rdev, &fence);
++			radeon_ring_alloc(rdev, ring, 64);
++			radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
+ 			radeon_fence_emit(rdev, fence);
+-			radeon_ring_commit(rdev);
++			radeon_ring_commit(rdev, ring);
+ 			radeon_fence_wait(fence, false);
+ 			radeon_fence_unref(&fence);
+ 		}
+@@ -307,7 +311,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
+ 
+ 	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
+ 
+-	mutex_unlock(&rdev->cp.mutex);
++	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
++		if (rdev->ring[i].ring_obj)
++			mutex_unlock(&rdev->ring[i].mutex);
++	}
+ 	mutex_unlock(&rdev->vram_mutex);
+ 	mutex_unlock(&rdev->ddev->struct_mutex);
+ }
+@@ -467,6 +474,9 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
+ 	case THERMAL_TYPE_SUMO:
+ 		temp = sumo_get_temp(rdev);
+ 		break;
++	case THERMAL_TYPE_SI:
++		temp = si_get_temp(rdev);
++		break;
+ 	default:
+ 		temp = 0;
+ 		break;
+@@ -507,6 +517,10 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
+ 	case THERMAL_TYPE_EVERGREEN:
+ 	case THERMAL_TYPE_NI:
+ 	case THERMAL_TYPE_SUMO:
++	case THERMAL_TYPE_SI:
++		/* No support for TN yet */
++		if (rdev->family == CHIP_ARUBA)
++			return err;
+ 		rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
+ 		if (IS_ERR(rdev->pm.int_hwmon_dev)) {
+ 			err = PTR_ERR(rdev->pm.int_hwmon_dev);
+@@ -799,19 +813,14 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
+ 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+ 	mutex_lock(&rdev->pm.mutex);
+ 	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
+-		unsigned long irq_flags;
+ 		int not_processed = 0;
++		int i;
+ 
+-		read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+-		if (!list_empty(&rdev->fence_drv.emited)) {
+-			struct list_head *ptr;
+-			list_for_each(ptr, &rdev->fence_drv.emited) {
+-				/* count up to 3, that's enought info */
+-				if (++not_processed >= 3)
+-					break;
+-			}
++		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
++			not_processed += radeon_fence_count_emitted(rdev, i);
++			if (not_processed >= 3)
++				break;
+ 		}
+-		read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ 
+ 		if (not_processed >= 3) { /* should upclock */
+ 			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
+@@ -865,11 +874,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
+ 	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
+ 	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ 	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
+-	if (rdev->asic->get_memory_clock)
++	if (rdev->asic->pm.get_memory_clock)
+ 		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+ 	if (rdev->pm.current_vddc)
+ 		seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
+-	if (rdev->asic->get_pcie_lanes)
++	if (rdev->asic->pm.get_pcie_lanes)
+ 		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
+index b4ce864..5d8f735 100644
+--- a/drivers/gpu/drm/radeon/radeon_reg.h
++++ b/drivers/gpu/drm/radeon/radeon_reg.h
+@@ -56,6 +56,7 @@
+ #include "r600_reg.h"
+ #include "evergreen_reg.h"
+ #include "ni_reg.h"
++#include "si_reg.h"
+ 
+ #define RADEON_MC_AGP_LOCATION		0x014c
+ #define		RADEON_MC_AGP_START_MASK	0x0000FFFF
+@@ -539,9 +540,11 @@
+ 
+ #define RADEON_CRTC2_PITCH                  0x032c
+ #define RADEON_CRTC_STATUS                  0x005c
++#       define RADEON_CRTC_VBLANK_CUR       (1 <<  0)
+ #       define RADEON_CRTC_VBLANK_SAVE      (1 <<  1)
+ #       define RADEON_CRTC_VBLANK_SAVE_CLEAR  (1 <<  1)
+ #define RADEON_CRTC2_STATUS                  0x03fc
++#       define RADEON_CRTC2_VBLANK_CUR       (1 <<  0)
+ #       define RADEON_CRTC2_VBLANK_SAVE      (1 <<  1)
+ #       define RADEON_CRTC2_VBLANK_SAVE_CLEAR  (1 <<  1)
+ #define RADEON_CRTC_V_SYNC_STRT_WID         0x020c
+diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
+index 49d5820..cc33b3d 100644
+--- a/drivers/gpu/drm/radeon/radeon_ring.c
++++ b/drivers/gpu/drm/radeon/radeon_ring.c
+@@ -34,6 +34,7 @@
+ #include "atom.h"
+ 
+ int radeon_debugfs_ib_init(struct radeon_device *rdev);
++int radeon_debugfs_ring_init(struct radeon_device *rdev);
+ 
+ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+ {
+@@ -60,105 +61,107 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+ 	return idx_value;
+ }
+ 
+-void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
++void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+ {
+ #if DRM_DEBUG_CODE
+-	if (rdev->cp.count_dw <= 0) {
++	if (ring->count_dw <= 0) {
+ 		DRM_ERROR("radeon: writting more dword to ring than expected !\n");
+ 	}
+ #endif
+-	rdev->cp.ring[rdev->cp.wptr++] = v;
+-	rdev->cp.wptr &= rdev->cp.ptr_mask;
+-	rdev->cp.count_dw--;
+-	rdev->cp.ring_free_dw--;
++	ring->ring[ring->wptr++] = v;
++	ring->wptr &= ring->ptr_mask;
++	ring->count_dw--;
++	ring->ring_free_dw--;
+ }
+ 
+-void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
++/*
++ * IB.
++ */
++bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
+ {
+-	struct radeon_ib *ib, *n;
+-
+-	list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
+-		list_del(&ib->list);
+-		vfree(ib->ptr);
+-		kfree(ib);
++	bool done = false;
++
++	/* only free ib which have been emited */
++	if (ib->fence && ib->fence->emitted) {
++		if (radeon_fence_signaled(ib->fence)) {
++			radeon_fence_unref(&ib->fence);
++			radeon_sa_bo_free(rdev, &ib->sa_bo);
++			done = true;
++		}
+ 	}
++	return done;
+ }
+ 
+-void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
+-{
+-	struct radeon_ib *bib;
+-
+-	bib = kmalloc(sizeof(*bib), GFP_KERNEL);
+-	if (bib == NULL)
+-		return;
+-	bib->ptr = vmalloc(ib->length_dw * 4);
+-	if (bib->ptr == NULL) {
+-		kfree(bib);
+-		return;
+-	}
+-	memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
+-	bib->length_dw = ib->length_dw;
+-	mutex_lock(&rdev->ib_pool.mutex);
+-	list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
+-	mutex_unlock(&rdev->ib_pool.mutex);
+-}
+-
+-/*
+- * IB.
+- */
+-int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
++int radeon_ib_get(struct radeon_device *rdev, int ring,
++		  struct radeon_ib **ib, unsigned size)
+ {
+ 	struct radeon_fence *fence;
+-	struct radeon_ib *nib;
+-	int r = 0, i, c;
++	unsigned cretry = 0;
++	int r = 0, i, idx;
+ 
+ 	*ib = NULL;
+-	r = radeon_fence_create(rdev, &fence);
++	/* align size on 256 bytes */
++	size = ALIGN(size, 256);
++
++	r = radeon_fence_create(rdev, &fence, ring);
+ 	if (r) {
+ 		dev_err(rdev->dev, "failed to create fence for new IB\n");
+ 		return r;
+ 	}
+-	mutex_lock(&rdev->ib_pool.mutex);
+-	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+-		i &= (RADEON_IB_POOL_SIZE - 1);
+-		if (rdev->ib_pool.ibs[i].free) {
+-			nib = &rdev->ib_pool.ibs[i];
+-			break;
+-		}
+-	}
+-	if (nib == NULL) {
+-		/* This should never happen, it means we allocated all
+-		 * IB and haven't scheduled one yet, return EBUSY to
+-		 * userspace hoping that on ioctl recall we get better
+-		 * luck
+-		 */
+-		dev_err(rdev->dev, "no free indirect buffer !\n");
+-		mutex_unlock(&rdev->ib_pool.mutex);
++
++	radeon_mutex_lock(&rdev->ib_pool.mutex);
++	idx = rdev->ib_pool.head_id;
++retry:
++	if (cretry > 5) {
++		dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
++		radeon_mutex_unlock(&rdev->ib_pool.mutex);
+ 		radeon_fence_unref(&fence);
+-		return -EBUSY;
++		return -ENOMEM;
+ 	}
+-	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+-	nib->free = false;
+-	if (nib->fence) {
+-		mutex_unlock(&rdev->ib_pool.mutex);
+-		r = radeon_fence_wait(nib->fence, false);
+-		if (r) {
+-			dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+-				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+-			mutex_lock(&rdev->ib_pool.mutex);
+-			nib->free = true;
+-			mutex_unlock(&rdev->ib_pool.mutex);
+-			radeon_fence_unref(&fence);
+-			return r;
++	cretry++;
++	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
++		radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
++		if (rdev->ib_pool.ibs[idx].fence == NULL) {
++			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
++					     &rdev->ib_pool.ibs[idx].sa_bo,
++					     size, 256);
++			if (!r) {
++				*ib = &rdev->ib_pool.ibs[idx];
++				(*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
++				(*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
++				(*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
++				(*ib)->gpu_addr += (*ib)->sa_bo.offset;
++				(*ib)->fence = fence;
++				(*ib)->vm_id = 0;
++				(*ib)->is_const_ib = false;
++				/* ib are most likely to be allocated in a ring fashion
++				 * thus rdev->ib_pool.head_id should be the id of the
++				 * oldest ib
++				 */
++				rdev->ib_pool.head_id = (1 + idx);
++				rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
++				radeon_mutex_unlock(&rdev->ib_pool.mutex);
++				return 0;
++			}
+ 		}
+-		mutex_lock(&rdev->ib_pool.mutex);
++		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ 	}
+-	radeon_fence_unref(&nib->fence);
+-	nib->fence = fence;
+-	nib->length_dw = 0;
+-	mutex_unlock(&rdev->ib_pool.mutex);
+-	*ib = nib;
+-	return 0;
++	/* this should be rare event, ie all ib scheduled none signaled yet.
++	 */
++	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
++		if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
++			r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
++			if (!r) {
++				goto retry;
++			}
++			/* an error happened */
++			break;
++		}
++		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
++	}
++	radeon_mutex_unlock(&rdev->ib_pool.mutex);
++	radeon_fence_unref(&fence);
++	return r;
+ }
+ 
+ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
+@@ -169,247 +172,258 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
+ 	if (tmp == NULL) {
+ 		return;
+ 	}
+-	if (!tmp->fence->emited)
++	radeon_mutex_lock(&rdev->ib_pool.mutex);
++	if (tmp->fence && !tmp->fence->emitted) {
++		radeon_sa_bo_free(rdev, &tmp->sa_bo);
+ 		radeon_fence_unref(&tmp->fence);
+-	mutex_lock(&rdev->ib_pool.mutex);
+-	tmp->free = true;
+-	mutex_unlock(&rdev->ib_pool.mutex);
++	}
++	radeon_mutex_unlock(&rdev->ib_pool.mutex);
+ }
+ 
+ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
+ {
++	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ 	int r = 0;
+ 
+-	if (!ib->length_dw || !rdev->cp.ready) {
++	if (!ib->length_dw || !ring->ready) {
+ 		/* TODO: Nothings in the ib we should report. */
+ 		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
+ 		return -EINVAL;
+ 	}
+ 
+ 	/* 64 dwords should be enough for fence too */
+-	r = radeon_ring_lock(rdev, 64);
++	r = radeon_ring_lock(rdev, ring, 64);
+ 	if (r) {
+ 		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
+ 		return r;
+ 	}
+-	radeon_ring_ib_execute(rdev, ib);
++	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
+ 	radeon_fence_emit(rdev, ib->fence);
+-	mutex_lock(&rdev->ib_pool.mutex);
+-	/* once scheduled IB is considered free and protected by the fence */
+-	ib->free = true;
+-	mutex_unlock(&rdev->ib_pool.mutex);
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_unlock_commit(rdev, ring);
+ 	return 0;
+ }
+ 
+ int radeon_ib_pool_init(struct radeon_device *rdev)
+ {
+-	void *ptr;
+-	uint64_t gpu_addr;
+-	int i;
+-	int r = 0;
++	struct radeon_sa_manager tmp;
++	int i, r;
+ 
+-	if (rdev->ib_pool.robj)
+-		return 0;
+-	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
+-	/* Allocate 1M object buffer */
+-	r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
+-			     PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
+-			     &rdev->ib_pool.robj);
++	r = radeon_sa_bo_manager_init(rdev, &tmp,
++				      RADEON_IB_POOL_SIZE*64*1024,
++				      RADEON_GEM_DOMAIN_GTT);
+ 	if (r) {
+-		DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
+ 		return r;
+ 	}
+-	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+-	if (unlikely(r != 0))
+-		return r;
+-	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+-	if (r) {
+-		radeon_bo_unreserve(rdev->ib_pool.robj);
+-		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
+-		return r;
+-	}
+-	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
+-	radeon_bo_unreserve(rdev->ib_pool.robj);
+-	if (r) {
+-		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
+-		return r;
++
++	radeon_mutex_lock(&rdev->ib_pool.mutex);
++	if (rdev->ib_pool.ready) {
++		radeon_mutex_unlock(&rdev->ib_pool.mutex);
++		radeon_sa_bo_manager_fini(rdev, &tmp);
++		return 0;
+ 	}
+-	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+-		unsigned offset;
+ 
+-		offset = i * 64 * 1024;
+-		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
+-		rdev->ib_pool.ibs[i].ptr = ptr + offset;
++	rdev->ib_pool.sa_manager = tmp;
++	INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
++	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
++		rdev->ib_pool.ibs[i].fence = NULL;
+ 		rdev->ib_pool.ibs[i].idx = i;
+ 		rdev->ib_pool.ibs[i].length_dw = 0;
+-		rdev->ib_pool.ibs[i].free = true;
++		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
+ 	}
+ 	rdev->ib_pool.head_id = 0;
+ 	rdev->ib_pool.ready = true;
+ 	DRM_INFO("radeon: ib pool ready.\n");
++
+ 	if (radeon_debugfs_ib_init(rdev)) {
+ 		DRM_ERROR("Failed to register debugfs file for IB !\n");
+ 	}
+-	return r;
++	if (radeon_debugfs_ring_init(rdev)) {
++		DRM_ERROR("Failed to register debugfs file for rings !\n");
++	}
++	radeon_mutex_unlock(&rdev->ib_pool.mutex);
++	return 0;
+ }
+ 
+ void radeon_ib_pool_fini(struct radeon_device *rdev)
+ {
+-	int r;
+-	struct radeon_bo *robj;
++	unsigned i;
+ 
+-	if (!rdev->ib_pool.ready) {
+-		return;
+-	}
+-	mutex_lock(&rdev->ib_pool.mutex);
+-	radeon_ib_bogus_cleanup(rdev);
+-	robj = rdev->ib_pool.robj;
+-	rdev->ib_pool.robj = NULL;
+-	mutex_unlock(&rdev->ib_pool.mutex);
+-
+-	if (robj) {
+-		r = radeon_bo_reserve(robj, false);
+-		if (likely(r == 0)) {
+-			radeon_bo_kunmap(robj);
+-			radeon_bo_unpin(robj);
+-			radeon_bo_unreserve(robj);
++	radeon_mutex_lock(&rdev->ib_pool.mutex);
++	if (rdev->ib_pool.ready) {
++		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
++			radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
++			radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
+ 		}
+-		radeon_bo_unref(&robj);
++		radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
++		rdev->ib_pool.ready = false;
+ 	}
++	radeon_mutex_unlock(&rdev->ib_pool.mutex);
++}
++
++int radeon_ib_pool_start(struct radeon_device *rdev)
++{
++	return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
+ }
+ 
++int radeon_ib_pool_suspend(struct radeon_device *rdev)
++{
++	return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
++}
+ 
+ /*
+  * Ring.
+  */
+-void radeon_ring_free_size(struct radeon_device *rdev)
++int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+-	if (rdev->wb.enabled)
+-		rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
+-	else {
+-		if (rdev->family >= CHIP_R600)
+-			rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
+-		else
+-			rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
++	/* r1xx-r5xx only has CP ring */
++	if (rdev->family < CHIP_R600)
++		return RADEON_RING_TYPE_GFX_INDEX;
++
++	if (rdev->family >= CHIP_CAYMAN) {
++		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
++			return CAYMAN_RING_TYPE_CP1_INDEX;
++		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
++			return CAYMAN_RING_TYPE_CP2_INDEX;
+ 	}
++	return RADEON_RING_TYPE_GFX_INDEX;
++}
++
++void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
++{
++	u32 rptr;
++
++	if (rdev->wb.enabled)
++		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
++	else
++		rptr = RREG32(ring->rptr_reg);
++	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
+ 	/* This works because ring_size is a power of 2 */
+-	rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
+-	rdev->cp.ring_free_dw -= rdev->cp.wptr;
+-	rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
+-	if (!rdev->cp.ring_free_dw) {
+-		rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
++	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
++	ring->ring_free_dw -= ring->wptr;
++	ring->ring_free_dw &= ring->ptr_mask;
++	if (!ring->ring_free_dw) {
++		ring->ring_free_dw = ring->ring_size / 4;
+ 	}
+ }
+ 
+-int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
++
++int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
+ {
+ 	int r;
+ 
+ 	/* Align requested size with padding so unlock_commit can
+ 	 * pad safely */
+-	ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
+-	while (ndw > (rdev->cp.ring_free_dw - 1)) {
+-		radeon_ring_free_size(rdev);
+-		if (ndw < rdev->cp.ring_free_dw) {
++	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
++	while (ndw > (ring->ring_free_dw - 1)) {
++		radeon_ring_free_size(rdev, ring);
++		if (ndw < ring->ring_free_dw) {
+ 			break;
+ 		}
+-		r = radeon_fence_wait_next(rdev);
++		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
+ 		if (r)
+ 			return r;
+ 	}
+-	rdev->cp.count_dw = ndw;
+-	rdev->cp.wptr_old = rdev->cp.wptr;
++	ring->count_dw = ndw;
++	ring->wptr_old = ring->wptr;
+ 	return 0;
+ }
+ 
+-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
++int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
+ {
+ 	int r;
+ 
+-	mutex_lock(&rdev->cp.mutex);
+-	r = radeon_ring_alloc(rdev, ndw);
++	mutex_lock(&ring->mutex);
++	r = radeon_ring_alloc(rdev, ring, ndw);
+ 	if (r) {
+-		mutex_unlock(&rdev->cp.mutex);
++		mutex_unlock(&ring->mutex);
+ 		return r;
+ 	}
+ 	return 0;
+ }
+ 
+-void radeon_ring_commit(struct radeon_device *rdev)
++void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+ 	unsigned count_dw_pad;
+ 	unsigned i;
+ 
+ 	/* We pad to match fetch size */
+-	count_dw_pad = (rdev->cp.align_mask + 1) -
+-		       (rdev->cp.wptr & rdev->cp.align_mask);
++	count_dw_pad = (ring->align_mask + 1) -
++		       (ring->wptr & ring->align_mask);
+ 	for (i = 0; i < count_dw_pad; i++) {
+-		radeon_ring_write(rdev, 2 << 30);
++		radeon_ring_write(ring, ring->nop);
+ 	}
+ 	DRM_MEMORYBARRIER();
+-	radeon_cp_commit(rdev);
++	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
++	(void)RREG32(ring->wptr_reg);
+ }
+ 
+-void radeon_ring_unlock_commit(struct radeon_device *rdev)
++void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+-	radeon_ring_commit(rdev);
+-	mutex_unlock(&rdev->cp.mutex);
++	radeon_ring_commit(rdev, ring);
++	mutex_unlock(&ring->mutex);
+ }
+ 
+-void radeon_ring_unlock_undo(struct radeon_device *rdev)
++void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+-	rdev->cp.wptr = rdev->cp.wptr_old;
+-	mutex_unlock(&rdev->cp.mutex);
++	ring->wptr = ring->wptr_old;
++	mutex_unlock(&ring->mutex);
+ }
+ 
+-int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
++int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
++		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
++		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
+ {
+ 	int r;
+ 
+-	rdev->cp.ring_size = ring_size;
++	ring->ring_size = ring_size;
++	ring->rptr_offs = rptr_offs;
++	ring->rptr_reg = rptr_reg;
++	ring->wptr_reg = wptr_reg;
++	ring->ptr_reg_shift = ptr_reg_shift;
++	ring->ptr_reg_mask = ptr_reg_mask;
++	ring->nop = nop;
+ 	/* Allocate ring buffer */
+-	if (rdev->cp.ring_obj == NULL) {
+-		r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
++	if (ring->ring_obj == NULL) {
++		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
+ 					RADEON_GEM_DOMAIN_GTT,
+-					&rdev->cp.ring_obj);
++					&ring->ring_obj);
+ 		if (r) {
+ 			dev_err(rdev->dev, "(%d) ring create failed\n", r);
+ 			return r;
+ 		}
+-		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
++		r = radeon_bo_reserve(ring->ring_obj, false);
+ 		if (unlikely(r != 0))
+ 			return r;
+-		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
+-					&rdev->cp.gpu_addr);
++		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
++					&ring->gpu_addr);
+ 		if (r) {
+-			radeon_bo_unreserve(rdev->cp.ring_obj);
++			radeon_bo_unreserve(ring->ring_obj);
+ 			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
+ 			return r;
+ 		}
+-		r = radeon_bo_kmap(rdev->cp.ring_obj,
+-				       (void **)&rdev->cp.ring);
+-		radeon_bo_unreserve(rdev->cp.ring_obj);
++		r = radeon_bo_kmap(ring->ring_obj,
++				       (void **)&ring->ring);
++		radeon_bo_unreserve(ring->ring_obj);
+ 		if (r) {
+ 			dev_err(rdev->dev, "(%d) ring map failed\n", r);
+ 			return r;
+ 		}
+ 	}
+-	rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
+-	rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
++	ring->ptr_mask = (ring->ring_size / 4) - 1;
++	ring->ring_free_dw = ring->ring_size / 4;
+ 	return 0;
+ }
+ 
+-void radeon_ring_fini(struct radeon_device *rdev)
++void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+ 	int r;
+ 	struct radeon_bo *ring_obj;
+ 
+-	mutex_lock(&rdev->cp.mutex);
+-	ring_obj = rdev->cp.ring_obj;
+-	rdev->cp.ring = NULL;
+-	rdev->cp.ring_obj = NULL;
+-	mutex_unlock(&rdev->cp.mutex);
++	mutex_lock(&ring->mutex);
++	ring_obj = ring->ring_obj;
++	ring->ring = NULL;
++	ring->ring_obj = NULL;
++	mutex_unlock(&ring->mutex);
+ 
+ 	if (ring_obj) {
+ 		r = radeon_bo_reserve(ring_obj, false);
+@@ -422,78 +436,96 @@ void radeon_ring_fini(struct radeon_device *rdev)
+ 	}
+ }
+ 
+-
+ /*
+  * Debugfs info
+  */
+ #if defined(CONFIG_DEBUG_FS)
+-static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
++
++static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
+ {
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+-	struct radeon_ib *ib = node->info_ent->data;
+-	unsigned i;
+-
+-	if (ib == NULL) {
+-		return 0;
+-	}
+-	seq_printf(m, "IB %04u\n", ib->idx);
+-	seq_printf(m, "IB fence %p\n", ib->fence);
+-	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
+-	for (i = 0; i < ib->length_dw; i++) {
+-		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
++	struct drm_device *dev = node->minor->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	int ridx = *(int*)node->info_ent->data;
++	struct radeon_ring *ring = &rdev->ring[ridx];
++	unsigned count, i, j;
++
++	radeon_ring_free_size(rdev, ring);
++	count = (ring->ring_size / 4) - ring->ring_free_dw;
++	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
++	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
++	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
++	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
++	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
++	seq_printf(m, "%u dwords in ring\n", count);
++	i = ring->rptr;
++	for (j = 0; j <= count; j++) {
++		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
++		i = (i + 1) & ring->ptr_mask;
+ 	}
+ 	return 0;
+ }
+ 
+-static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
++static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
++static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
++static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
++
++static struct drm_info_list radeon_debugfs_ring_info_list[] = {
++	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
++	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
++	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
++};
++
++static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+ {
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+-	struct radeon_device *rdev = node->info_ent->data;
+-	struct radeon_ib *ib;
++	struct drm_device *dev = node->minor->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
+ 	unsigned i;
+ 
+-	mutex_lock(&rdev->ib_pool.mutex);
+-	if (list_empty(&rdev->ib_pool.bogus_ib)) {
+-		mutex_unlock(&rdev->ib_pool.mutex);
+-		seq_printf(m, "no bogus IB recorded\n");
++	if (ib == NULL) {
+ 		return 0;
+ 	}
+-	ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
+-	list_del_init(&ib->list);
+-	mutex_unlock(&rdev->ib_pool.mutex);
++	seq_printf(m, "IB %04u\n", ib->idx);
++	seq_printf(m, "IB fence %p\n", ib->fence);
+ 	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
+ 	for (i = 0; i < ib->length_dw; i++) {
+ 		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
+ 	}
+-	vfree(ib->ptr);
+-	kfree(ib);
+ 	return 0;
+ }
+ 
+ static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
+ static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
++static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
++#endif
+ 
+-static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
+-	{"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
+-};
++int radeon_debugfs_ring_init(struct radeon_device *rdev)
++{
++#if defined(CONFIG_DEBUG_FS)
++	if (rdev->family >= CHIP_CAYMAN)
++		return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
++						ARRAY_SIZE(radeon_debugfs_ring_info_list));
++	else
++		return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list, 1);
++#else
++	return 0;
+ #endif
++}
+ 
+ int radeon_debugfs_ib_init(struct radeon_device *rdev)
+ {
+ #if defined(CONFIG_DEBUG_FS)
+ 	unsigned i;
+-	int r;
+ 
+-	radeon_debugfs_ib_bogus_info_list[0].data = rdev;
+-	r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
+-	if (r)
+-		return r;
+ 	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ 		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
++		radeon_debugfs_ib_idx[i] = i;
+ 		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
+ 		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
+ 		radeon_debugfs_ib_list[i].driver_features = 0;
+-		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
++		radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
+ 	}
+ 	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
+ 					RADEON_IB_POOL_SIZE);
+diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
+new file mode 100644
+index 0000000..4cce47e
+--- /dev/null
++++ b/drivers/gpu/drm/radeon/radeon_sa.c
+@@ -0,0 +1,189 @@
++/*
++ * Copyright 2011 Red Hat Inc.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ */
++/*
++ * Authors:
++ *    Jerome Glisse <glisse at freedesktop.org>
++ */
++#include "drmP.h"
++#include "drm.h"
++#include "radeon.h"
++
++int radeon_sa_bo_manager_init(struct radeon_device *rdev,
++			      struct radeon_sa_manager *sa_manager,
++			      unsigned size, u32 domain)
++{
++	int r;
++
++	sa_manager->bo = NULL;
++	sa_manager->size = size;
++	sa_manager->domain = domain;
++	INIT_LIST_HEAD(&sa_manager->sa_bo);
++
++	r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
++			     RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
++	if (r) {
++		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
++		return r;
++	}
++
++	return r;
++}
++
++void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
++			       struct radeon_sa_manager *sa_manager)
++{
++	struct radeon_sa_bo *sa_bo, *tmp;
++
++	if (!list_empty(&sa_manager->sa_bo)) {
++		dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
++	}
++	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
++		list_del_init(&sa_bo->list);
++	}
++	radeon_bo_unref(&sa_manager->bo);
++	sa_manager->size = 0;
++}
++
++int radeon_sa_bo_manager_start(struct radeon_device *rdev,
++			       struct radeon_sa_manager *sa_manager)
++{
++	int r;
++
++	if (sa_manager->bo == NULL) {
++		dev_err(rdev->dev, "no bo for sa manager\n");
++		return -EINVAL;
++	}
++
++	/* map the buffer */
++	r = radeon_bo_reserve(sa_manager->bo, false);
++	if (r) {
++		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
++		return r;
++	}
++	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
++	if (r) {
++		radeon_bo_unreserve(sa_manager->bo);
++		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
++		return r;
++	}
++	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
++	radeon_bo_unreserve(sa_manager->bo);
++	return r;
++}
++
++int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
++				 struct radeon_sa_manager *sa_manager)
++{
++	int r;
++
++	if (sa_manager->bo == NULL) {
++		dev_err(rdev->dev, "no bo for sa manager\n");
++		return -EINVAL;
++	}
++
++	r = radeon_bo_reserve(sa_manager->bo, false);
++	if (!r) {
++		radeon_bo_kunmap(sa_manager->bo);
++		radeon_bo_unpin(sa_manager->bo);
++		radeon_bo_unreserve(sa_manager->bo);
++	}
++	return r;
++}
++
++/*
++ * Principe is simple, we keep a list of sub allocation in offset
++ * order (first entry has offset == 0, last entry has the highest
++ * offset).
++ *
++ * When allocating new object we first check if there is room at
++ * the end total_size - (last_object_offset + last_object_size) >=
++ * alloc_size. If so we allocate new object there.
++ *
++ * When there is not enough room at the end, we start waiting for
++ * each sub object until we reach object_offset+object_size >=
++ * alloc_size, this object then become the sub object we return.
++ *
++ * Alignment can't be bigger than page size
++ */
++int radeon_sa_bo_new(struct radeon_device *rdev,
++		     struct radeon_sa_manager *sa_manager,
++		     struct radeon_sa_bo *sa_bo,
++		     unsigned size, unsigned align)
++{
++	struct radeon_sa_bo *tmp;
++	struct list_head *head;
++	unsigned offset = 0, wasted = 0;
++
++	BUG_ON(align > RADEON_GPU_PAGE_SIZE);
++	BUG_ON(size > sa_manager->size);
++
++	/* no one ? */
++	head = sa_manager->sa_bo.prev;
++	if (list_empty(&sa_manager->sa_bo)) {
++		goto out;
++	}
++
++	/* look for a hole big enough */
++	offset = 0;
++	list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
++		/* room before this object ? */
++		if ((tmp->offset - offset) >= size) {
++			head = tmp->list.prev;
++			goto out;
++		}
++		offset = tmp->offset + tmp->size;
++		wasted = offset % align;
++		if (wasted) {
++			wasted = align - wasted;
++		}
++		offset += wasted;
++	}
++	/* room at the end ? */
++	head = sa_manager->sa_bo.prev;
++	tmp = list_entry(head, struct radeon_sa_bo, list);
++	offset = tmp->offset + tmp->size;
++	wasted = offset % align;
++	if (wasted) {
++		wasted = align - wasted;
++	}
++	offset += wasted;
++	if ((sa_manager->size - offset) < size) {
++		/* failed to find somethings big enough */
++		return -ENOMEM;
++	}
++
++out:
++	sa_bo->manager = sa_manager;
++	sa_bo->offset = offset;
++	sa_bo->size = size;
++	list_add(&sa_bo->list, head);
++	return 0;
++}
++
++void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
++{
++	list_del_init(&sa_bo->list);
++}
+diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
+new file mode 100644
+index 0000000..61dd4e3
+--- /dev/null
++++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
+@@ -0,0 +1,178 @@
++/*
++ * Copyright 2011 Christian König.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ */
++/*
++ * Authors:
++ *    Christian König <deathsimple at vodafone.de>
++ */
++#include "drmP.h"
++#include "drm.h"
++#include "radeon.h"
++
++static int radeon_semaphore_add_bo(struct radeon_device *rdev)
++{
++	struct radeon_semaphore_bo *bo;
++	unsigned long irq_flags;
++	uint64_t gpu_addr;
++	uint32_t *cpu_ptr;
++	int r, i;
++
++
++	bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
++	if (bo == NULL) {
++		return -ENOMEM;
++	}
++	INIT_LIST_HEAD(&bo->free);
++	INIT_LIST_HEAD(&bo->list);
++	bo->nused = 0;
++
++	r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
++	if (r) {
++		dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
++		kfree(bo);
++		return r;
++	}
++	gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
++	gpu_addr += bo->ib->sa_bo.offset;
++	cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
++	cpu_ptr += (bo->ib->sa_bo.offset >> 2);
++	for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
++		bo->semaphores[i].gpu_addr = gpu_addr;
++		bo->semaphores[i].cpu_ptr = cpu_ptr;
++		bo->semaphores[i].bo = bo;
++		list_add_tail(&bo->semaphores[i].list, &bo->free);
++		gpu_addr += 8;
++		cpu_ptr += 2;
++	}
++	write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
++	list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
++	write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
++	return 0;
++}
++
++static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
++					   struct radeon_semaphore_bo *bo)
++{
++	radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
++	radeon_fence_unref(&bo->ib->fence);
++	list_del(&bo->list);
++	kfree(bo);
++}
++
++void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
++{
++	struct radeon_semaphore_bo *bo, *n;
++
++	if (list_empty(&rdev->semaphore_drv.bo)) {
++		return;
++	}
++	/* only shrink if first bo has free semaphore */
++	bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
++	if (list_empty(&bo->free)) {
++		return;
++	}
++	list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
++		if (bo->nused)
++			continue;
++		radeon_semaphore_del_bo_locked(rdev, bo);
++	}
++}
++
++int radeon_semaphore_create(struct radeon_device *rdev,
++			    struct radeon_semaphore **semaphore)
++{
++	struct radeon_semaphore_bo *bo;
++	unsigned long irq_flags;
++	bool do_retry = true;
++	int r;
++
++retry:
++	*semaphore = NULL;
++	write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
++	list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
++		if (list_empty(&bo->free))
++			continue;
++		*semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
++		(*semaphore)->cpu_ptr[0] = 0;
++		(*semaphore)->cpu_ptr[1] = 0;
++		list_del(&(*semaphore)->list);
++		bo->nused++;
++		break;
++	}
++	write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
++
++	if (*semaphore == NULL) {
++		if (do_retry) {
++			do_retry = false;
++			r = radeon_semaphore_add_bo(rdev);
++			if (r)
++				return r;
++			goto retry;
++		}
++		return -ENOMEM;
++	}
++
++	return 0;
++}
++
++void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
++			          struct radeon_semaphore *semaphore)
++{
++	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
++}
++
++void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
++			        struct radeon_semaphore *semaphore)
++{
++	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
++}
++
++void radeon_semaphore_free(struct radeon_device *rdev,
++			   struct radeon_semaphore *semaphore)
++{
++	unsigned long irq_flags;
++
++	write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
++	semaphore->bo->nused--;
++	list_add_tail(&semaphore->list, &semaphore->bo->free);
++	radeon_semaphore_shrink_locked(rdev);
++	write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
++}
++
++void radeon_semaphore_driver_fini(struct radeon_device *rdev)
++{
++	struct radeon_semaphore_bo *bo, *n;
++	unsigned long irq_flags;
++
++	write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
++	/* we force to free everything */
++	list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
++		if (!list_empty(&bo->free)) {
++			dev_err(rdev->dev, "still in use semaphore\n");
++		}
++		radeon_semaphore_del_bo_locked(rdev, bo);
++	}
++	write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
++}
+diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
+index 602fa35..dc5dcf4 100644
+--- a/drivers/gpu/drm/radeon/radeon_test.c
++++ b/drivers/gpu/drm/radeon/radeon_test.c
+@@ -42,7 +42,9 @@ void radeon_test_moves(struct radeon_device *rdev)
+ 	/* Number of tests =
+ 	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
+ 	 */
+-	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size;
++	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
++	for (i = 0; i < RADEON_NUM_RINGS; ++i)
++		n -= rdev->ring[i].ring_size;
+ 	if (rdev->wb.wb_obj)
+ 		n -= RADEON_GPU_PAGE_SIZE;
+ 	if (rdev->ih.ring_obj)
+@@ -104,7 +106,7 @@ void radeon_test_moves(struct radeon_device *rdev)
+ 
+ 		radeon_bo_kunmap(gtt_obj[i]);
+ 
+-		r = radeon_fence_create(rdev, &fence);
++		r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+ 		if (r) {
+ 			DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
+ 			goto out_cleanup;
+@@ -153,7 +155,7 @@ void radeon_test_moves(struct radeon_device *rdev)
+ 
+ 		radeon_bo_kunmap(vram_obj);
+ 
+-		r = radeon_fence_create(rdev, &fence);
++		r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+ 		if (r) {
+ 			DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
+ 			goto out_cleanup;
+@@ -232,3 +234,264 @@ out_cleanup:
+ 		printk(KERN_WARNING "Error while testing BO move.\n");
+ 	}
+ }
++
++void radeon_test_ring_sync(struct radeon_device *rdev,
++			   struct radeon_ring *ringA,
++			   struct radeon_ring *ringB)
++{
++	struct radeon_fence *fence1 = NULL, *fence2 = NULL;
++	struct radeon_semaphore *semaphore = NULL;
++	int ridxA = radeon_ring_index(rdev, ringA);
++	int ridxB = radeon_ring_index(rdev, ringB);
++	int r;
++
++	r = radeon_fence_create(rdev, &fence1, ridxA);
++	if (r) {
++		DRM_ERROR("Failed to create sync fence 1\n");
++		goto out_cleanup;
++	}
++	r = radeon_fence_create(rdev, &fence2, ridxA);
++	if (r) {
++		DRM_ERROR("Failed to create sync fence 2\n");
++		goto out_cleanup;
++	}
++
++	r = radeon_semaphore_create(rdev, &semaphore);
++	if (r) {
++		DRM_ERROR("Failed to create semaphore\n");
++		goto out_cleanup;
++	}
++
++	r = radeon_ring_lock(rdev, ringA, 64);
++	if (r) {
++		DRM_ERROR("Failed to lock ring A %d\n", ridxA);
++		goto out_cleanup;
++	}
++	radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
++	radeon_fence_emit(rdev, fence1);
++	radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
++	radeon_fence_emit(rdev, fence2);
++	radeon_ring_unlock_commit(rdev, ringA);
++
++	mdelay(1000);
++
++	if (radeon_fence_signaled(fence1)) {
++		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
++		goto out_cleanup;
++	}
++
++	r = radeon_ring_lock(rdev, ringB, 64);
++	if (r) {
++		DRM_ERROR("Failed to lock ring B %p\n", ringB);
++		goto out_cleanup;
++	}
++	radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
++	radeon_ring_unlock_commit(rdev, ringB);
++
++	r = radeon_fence_wait(fence1, false);
++	if (r) {
++		DRM_ERROR("Failed to wait for sync fence 1\n");
++		goto out_cleanup;
++	}
++
++	mdelay(1000);
++
++	if (radeon_fence_signaled(fence2)) {
++		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
++		goto out_cleanup;
++	}
++
++	r = radeon_ring_lock(rdev, ringB, 64);
++	if (r) {
++		DRM_ERROR("Failed to lock ring B %p\n", ringB);
++		goto out_cleanup;
++	}
++	radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
++	radeon_ring_unlock_commit(rdev, ringB);
++
++	r = radeon_fence_wait(fence2, false);
++	if (r) {
++		DRM_ERROR("Failed to wait for sync fence 1\n");
++		goto out_cleanup;
++	}
++
++out_cleanup:
++	if (semaphore)
++		radeon_semaphore_free(rdev, semaphore);
++
++	if (fence1)
++		radeon_fence_unref(&fence1);
++
++	if (fence2)
++		radeon_fence_unref(&fence2);
++
++	if (r)
++		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
++}
++
++void radeon_test_ring_sync2(struct radeon_device *rdev,
++			    struct radeon_ring *ringA,
++			    struct radeon_ring *ringB,
++			    struct radeon_ring *ringC)
++{
++	struct radeon_fence *fenceA = NULL, *fenceB = NULL;
++	struct radeon_semaphore *semaphore = NULL;
++	int ridxA = radeon_ring_index(rdev, ringA);
++	int ridxB = radeon_ring_index(rdev, ringB);
++	int ridxC = radeon_ring_index(rdev, ringC);
++	bool sigA, sigB;
++	int i, r;
++
++	r = radeon_fence_create(rdev, &fenceA, ridxA);
++	if (r) {
++		DRM_ERROR("Failed to create sync fence 1\n");
++		goto out_cleanup;
++	}
++	r = radeon_fence_create(rdev, &fenceB, ridxB);
++	if (r) {
++		DRM_ERROR("Failed to create sync fence 2\n");
++		goto out_cleanup;
++	}
++
++	r = radeon_semaphore_create(rdev, &semaphore);
++	if (r) {
++		DRM_ERROR("Failed to create semaphore\n");
++		goto out_cleanup;
++	}
++
++	r = radeon_ring_lock(rdev, ringA, 64);
++	if (r) {
++		DRM_ERROR("Failed to lock ring A %d\n", ridxA);
++		goto out_cleanup;
++	}
++	radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
++	radeon_fence_emit(rdev, fenceA);
++	radeon_ring_unlock_commit(rdev, ringA);
++
++	r = radeon_ring_lock(rdev, ringB, 64);
++	if (r) {
++		DRM_ERROR("Failed to lock ring B %d\n", ridxB);
++		goto out_cleanup;
++	}
++	radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
++	radeon_fence_emit(rdev, fenceB);
++	radeon_ring_unlock_commit(rdev, ringB);
++
++	mdelay(1000);
++
++	if (radeon_fence_signaled(fenceA)) {
++		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
++		goto out_cleanup;
++	}
++	if (radeon_fence_signaled(fenceB)) {
++		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
++		goto out_cleanup;
++	}
++
++	r = radeon_ring_lock(rdev, ringC, 64);
++	if (r) {
++		DRM_ERROR("Failed to lock ring B %p\n", ringC);
++		goto out_cleanup;
++	}
++	radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
++	radeon_ring_unlock_commit(rdev, ringC);
++
++	for (i = 0; i < 30; ++i) {
++		mdelay(100);
++		sigA = radeon_fence_signaled(fenceA);
++		sigB = radeon_fence_signaled(fenceB);
++		if (sigA || sigB)
++			break;
++	}
++
++	if (!sigA && !sigB) {
++		DRM_ERROR("Neither fence A nor B has been signaled\n");
++		goto out_cleanup;
++	} else if (sigA && sigB) {
++		DRM_ERROR("Both fence A and B has been signaled\n");
++		goto out_cleanup;
++	}
++
++	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
++
++	r = radeon_ring_lock(rdev, ringC, 64);
++	if (r) {
++		DRM_ERROR("Failed to lock ring B %p\n", ringC);
++		goto out_cleanup;
++	}
++	radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
++	radeon_ring_unlock_commit(rdev, ringC);
++
++	mdelay(1000);
++
++	r = radeon_fence_wait(fenceA, false);
++	if (r) {
++		DRM_ERROR("Failed to wait for sync fence A\n");
++		goto out_cleanup;
++	}
++	r = radeon_fence_wait(fenceB, false);
++	if (r) {
++		DRM_ERROR("Failed to wait for sync fence B\n");
++		goto out_cleanup;
++	}
++
++out_cleanup:
++	if (semaphore)
++		radeon_semaphore_free(rdev, semaphore);
++
++	if (fenceA)
++		radeon_fence_unref(&fenceA);
++
++	if (fenceB)
++		radeon_fence_unref(&fenceB);
++
++	if (r)
++		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
++}
++
++void radeon_test_syncing(struct radeon_device *rdev)
++{
++	int i, j, k;
++
++	for (i = 1; i < RADEON_NUM_RINGS; ++i) {
++		struct radeon_ring *ringA = &rdev->ring[i];
++		if (!ringA->ready)
++			continue;
++
++		for (j = 0; j < i; ++j) {
++			struct radeon_ring *ringB = &rdev->ring[j];
++			if (!ringB->ready)
++				continue;
++
++			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
++			radeon_test_ring_sync(rdev, ringA, ringB);
++
++			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
++			radeon_test_ring_sync(rdev, ringB, ringA);
++
++			for (k = 0; k < j; ++k) {
++				struct radeon_ring *ringC = &rdev->ring[k];
++				if (!ringC->ready)
++					continue;
++
++				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
++				radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
++
++				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
++				radeon_test_ring_sync2(rdev, ringA, ringC, ringB);
++
++				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
++				radeon_test_ring_sync2(rdev, ringB, ringA, ringC);
++
++				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
++				radeon_test_ring_sync2(rdev, ringB, ringC, ringA);
++
++				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
++				radeon_test_ring_sync2(rdev, ringC, ringA, ringB);
++
++				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
++				radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
++			}
++		}
++	}
++}
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 0b5468b..f493c64 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -114,24 +114,6 @@ static void radeon_ttm_global_fini(struct radeon_device *rdev)
+ 	}
+ }
+ 
+-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
+-
+-static struct ttm_backend*
+-radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+-{
+-	struct radeon_device *rdev;
+-
+-	rdev = radeon_get_rdev(bdev);
+-#if __OS_HAS_AGP
+-	if (rdev->flags & RADEON_IS_AGP) {
+-		return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
+-	} else
+-#endif
+-	{
+-		return radeon_ttm_backend_create(rdev);
+-	}
+-}
+-
+ static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+ {
+ 	return 0;
+@@ -206,7 +188,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
+ 	rbo = container_of(bo, struct radeon_bo, tbo);
+ 	switch (bo->mem.mem_type) {
+ 	case TTM_PL_VRAM:
+-		if (rbo->rdev->cp.ready == false)
++		if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
+ 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+ 		else
+ 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+@@ -241,10 +223,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
+ 	struct radeon_device *rdev;
+ 	uint64_t old_start, new_start;
+ 	struct radeon_fence *fence;
+-	int r;
++	int r, i;
+ 
+ 	rdev = radeon_get_rdev(bo->bdev);
+-	r = radeon_fence_create(rdev, &fence);
++	r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
+ 	if (unlikely(r)) {
+ 		return r;
+ 	}
+@@ -273,13 +255,43 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
+ 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
+ 		return -EINVAL;
+ 	}
+-	if (!rdev->cp.ready) {
+-		DRM_ERROR("Trying to move memory with CP turned off.\n");
++	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) {
++		DRM_ERROR("Trying to move memory with ring turned off.\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+ 
++	/* sync other rings */
++	if (rdev->family >= CHIP_R600) {
++		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
++			/* no need to sync to our own or unused rings */
++			if (i == radeon_copy_ring_index(rdev) || !rdev->ring[i].ready)
++				continue;
++
++			if (!fence->semaphore) {
++				r = radeon_semaphore_create(rdev, &fence->semaphore);
++				/* FIXME: handle semaphore error */
++				if (r)
++					continue;
++			}
++
++			r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
++			/* FIXME: handle ring lock error */
++			if (r)
++				continue;
++			radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
++			radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
++
++			r = radeon_ring_lock(rdev, &rdev->ring[radeon_copy_ring_index(rdev)], 3);
++			/* FIXME: handle ring lock error */
++			if (r)
++				continue;
++			radeon_semaphore_emit_wait(rdev, radeon_copy_ring_index(rdev), fence->semaphore);
++			radeon_ring_unlock_commit(rdev, &rdev->ring[radeon_copy_ring_index(rdev)]);
++		}
++	}
++
+ 	r = radeon_copy(rdev, old_start, new_start,
+ 			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+ 			fence);
+@@ -398,7 +410,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
+ 		radeon_move_null(bo, new_mem);
+ 		return 0;
+ 	}
+-	if (!rdev->cp.ready || rdev->asic->copy == NULL) {
++	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
++	    rdev->asic->copy.copy == NULL) {
+ 		/* use memcpy */
+ 		goto memcpy;
+ 	}
+@@ -515,8 +528,166 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
+ 	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
+ }
+ 
++/*
++ * TTM backend functions.
++ */
++struct radeon_ttm_tt {
++	struct ttm_dma_tt		ttm;
++	struct radeon_device		*rdev;
++	u64				offset;
++};
++
++static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
++				   struct ttm_mem_reg *bo_mem)
++{
++	struct radeon_ttm_tt *gtt = (void*)ttm;
++	int r;
++
++	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
++	if (!ttm->num_pages) {
++		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
++		     ttm->num_pages, bo_mem, ttm);
++	}
++	r = radeon_gart_bind(gtt->rdev, gtt->offset,
++			     ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
++	if (r) {
++		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
++			  ttm->num_pages, (unsigned)gtt->offset);
++		return r;
++	}
++	return 0;
++}
++
++static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
++{
++	struct radeon_ttm_tt *gtt = (void *)ttm;
++
++	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
++	return 0;
++}
++
++static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
++{
++	struct radeon_ttm_tt *gtt = (void *)ttm;
++
++	ttm_dma_tt_fini(&gtt->ttm);
++	kfree(gtt);
++}
++
++static struct ttm_backend_func radeon_backend_func = {
++	.bind = &radeon_ttm_backend_bind,
++	.unbind = &radeon_ttm_backend_unbind,
++	.destroy = &radeon_ttm_backend_destroy,
++};
++
++struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
++				    unsigned long size, uint32_t page_flags,
++				    struct page *dummy_read_page)
++{
++	struct radeon_device *rdev;
++	struct radeon_ttm_tt *gtt;
++
++	rdev = radeon_get_rdev(bdev);
++#if __OS_HAS_AGP
++	if (rdev->flags & RADEON_IS_AGP) {
++		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
++					 size, page_flags, dummy_read_page);
++	}
++#endif
++
++	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
++	if (gtt == NULL) {
++		return NULL;
++	}
++	gtt->ttm.ttm.func = &radeon_backend_func;
++	gtt->rdev = rdev;
++	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
++		kfree(gtt);
++		return NULL;
++	}
++	return &gtt->ttm.ttm;
++}
++
++static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
++{
++	struct radeon_device *rdev;
++	struct radeon_ttm_tt *gtt = (void *)ttm;
++	unsigned i;
++	int r;
++
++	if (ttm->state != tt_unpopulated)
++		return 0;
++
++	rdev = radeon_get_rdev(ttm->bdev);
++#if __OS_HAS_AGP
++	if (rdev->flags & RADEON_IS_AGP) {
++		return ttm_agp_tt_populate(ttm);
++	}
++#endif
++
++#ifdef CONFIG_SWIOTLB
++	if (swiotlb_nr_tbl()) {
++		return ttm_dma_populate(&gtt->ttm, rdev->dev);
++	}
++#endif
++
++	r = ttm_pool_populate(ttm);
++	if (r) {
++		return r;
++	}
++
++	for (i = 0; i < ttm->num_pages; i++) {
++		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
++						       0, PAGE_SIZE,
++						       PCI_DMA_BIDIRECTIONAL);
++		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
++			while (--i) {
++				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
++					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++				gtt->ttm.dma_address[i] = 0;
++			}
++			ttm_pool_unpopulate(ttm);
++			return -EFAULT;
++		}
++	}
++	return 0;
++}
++
++static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
++{
++	struct radeon_device *rdev;
++	struct radeon_ttm_tt *gtt = (void *)ttm;
++	unsigned i;
++
++	rdev = radeon_get_rdev(ttm->bdev);
++#if __OS_HAS_AGP
++	if (rdev->flags & RADEON_IS_AGP) {
++		ttm_agp_tt_unpopulate(ttm);
++		return;
++	}
++#endif
++
++#ifdef CONFIG_SWIOTLB
++	if (swiotlb_nr_tbl()) {
++		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
++		return;
++	}
++#endif
++
++	for (i = 0; i < ttm->num_pages; i++) {
++		if (gtt->ttm.dma_address[i]) {
++			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
++				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++		}
++	}
++
++	ttm_pool_unpopulate(ttm);
++}
++
+ static struct ttm_bo_driver radeon_bo_driver = {
+-	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
++	.ttm_tt_create = &radeon_ttm_tt_create,
++	.ttm_tt_populate = &radeon_ttm_tt_populate,
++	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
+ 	.invalidate_caches = &radeon_invalidate_caches,
+ 	.init_mem_type = &radeon_init_mem_type,
+ 	.evict_flags = &radeon_evict_flags,
+@@ -680,124 +851,6 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
+ }
+ 
+ 
+-/*
+- * TTM backend functions.
+- */
+-struct radeon_ttm_backend {
+-	struct ttm_backend		backend;
+-	struct radeon_device		*rdev;
+-	unsigned long			num_pages;
+-	struct page			**pages;
+-	struct page			*dummy_read_page;
+-	dma_addr_t			*dma_addrs;
+-	bool				populated;
+-	bool				bound;
+-	unsigned			offset;
+-};
+-
+-static int radeon_ttm_backend_populate(struct ttm_backend *backend,
+-				       unsigned long num_pages,
+-				       struct page **pages,
+-				       struct page *dummy_read_page,
+-				       dma_addr_t *dma_addrs)
+-{
+-	struct radeon_ttm_backend *gtt;
+-
+-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
+-	gtt->pages = pages;
+-	gtt->dma_addrs = dma_addrs;
+-	gtt->num_pages = num_pages;
+-	gtt->dummy_read_page = dummy_read_page;
+-	gtt->populated = true;
+-	return 0;
+-}
+-
+-static void radeon_ttm_backend_clear(struct ttm_backend *backend)
+-{
+-	struct radeon_ttm_backend *gtt;
+-
+-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
+-	gtt->pages = NULL;
+-	gtt->dma_addrs = NULL;
+-	gtt->num_pages = 0;
+-	gtt->dummy_read_page = NULL;
+-	gtt->populated = false;
+-	gtt->bound = false;
+-}
+-
+-
+-static int radeon_ttm_backend_bind(struct ttm_backend *backend,
+-				   struct ttm_mem_reg *bo_mem)
+-{
+-	struct radeon_ttm_backend *gtt;
+-	int r;
+-
+-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
+-	gtt->offset = bo_mem->start << PAGE_SHIFT;
+-	if (!gtt->num_pages) {
+-		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+-		     gtt->num_pages, bo_mem, backend);
+-	}
+-	r = radeon_gart_bind(gtt->rdev, gtt->offset,
+-			     gtt->num_pages, gtt->pages, gtt->dma_addrs);
+-	if (r) {
+-		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+-			  gtt->num_pages, gtt->offset);
+-		return r;
+-	}
+-	gtt->bound = true;
+-	return 0;
+-}
+-
+-static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
+-{
+-	struct radeon_ttm_backend *gtt;
+-
+-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
+-	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
+-	gtt->bound = false;
+-	return 0;
+-}
+-
+-static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
+-{
+-	struct radeon_ttm_backend *gtt;
+-
+-	gtt = container_of(backend, struct radeon_ttm_backend, backend);
+-	if (gtt->bound) {
+-		radeon_ttm_backend_unbind(backend);
+-	}
+-	kfree(gtt);
+-}
+-
+-static struct ttm_backend_func radeon_backend_func = {
+-	.populate = &radeon_ttm_backend_populate,
+-	.clear = &radeon_ttm_backend_clear,
+-	.bind = &radeon_ttm_backend_bind,
+-	.unbind = &radeon_ttm_backend_unbind,
+-	.destroy = &radeon_ttm_backend_destroy,
+-};
+-
+-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
+-{
+-	struct radeon_ttm_backend *gtt;
+-
+-	gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
+-	if (gtt == NULL) {
+-		return NULL;
+-	}
+-	gtt->backend.bdev = &rdev->mman.bdev;
+-	gtt->backend.flags = 0;
+-	gtt->backend.func = &radeon_backend_func;
+-	gtt->rdev = rdev;
+-	gtt->pages = NULL;
+-	gtt->num_pages = 0;
+-	gtt->dummy_read_page = NULL;
+-	gtt->populated = false;
+-	gtt->bound = false;
+-	return &gtt->backend;
+-}
+-
+ #define RADEON_DEBUGFS_MEM_TYPES 2
+ 
+ #if defined(CONFIG_DEBUG_FS)
+@@ -820,8 +873,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
+ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+ {
+ #if defined(CONFIG_DEBUG_FS)
+-	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
+-	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
++	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
++	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
+ 	unsigned i;
+ 
+ 	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
+@@ -843,8 +896,17 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
+ 	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ 	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+ 	radeon_mem_types_list[i].driver_features = 0;
+-	radeon_mem_types_list[i].data = NULL;
+-	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
++	radeon_mem_types_list[i++].data = NULL;
++#ifdef CONFIG_SWIOTLB
++	if (swiotlb_nr_tbl()) {
++		sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
++		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
++		radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
++		radeon_mem_types_list[i].driver_features = 0;
++		radeon_mem_types_list[i++].data = NULL;
++	}
++#endif
++	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
+ 
+ #endif
+ 	return 0;
+diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
+index 2316977..0f656b1 100644
+--- a/drivers/gpu/drm/radeon/reg_srcs/cayman
++++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
+@@ -1,5 +1,8 @@
+ cayman 0x9400
+ 0x0000802C GRBM_GFX_INDEX
++0x000084FC CP_STRMOUT_CNTL
++0x000085F0 CP_COHER_CNTL
++0x000085F4 CP_COHER_SIZE
+ 0x000088B0 VGT_VTX_VECT_EJECT_REG
+ 0x000088C4 VGT_CACHE_INVALIDATION
+ 0x000088D4 VGT_GS_VERTEX_REUSE
+@@ -77,7 +80,6 @@ cayman 0x9400
+ 0x0002802C DB_DEPTH_CLEAR
+ 0x00028030 PA_SC_SCREEN_SCISSOR_TL
+ 0x00028034 PA_SC_SCREEN_SCISSOR_BR
+-0x0002805C DB_DEPTH_SLICE
+ 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+ 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+ 0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+@@ -206,7 +208,6 @@ cayman 0x9400
+ 0x00028344 PA_SC_VPORT_ZMAX_14
+ 0x00028348 PA_SC_VPORT_ZMIN_15
+ 0x0002834C PA_SC_VPORT_ZMAX_15
+-0x00028350 SX_MISC
+ 0x00028354 SX_SURFACE_SYNC
+ 0x0002835C SX_SCATTER_EXPORT_SIZE
+ 0x00028380 SQ_VTX_SEMANTIC_0
+@@ -508,10 +509,16 @@ cayman 0x9400
+ 0x00028AA8 IA_MULTI_VGT_PARAM
+ 0x00028AB4 VGT_REUSE_OFF
+ 0x00028AB8 VGT_VTX_CNT_EN
+-0x00028ABC DB_HTILE_SURFACE
+ 0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+ 0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+ 0x00028AC8 DB_PRELOAD_CONTROL
++0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
++0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
++0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
++0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
++0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
++0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
++0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+ 0x00028B38 VGT_GS_MAX_VERT_OUT
+ 0x00028B54 VGT_SHADER_STAGES_EN
+ 0x00028B58 VGT_LS_HS_CONFIG
+@@ -551,6 +558,18 @@ cayman 0x9400
+ 0x00028C34 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_3
+ 0x00028C38 PA_SC_AA_MASK_X0_Y0_X1_Y0
+ 0x00028C3C PA_SC_AA_MASK_X0_Y1_X1_Y1
++0x00028C78 CB_COLOR0_DIM
++0x00028CB4 CB_COLOR1_DIM
++0x00028CF0 CB_COLOR2_DIM
++0x00028D2C CB_COLOR3_DIM
++0x00028D68 CB_COLOR4_DIM
++0x00028DA4 CB_COLOR5_DIM
++0x00028DE0 CB_COLOR6_DIM
++0x00028E1C CB_COLOR7_DIM
++0x00028E58 CB_COLOR8_DIM
++0x00028E74 CB_COLOR9_DIM
++0x00028E90 CB_COLOR10_DIM
++0x00028EAC CB_COLOR11_DIM
+ 0x00028C8C CB_COLOR0_CLEAR_WORD0
+ 0x00028C90 CB_COLOR0_CLEAR_WORD1
+ 0x00028C94 CB_COLOR0_CLEAR_WORD2
+diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
+index 161737a..b912a37 100644
+--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
++++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
+@@ -4,6 +4,9 @@ evergreen 0x9400
+ 0x00008044 WAIT_UNTIL_POLL_CNTL
+ 0x00008048 WAIT_UNTIL_POLL_MASK
+ 0x0000804c WAIT_UNTIL_POLL_REFDATA
++0x000084FC CP_STRMOUT_CNTL
++0x000085F0 CP_COHER_CNTL
++0x000085F4 CP_COHER_SIZE
+ 0x000088B0 VGT_VTX_VECT_EJECT_REG
+ 0x000088C4 VGT_CACHE_INVALIDATION
+ 0x000088D4 VGT_GS_VERTEX_REUSE
+@@ -93,7 +96,6 @@ evergreen 0x9400
+ 0x0002802C DB_DEPTH_CLEAR
+ 0x00028030 PA_SC_SCREEN_SCISSOR_TL
+ 0x00028034 PA_SC_SCREEN_SCISSOR_BR
+-0x0002805C DB_DEPTH_SLICE
+ 0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+ 0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+ 0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+@@ -222,7 +224,6 @@ evergreen 0x9400
+ 0x00028344 PA_SC_VPORT_ZMAX_14
+ 0x00028348 PA_SC_VPORT_ZMIN_15
+ 0x0002834C PA_SC_VPORT_ZMAX_15
+-0x00028350 SX_MISC
+ 0x00028354 SX_SURFACE_SYNC
+ 0x00028380 SQ_VTX_SEMANTIC_0
+ 0x00028384 SQ_VTX_SEMANTIC_1
+@@ -518,10 +519,16 @@ evergreen 0x9400
+ 0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+ 0x00028AB4 VGT_REUSE_OFF
+ 0x00028AB8 VGT_VTX_CNT_EN
+-0x00028ABC DB_HTILE_SURFACE
+ 0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+ 0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+ 0x00028AC8 DB_PRELOAD_CONTROL
++0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
++0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
++0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
++0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
++0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
++0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
++0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+ 0x00028B38 VGT_GS_MAX_VERT_OUT
+ 0x00028B54 VGT_SHADER_STAGES_EN
+ 0x00028B58 VGT_LS_HS_CONFIG
+@@ -554,6 +561,18 @@ evergreen 0x9400
+ 0x00028C34 PA_SC_AA_SAMPLE_LOCS_6
+ 0x00028C38 PA_SC_AA_SAMPLE_LOCS_7
+ 0x00028C3C PA_SC_AA_MASK
++0x00028C78 CB_COLOR0_DIM
++0x00028CB4 CB_COLOR1_DIM
++0x00028CF0 CB_COLOR2_DIM
++0x00028D2C CB_COLOR3_DIM
++0x00028D68 CB_COLOR4_DIM
++0x00028DA4 CB_COLOR5_DIM
++0x00028DE0 CB_COLOR6_DIM
++0x00028E1C CB_COLOR7_DIM
++0x00028E58 CB_COLOR8_DIM
++0x00028E74 CB_COLOR9_DIM
++0x00028E90 CB_COLOR10_DIM
++0x00028EAC CB_COLOR11_DIM
+ 0x00028C8C CB_COLOR0_CLEAR_WORD0
+ 0x00028C90 CB_COLOR0_CLEAR_WORD1
+ 0x00028C94 CB_COLOR0_CLEAR_WORD2
+diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
+index 0380c5c..5e659b0 100644
+--- a/drivers/gpu/drm/radeon/reg_srcs/r600
++++ b/drivers/gpu/drm/radeon/reg_srcs/r600
+@@ -3,6 +3,9 @@ r600 0x9400
+ 0x00028230 R7xx_PA_SC_EDGERULE
+ 0x000286C8 R7xx_SPI_THREAD_GROUPING
+ 0x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
++0x00008490 CP_STRMOUT_CNTL
++0x000085F0 CP_COHER_CNTL
++0x000085F4 CP_COHER_SIZE
+ 0x000088C4 VGT_CACHE_INVALIDATION
+ 0x00028A50 VGT_ENHANCE
+ 0x000088CC VGT_ES_PER_GS
+@@ -38,6 +41,13 @@ r600 0x9400
+ 0x00028AB4 VGT_REUSE_OFF
+ 0x00028AB8 VGT_VTX_CNT_EN
+ 0x000088B0 VGT_VTX_VECT_EJECT_REG
++0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
++0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
++0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
++0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
++0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
++0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
++0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+ 0x00028810 PA_CL_CLIP_CNTL
+ 0x00008A14 PA_CL_ENHANCE
+ 0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+@@ -428,7 +438,7 @@ r600 0x9400
+ 0x00028638 SPI_VS_OUT_ID_9
+ 0x00028438 SX_ALPHA_REF
+ 0x00028410 SX_ALPHA_TEST_CONTROL
+-0x00028350 SX_MISC
++0x00028354 SX_SURFACE_SYNC
+ 0x00009014 SX_MEMORY_EXPORT_SIZE
+ 0x00009604 TC_INVALIDATE
+ 0x00009400 TD_FILTER4
+@@ -703,7 +713,6 @@ r600 0x9400
+ 0x0000A710 TD_VS_SAMPLER17_BORDER_RED
+ 0x00009508 TA_CNTL_AUX
+ 0x0002802C DB_DEPTH_CLEAR
+-0x00028D24 DB_HTILE_SURFACE
+ 0x00028D34 DB_PREFETCH_LIMIT
+ 0x00028D30 DB_PRELOAD_CONTROL
+ 0x00028D0C DB_RENDER_CONTROL
+@@ -743,14 +752,6 @@ r600 0x9400
+ 0x00028114 CB_COLOR5_MASK
+ 0x00028118 CB_COLOR6_MASK
+ 0x0002811C CB_COLOR7_MASK
+-0x00028080 CB_COLOR0_VIEW
+-0x00028084 CB_COLOR1_VIEW
+-0x00028088 CB_COLOR2_VIEW
+-0x0002808C CB_COLOR3_VIEW
+-0x00028090 CB_COLOR4_VIEW
+-0x00028094 CB_COLOR5_VIEW
+-0x00028098 CB_COLOR6_VIEW
+-0x0002809C CB_COLOR7_VIEW
+ 0x00028808 CB_COLOR_CONTROL
+ 0x0002842C CB_FOG_BLUE
+ 0x00028428 CB_FOG_GREEN
+diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
+index 06b90c8..4cf381b 100644
+--- a/drivers/gpu/drm/radeon/rs400.c
++++ b/drivers/gpu/drm/radeon/rs400.c
+@@ -410,6 +410,12 @@ static int rs400_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
+ 	/* Enable IRQ */
+ 	r100_irq_set(rdev);
+ 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+@@ -419,16 +425,25 @@ static int rs400_startup(struct radeon_device *rdev)
+ 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ 		return r;
+ 	}
+-	r = r100_ib_init(rdev);
++
++	r = radeon_ib_pool_start(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ 	if (r) {
+-		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
++		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
++		rdev->accel_working = false;
+ 		return r;
+ 	}
++
+ 	return 0;
+ }
+ 
+ int rs400_resume(struct radeon_device *rdev)
+ {
++	int r;
++
+ 	/* Make sur GART are not working */
+ 	rs400_gart_disable(rdev);
+ 	/* Resume clock before doing reset */
+@@ -447,11 +462,18 @@ int rs400_resume(struct radeon_device *rdev)
+ 	r300_clock_startup(rdev);
+ 	/* Initialize surface registers */
+ 	radeon_surface_init(rdev);
+-	return rs400_startup(rdev);
++
++	rdev->accel_working = true;
++	r = rs400_startup(rdev);
++	if (r) {
++		rdev->accel_working = false;
++	}
++	return r;
+ }
+ 
+ int rs400_suspend(struct radeon_device *rdev)
+ {
++	radeon_ib_pool_suspend(rdev);
+ 	r100_cp_disable(rdev);
+ 	radeon_wb_disable(rdev);
+ 	r100_irq_disable(rdev);
+@@ -530,7 +552,14 @@ int rs400_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	r300_set_reg_safe(rdev);
++
++	r = radeon_ib_pool_init(rdev);
+ 	rdev->accel_working = true;
++	if (r) {
++		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
++		rdev->accel_working = false;
++	}
++
+ 	r = rs400_startup(rdev);
+ 	if (r) {
+ 		/* Somethings want wront with the accel init stop accel */
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index ee898e9..d25cf86 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -46,6 +46,25 @@
+ void rs600_gpu_init(struct radeon_device *rdev);
+ int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+ 
++void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
++{
++	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
++	int i;
++
++	if (RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset) & AVIVO_CRTC_EN) {
++		for (i = 0; i < rdev->usec_timeout; i++) {
++			if (!(RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK))
++				break;
++			udelay(1);
++		}
++		for (i = 0; i < rdev->usec_timeout; i++) {
++			if (RREG32(AVIVO_D1CRTC_STATUS + radeon_crtc->crtc_offset) & AVIVO_D1CRTC_V_BLANK)
++				break;
++			udelay(1);
++		}
++	}
++}
++
+ void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
+ {
+ 	/* enable the pflip int */
+@@ -175,7 +194,7 @@ void rs600_pm_misc(struct radeon_device *rdev)
+ 	/* set pcie lanes */
+ 	if ((rdev->flags & RADEON_IS_PCIE) &&
+ 	    !(rdev->flags & RADEON_IS_IGP) &&
+-	    rdev->asic->set_pcie_lanes &&
++	    rdev->asic->pm.set_pcie_lanes &&
+ 	    (ps->pcie_lanes !=
+ 	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+ 		radeon_set_pcie_lanes(rdev,
+@@ -322,16 +341,6 @@ void rs600_hpd_fini(struct radeon_device *rdev)
+ 	}
+ }
+ 
+-void rs600_bm_disable(struct radeon_device *rdev)
+-{
+-	u16 tmp;
+-
+-	/* disable bus mastering */
+-	pci_read_config_word(rdev->pdev, 0x4, &tmp);
+-	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+-	mdelay(1);
+-}
+-
+ int rs600_asic_reset(struct radeon_device *rdev)
+ {
+ 	struct rv515_mc_save save;
+@@ -355,7 +364,8 @@ int rs600_asic_reset(struct radeon_device *rdev)
+ 	WREG32(RADEON_CP_RB_CNTL, tmp);
+ 	pci_save_state(rdev->pdev);
+ 	/* disable bus mastering */
+-	rs600_bm_disable(rdev);
++	pci_clear_master(rdev->pdev);
++	mdelay(1);
+ 	/* reset GA+VAP */
+ 	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+ 					S_0000F0_SOFT_RESET_GA(1));
+@@ -549,7 +559,7 @@ int rs600_irq_set(struct radeon_device *rdev)
+ 		WREG32(R_000040_GEN_INT_CNTL, 0);
+ 		return -EINVAL;
+ 	}
+-	if (rdev->irq.sw_int) {
++	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ 		tmp |= S_000040_SW_INT_EN(1);
+ 	}
+ 	if (rdev->irq.gui_idle) {
+@@ -642,7 +652,7 @@ int rs600_irq_process(struct radeon_device *rdev)
+ 	while (status || rdev->irq.stat_regs.r500.disp_int) {
+ 		/* SW interrupt */
+ 		if (G_000044_SW_INT(status)) {
+-			radeon_fence_process(rdev);
++			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ 		}
+ 		/* GUI idle */
+ 		if (G_000040_GUI_IDLE(status)) {
+@@ -847,6 +857,12 @@ static int rs600_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
+ 	/* Enable IRQ */
+ 	rs600_irq_set(rdev);
+ 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+@@ -856,15 +872,21 @@ static int rs600_startup(struct radeon_device *rdev)
+ 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ 		return r;
+ 	}
+-	r = r100_ib_init(rdev);
++
++	r = r600_audio_init(rdev);
+ 	if (r) {
+-		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
++		dev_err(rdev->dev, "failed initializing audio\n");
+ 		return r;
+ 	}
+ 
+-	r = r600_audio_init(rdev);
++	r = radeon_ib_pool_start(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ 	if (r) {
+-		dev_err(rdev->dev, "failed initializing audio\n");
++		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
++		rdev->accel_working = false;
+ 		return r;
+ 	}
+ 
+@@ -873,6 +895,8 @@ static int rs600_startup(struct radeon_device *rdev)
+ 
+ int rs600_resume(struct radeon_device *rdev)
+ {
++	int r;
++
+ 	/* Make sur GART are not working */
+ 	rs600_gart_disable(rdev);
+ 	/* Resume clock before doing reset */
+@@ -889,11 +913,18 @@ int rs600_resume(struct radeon_device *rdev)
+ 	rv515_clock_startup(rdev);
+ 	/* Initialize surface registers */
+ 	radeon_surface_init(rdev);
+-	return rs600_startup(rdev);
++
++	rdev->accel_working = true;
++	r = rs600_startup(rdev);
++	if (r) {
++		rdev->accel_working = false;
++	}
++	return r;
+ }
+ 
+ int rs600_suspend(struct radeon_device *rdev)
+ {
++	radeon_ib_pool_suspend(rdev);
+ 	r600_audio_fini(rdev);
+ 	r100_cp_disable(rdev);
+ 	radeon_wb_disable(rdev);
+@@ -974,7 +1005,14 @@ int rs600_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	rs600_set_safe_registers(rdev);
++
++	r = radeon_ib_pool_init(rdev);
+ 	rdev->accel_working = true;
++	if (r) {
++		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
++		rdev->accel_working = false;
++	}
++
+ 	r = rs600_startup(rdev);
+ 	if (r) {
+ 		/* Somethings want wront with the accel init stop accel */
+diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
+index a9049ed..f2c3b9d 100644
+--- a/drivers/gpu/drm/radeon/rs690.c
++++ b/drivers/gpu/drm/radeon/rs690.c
+@@ -31,7 +31,7 @@
+ #include "atom.h"
+ #include "rs690d.h"
+ 
+-static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
++int rs690_mc_wait_for_idle(struct radeon_device *rdev)
+ {
+ 	unsigned i;
+ 	uint32_t tmp;
+@@ -621,6 +621,12 @@ static int rs690_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
+ 	/* Enable IRQ */
+ 	rs600_irq_set(rdev);
+ 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+@@ -630,15 +636,21 @@ static int rs690_startup(struct radeon_device *rdev)
+ 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ 		return r;
+ 	}
+-	r = r100_ib_init(rdev);
++
++	r = r600_audio_init(rdev);
+ 	if (r) {
+-		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
++		dev_err(rdev->dev, "failed initializing audio\n");
+ 		return r;
+ 	}
+ 
+-	r = r600_audio_init(rdev);
++	r = radeon_ib_pool_start(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ 	if (r) {
+-		dev_err(rdev->dev, "failed initializing audio\n");
++		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
++		rdev->accel_working = false;
+ 		return r;
+ 	}
+ 
+@@ -647,6 +659,8 @@ static int rs690_startup(struct radeon_device *rdev)
+ 
+ int rs690_resume(struct radeon_device *rdev)
+ {
++	int r;
++
+ 	/* Make sur GART are not working */
+ 	rs400_gart_disable(rdev);
+ 	/* Resume clock before doing reset */
+@@ -663,11 +677,18 @@ int rs690_resume(struct radeon_device *rdev)
+ 	rv515_clock_startup(rdev);
+ 	/* Initialize surface registers */
+ 	radeon_surface_init(rdev);
+-	return rs690_startup(rdev);
++
++	rdev->accel_working = true;
++	r = rs690_startup(rdev);
++	if (r) {
++		rdev->accel_working = false;
++	}
++	return r;
+ }
+ 
+ int rs690_suspend(struct radeon_device *rdev)
+ {
++	radeon_ib_pool_suspend(rdev);
+ 	r600_audio_fini(rdev);
+ 	r100_cp_disable(rdev);
+ 	radeon_wb_disable(rdev);
+@@ -749,7 +770,14 @@ int rs690_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	rs600_set_safe_registers(rdev);
++
++	r = radeon_ib_pool_init(rdev);
+ 	rdev->accel_working = true;
++	if (r) {
++		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
++		rdev->accel_working = false;
++	}
++
+ 	r = rs690_startup(rdev);
+ 	if (r) {
+ 		/* Somethings want wront with the accel init stop accel */
+diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
+index d5f45b4..43af363 100644
+--- a/drivers/gpu/drm/radeon/rv515.c
++++ b/drivers/gpu/drm/radeon/rv515.c
+@@ -53,46 +53,46 @@ void rv515_debugfs(struct radeon_device *rdev)
+ 	}
+ }
+ 
+-void rv515_ring_start(struct radeon_device *rdev)
++void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
+ {
+ 	int r;
+ 
+-	r = radeon_ring_lock(rdev, 64);
++	r = radeon_ring_lock(rdev, ring, 64);
+ 	if (r) {
+ 		return;
+ 	}
+-	radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
++	radeon_ring_write(ring,
+ 			  ISYNC_ANY2D_IDLE3D |
+ 			  ISYNC_ANY3D_IDLE2D |
+ 			  ISYNC_WAIT_IDLEGUI |
+ 			  ISYNC_CPSCRATCH_IDLEGUI);
+-	radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
+-	radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+-	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+-	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
+-	radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
+-	radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
+-	radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+-	radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
+-	radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+-	radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
+-	radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
+-	radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+-	radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+-	radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
+-	radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+-	radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
+-	radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
++	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
++	radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
++	radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
++	radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
++	radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
++	radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
++	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
++	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
++	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
++	radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
++	radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
++	radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
++	radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
++	radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
++	radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
++	radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
++	radeon_ring_write(ring,
+ 			  ((6 << MS_X0_SHIFT) |
+ 			   (6 << MS_Y0_SHIFT) |
+ 			   (6 << MS_X1_SHIFT) |
+@@ -101,8 +101,8 @@ void rv515_ring_start(struct radeon_device *rdev)
+ 			   (6 << MS_Y2_SHIFT) |
+ 			   (6 << MSBD0_Y_SHIFT) |
+ 			   (6 << MSBD0_X_SHIFT)));
+-	radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
+-	radeon_ring_write(rdev,
++	radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
++	radeon_ring_write(ring,
+ 			  ((6 << MS_X3_SHIFT) |
+ 			   (6 << MS_Y3_SHIFT) |
+ 			   (6 << MS_X4_SHIFT) |
+@@ -110,15 +110,15 @@ void rv515_ring_start(struct radeon_device *rdev)
+ 			   (6 << MS_X5_SHIFT) |
+ 			   (6 << MS_Y5_SHIFT) |
+ 			   (6 << MSBD1_SHIFT)));
+-	radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
+-	radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
+-	radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
+-	radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
+-	radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
+-	radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
+-	radeon_ring_write(rdev, PACKET0(0x20C8, 0));
+-	radeon_ring_write(rdev, 0);
+-	radeon_ring_unlock_commit(rdev);
++	radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
++	radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
++	radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
++	radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
++	radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
++	radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
++	radeon_ring_write(ring, PACKET0(0x20C8, 0));
++	radeon_ring_write(ring, 0);
++	radeon_ring_unlock_commit(rdev, ring);
+ }
+ 
+ int rv515_mc_wait_for_idle(struct radeon_device *rdev)
+@@ -149,7 +149,7 @@ void rv515_gpu_init(struct radeon_device *rdev)
+ 
+ 	if (r100_gui_wait_for_idle(rdev)) {
+ 		printk(KERN_WARNING "Failed to wait GUI idle while "
+-		       "reseting GPU. Bad things might happen.\n");
++		       "resetting GPU. Bad things might happen.\n");
+ 	}
+ 	rv515_vga_render_disable(rdev);
+ 	r420_pipes_init(rdev);
+@@ -161,7 +161,7 @@ void rv515_gpu_init(struct radeon_device *rdev)
+ 	WREG32_PLL(0x000D, tmp);
+ 	if (r100_gui_wait_for_idle(rdev)) {
+ 		printk(KERN_WARNING "Failed to wait GUI idle while "
+-		       "reseting GPU. Bad things might happen.\n");
++		       "resetting GPU. Bad things might happen.\n");
+ 	}
+ 	if (rv515_mc_wait_for_idle(rdev)) {
+ 		printk(KERN_WARNING "Failed to wait MC idle while "
+@@ -379,6 +379,12 @@ static int rv515_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
+ 	/* Enable IRQ */
+ 	rs600_irq_set(rdev);
+ 	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+@@ -388,9 +394,15 @@ static int rv515_startup(struct radeon_device *rdev)
+ 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
+ 		return r;
+ 	}
+-	r = r100_ib_init(rdev);
++
++	r = radeon_ib_pool_start(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ 	if (r) {
+-		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
++		dev_err(rdev->dev, "failed testing IB (%d).\n", r);
++		rdev->accel_working = false;
+ 		return r;
+ 	}
+ 	return 0;
+@@ -398,6 +410,8 @@ static int rv515_startup(struct radeon_device *rdev)
+ 
+ int rv515_resume(struct radeon_device *rdev)
+ {
++	int r;
++
+ 	/* Make sur GART are not working */
+ 	if (rdev->flags & RADEON_IS_PCIE)
+ 		rv370_pcie_gart_disable(rdev);
+@@ -415,7 +429,13 @@ int rv515_resume(struct radeon_device *rdev)
+ 	rv515_clock_startup(rdev);
+ 	/* Initialize surface registers */
+ 	radeon_surface_init(rdev);
+-	return rv515_startup(rdev);
++
++	rdev->accel_working = true;
++	r =  rv515_startup(rdev);
++	if (r) {
++		rdev->accel_working = false;
++	}
++	return r;
+ }
+ 
+ int rv515_suspend(struct radeon_device *rdev)
+@@ -511,7 +531,14 @@ int rv515_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	rv515_set_safe_registers(rdev);
++
++	r = radeon_ib_pool_init(rdev);
+ 	rdev->accel_working = true;
++	if (r) {
++		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
++		rdev->accel_working = false;
++	}
++
+ 	r = rv515_startup(rdev);
+ 	if (r) {
+ 		/* Somethings want wront with the accel init stop accel */
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index cc79449..591040b 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -359,7 +359,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
+ void r700_cp_fini(struct radeon_device *rdev)
+ {
+ 	r700_cp_stop(rdev);
+-	radeon_ring_fini(rdev);
++	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ }
+ 
+ /*
+@@ -978,7 +978,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+ 	}
+ 	if (rdev->flags & RADEON_IS_AGP) {
+ 		size_bf = mc->gtt_start;
+-		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
++		size_af = 0xFFFFFFFF - mc->gtt_end;
+ 		if (size_bf > size_af) {
+ 			if (mc->mc_vram_size > size_bf) {
+ 				dev_warn(rdev->dev, "limiting VRAM\n");
+@@ -992,7 +992,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+ 				mc->real_vram_size = size_af;
+ 				mc->mc_vram_size = size_af;
+ 			}
+-			mc->vram_start = mc->gtt_end;
++			mc->vram_start = mc->gtt_end + 1;
+ 		}
+ 		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ 		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+@@ -1052,6 +1052,7 @@ int rv770_mc_init(struct radeon_device *rdev)
+ 
+ static int rv770_startup(struct radeon_device *rdev)
+ {
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ 	int r;
+ 
+ 	/* enable pcie gen2 link */
+@@ -1082,7 +1083,7 @@ static int rv770_startup(struct radeon_device *rdev)
+ 	r = r600_blit_init(rdev);
+ 	if (r) {
+ 		r600_blit_fini(rdev);
+-		rdev->asic->copy = NULL;
++		rdev->asic->copy.copy = NULL;
+ 		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ 	}
+ 
+@@ -1091,6 +1092,12 @@ static int rv770_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
+ 	/* Enable IRQ */
+ 	r = r600_irq_init(rdev);
+ 	if (r) {
+@@ -1100,7 +1107,9 @@ static int rv770_startup(struct radeon_device *rdev)
+ 	}
+ 	r600_irq_set(rdev);
+ 
+-	r = radeon_ring_init(rdev, rdev->cp.ring_size);
++	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
++			     R600_CP_RB_RPTR, R600_CP_RB_WPTR,
++			     0, 0xfffff, RADEON_CP_PACKET2);
+ 	if (r)
+ 		return r;
+ 	r = rv770_cp_load_microcode(rdev);
+@@ -1110,6 +1119,17 @@ static int rv770_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_ib_pool_start(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
++	if (r) {
++		dev_err(rdev->dev, "IB test failed (%d).\n", r);
++		rdev->accel_working = false;
++		return r;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -1124,15 +1144,11 @@ int rv770_resume(struct radeon_device *rdev)
+ 	/* post card */
+ 	atom_asic_init(rdev->mode_info.atom_context);
+ 
++	rdev->accel_working = true;
+ 	r = rv770_startup(rdev);
+ 	if (r) {
+ 		DRM_ERROR("r600 startup failed on resume\n");
+-		return r;
+-	}
+-
+-	r = r600_ib_test(rdev);
+-	if (r) {
+-		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
++		rdev->accel_working = false;
+ 		return r;
+ 	}
+ 
+@@ -1149,13 +1165,14 @@ int rv770_resume(struct radeon_device *rdev)
+ int rv770_suspend(struct radeon_device *rdev)
+ {
+ 	r600_audio_fini(rdev);
++	radeon_ib_pool_suspend(rdev);
++	r600_blit_suspend(rdev);
+ 	/* FIXME: we should wait for ring to be empty */
+ 	r700_cp_stop(rdev);
+-	rdev->cp.ready = false;
++	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ 	r600_irq_suspend(rdev);
+ 	radeon_wb_disable(rdev);
+ 	rv770_pcie_gart_disable(rdev);
+-	r600_blit_suspend(rdev);
+ 
+ 	return 0;
+ }
+@@ -1224,8 +1241,8 @@ int rv770_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
+-	rdev->cp.ring_obj = NULL;
+-	r600_ring_init(rdev, 1024 * 1024);
++	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
++	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+ 
+ 	rdev->ih.ring_obj = NULL;
+ 	r600_ih_ring_init(rdev, 64 * 1024);
+@@ -1234,30 +1251,24 @@ int rv770_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 
++	r = radeon_ib_pool_init(rdev);
+ 	rdev->accel_working = true;
++	if (r) {
++		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
++		rdev->accel_working = false;
++	}
++
+ 	r = rv770_startup(rdev);
+ 	if (r) {
+ 		dev_err(rdev->dev, "disabling GPU acceleration\n");
+ 		r700_cp_fini(rdev);
+ 		r600_irq_fini(rdev);
+ 		radeon_wb_fini(rdev);
++		r100_ib_fini(rdev);
+ 		radeon_irq_kms_fini(rdev);
+ 		rv770_pcie_gart_fini(rdev);
+ 		rdev->accel_working = false;
+ 	}
+-	if (rdev->accel_working) {
+-		r = radeon_ib_pool_init(rdev);
+-		if (r) {
+-			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+-			rdev->accel_working = false;
+-		} else {
+-			r = r600_ib_test(rdev);
+-			if (r) {
+-				dev_err(rdev->dev, "IB test failed (%d).\n", r);
+-				rdev->accel_working = false;
+-			}
+-		}
+-	}
+ 
+ 	r = r600_audio_init(rdev);
+ 	if (r) {
+@@ -1274,11 +1285,12 @@ void rv770_fini(struct radeon_device *rdev)
+ 	r700_cp_fini(rdev);
+ 	r600_irq_fini(rdev);
+ 	radeon_wb_fini(rdev);
+-	radeon_ib_pool_fini(rdev);
++	r100_ib_fini(rdev);
+ 	radeon_irq_kms_fini(rdev);
+ 	rv770_pcie_gart_fini(rdev);
+ 	r600_vram_scratch_fini(rdev);
+ 	radeon_gem_fini(rdev);
++	radeon_semaphore_driver_fini(rdev);
+ 	radeon_fence_driver_fini(rdev);
+ 	radeon_agp_fini(rdev);
+ 	radeon_bo_fini(rdev);
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+new file mode 100644
+index 0000000..1197f21
+--- /dev/null
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -0,0 +1,4128 @@
++/*
++ * Copyright 2011 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Alex Deucher
++ */
++#include <linux/firmware.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include "drmP.h"
++#include "radeon.h"
++#include "radeon_asic.h"
++#include "radeon_drm.h"
++#include "sid.h"
++#include "atom.h"
++#include "si_blit_shaders.h"
++
++#define SI_PFP_UCODE_SIZE 2144
++#define SI_PM4_UCODE_SIZE 2144
++#define SI_CE_UCODE_SIZE 2144
++#define SI_RLC_UCODE_SIZE 2048
++#define SI_MC_UCODE_SIZE 7769
++
++MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
++MODULE_FIRMWARE("radeon/TAHITI_me.bin");
++MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
++MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
++MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
++MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
++MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
++MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
++MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
++MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
++MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
++MODULE_FIRMWARE("radeon/VERDE_me.bin");
++MODULE_FIRMWARE("radeon/VERDE_ce.bin");
++MODULE_FIRMWARE("radeon/VERDE_mc.bin");
++MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
++
++extern int r600_ih_ring_alloc(struct radeon_device *rdev);
++extern void r600_ih_ring_fini(struct radeon_device *rdev);
++extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
++extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
++extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
++extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
++
++/* get temperature in millidegrees */
++int si_get_temp(struct radeon_device *rdev)
++{
++	u32 temp;
++	int actual_temp = 0;
++
++	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
++		CTF_TEMP_SHIFT;
++
++	if (temp & 0x200)
++		actual_temp = 255;
++	else
++		actual_temp = temp & 0x1ff;
++
++	actual_temp = (actual_temp * 1000);
++
++	return actual_temp;
++}
++
++#define TAHITI_IO_MC_REGS_SIZE 36
++
++static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
++	{0x0000006f, 0x03044000},
++	{0x00000070, 0x0480c018},
++	{0x00000071, 0x00000040},
++	{0x00000072, 0x01000000},
++	{0x00000074, 0x000000ff},
++	{0x00000075, 0x00143400},
++	{0x00000076, 0x08ec0800},
++	{0x00000077, 0x040000cc},
++	{0x00000079, 0x00000000},
++	{0x0000007a, 0x21000409},
++	{0x0000007c, 0x00000000},
++	{0x0000007d, 0xe8000000},
++	{0x0000007e, 0x044408a8},
++	{0x0000007f, 0x00000003},
++	{0x00000080, 0x00000000},
++	{0x00000081, 0x01000000},
++	{0x00000082, 0x02000000},
++	{0x00000083, 0x00000000},
++	{0x00000084, 0xe3f3e4f4},
++	{0x00000085, 0x00052024},
++	{0x00000087, 0x00000000},
++	{0x00000088, 0x66036603},
++	{0x00000089, 0x01000000},
++	{0x0000008b, 0x1c0a0000},
++	{0x0000008c, 0xff010000},
++	{0x0000008e, 0xffffefff},
++	{0x0000008f, 0xfff3efff},
++	{0x00000090, 0xfff3efbf},
++	{0x00000094, 0x00101101},
++	{0x00000095, 0x00000fff},
++	{0x00000096, 0x00116fff},
++	{0x00000097, 0x60010000},
++	{0x00000098, 0x10010000},
++	{0x00000099, 0x00006000},
++	{0x0000009a, 0x00001000},
++	{0x0000009f, 0x00a77400}
++};
++
++static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
++	{0x0000006f, 0x03044000},
++	{0x00000070, 0x0480c018},
++	{0x00000071, 0x00000040},
++	{0x00000072, 0x01000000},
++	{0x00000074, 0x000000ff},
++	{0x00000075, 0x00143400},
++	{0x00000076, 0x08ec0800},
++	{0x00000077, 0x040000cc},
++	{0x00000079, 0x00000000},
++	{0x0000007a, 0x21000409},
++	{0x0000007c, 0x00000000},
++	{0x0000007d, 0xe8000000},
++	{0x0000007e, 0x044408a8},
++	{0x0000007f, 0x00000003},
++	{0x00000080, 0x00000000},
++	{0x00000081, 0x01000000},
++	{0x00000082, 0x02000000},
++	{0x00000083, 0x00000000},
++	{0x00000084, 0xe3f3e4f4},
++	{0x00000085, 0x00052024},
++	{0x00000087, 0x00000000},
++	{0x00000088, 0x66036603},
++	{0x00000089, 0x01000000},
++	{0x0000008b, 0x1c0a0000},
++	{0x0000008c, 0xff010000},
++	{0x0000008e, 0xffffefff},
++	{0x0000008f, 0xfff3efff},
++	{0x00000090, 0xfff3efbf},
++	{0x00000094, 0x00101101},
++	{0x00000095, 0x00000fff},
++	{0x00000096, 0x00116fff},
++	{0x00000097, 0x60010000},
++	{0x00000098, 0x10010000},
++	{0x00000099, 0x00006000},
++	{0x0000009a, 0x00001000},
++	{0x0000009f, 0x00a47400}
++};
++
++static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
++	{0x0000006f, 0x03044000},
++	{0x00000070, 0x0480c018},
++	{0x00000071, 0x00000040},
++	{0x00000072, 0x01000000},
++	{0x00000074, 0x000000ff},
++	{0x00000075, 0x00143400},
++	{0x00000076, 0x08ec0800},
++	{0x00000077, 0x040000cc},
++	{0x00000079, 0x00000000},
++	{0x0000007a, 0x21000409},
++	{0x0000007c, 0x00000000},
++	{0x0000007d, 0xe8000000},
++	{0x0000007e, 0x044408a8},
++	{0x0000007f, 0x00000003},
++	{0x00000080, 0x00000000},
++	{0x00000081, 0x01000000},
++	{0x00000082, 0x02000000},
++	{0x00000083, 0x00000000},
++	{0x00000084, 0xe3f3e4f4},
++	{0x00000085, 0x00052024},
++	{0x00000087, 0x00000000},
++	{0x00000088, 0x66036603},
++	{0x00000089, 0x01000000},
++	{0x0000008b, 0x1c0a0000},
++	{0x0000008c, 0xff010000},
++	{0x0000008e, 0xffffefff},
++	{0x0000008f, 0xfff3efff},
++	{0x00000090, 0xfff3efbf},
++	{0x00000094, 0x00101101},
++	{0x00000095, 0x00000fff},
++	{0x00000096, 0x00116fff},
++	{0x00000097, 0x60010000},
++	{0x00000098, 0x10010000},
++	{0x00000099, 0x00006000},
++	{0x0000009a, 0x00001000},
++	{0x0000009f, 0x00a37400}
++};
++
++/* ucode loading */
++static int si_mc_load_microcode(struct radeon_device *rdev)
++{
++	const __be32 *fw_data;
++	u32 running, blackout = 0;
++	u32 *io_mc_regs;
++	int i, ucode_size, regs_size;
++
++	if (!rdev->mc_fw)
++		return -EINVAL;
++
++	switch (rdev->family) {
++	case CHIP_TAHITI:
++		io_mc_regs = (u32 *)&tahiti_io_mc_regs;
++		ucode_size = SI_MC_UCODE_SIZE;
++		regs_size = TAHITI_IO_MC_REGS_SIZE;
++		break;
++	case CHIP_PITCAIRN:
++		io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
++		ucode_size = SI_MC_UCODE_SIZE;
++		regs_size = TAHITI_IO_MC_REGS_SIZE;
++		break;
++	case CHIP_VERDE:
++	default:
++		io_mc_regs = (u32 *)&verde_io_mc_regs;
++		ucode_size = SI_MC_UCODE_SIZE;
++		regs_size = TAHITI_IO_MC_REGS_SIZE;
++		break;
++	}
++
++	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
++
++	if (running == 0) {
++		if (running) {
++			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
++			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
++		}
++
++		/* reset the engine and set to writable */
++		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
++		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
++
++		/* load mc io regs */
++		for (i = 0; i < regs_size; i++) {
++			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
++			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
++		}
++		/* load the MC ucode */
++		fw_data = (const __be32 *)rdev->mc_fw->data;
++		for (i = 0; i < ucode_size; i++)
++			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
++
++		/* put the engine back into the active state */
++		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
++		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
++		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
++
++		/* wait for training to complete */
++		for (i = 0; i < rdev->usec_timeout; i++) {
++			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
++				break;
++			udelay(1);
++		}
++		for (i = 0; i < rdev->usec_timeout; i++) {
++			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
++				break;
++			udelay(1);
++		}
++
++		if (running)
++			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
++	}
++
++	return 0;
++}
++
++static int si_init_microcode(struct radeon_device *rdev)
++{
++	struct platform_device *pdev;
++	const char *chip_name;
++	const char *rlc_chip_name;
++	size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
++	char fw_name[30];
++	int err;
++
++	DRM_DEBUG("\n");
++
++	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
++	err = IS_ERR(pdev);
++	if (err) {
++		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
++		return -EINVAL;
++	}
++
++	switch (rdev->family) {
++	case CHIP_TAHITI:
++		chip_name = "TAHITI";
++		rlc_chip_name = "TAHITI";
++		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
++		me_req_size = SI_PM4_UCODE_SIZE * 4;
++		ce_req_size = SI_CE_UCODE_SIZE * 4;
++		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
++		mc_req_size = SI_MC_UCODE_SIZE * 4;
++		break;
++	case CHIP_PITCAIRN:
++		chip_name = "PITCAIRN";
++		rlc_chip_name = "PITCAIRN";
++		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
++		me_req_size = SI_PM4_UCODE_SIZE * 4;
++		ce_req_size = SI_CE_UCODE_SIZE * 4;
++		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
++		mc_req_size = SI_MC_UCODE_SIZE * 4;
++		break;
++	case CHIP_VERDE:
++		chip_name = "VERDE";
++		rlc_chip_name = "VERDE";
++		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
++		me_req_size = SI_PM4_UCODE_SIZE * 4;
++		ce_req_size = SI_CE_UCODE_SIZE * 4;
++		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
++		mc_req_size = SI_MC_UCODE_SIZE * 4;
++		break;
++	default: BUG();
++	}
++
++	DRM_INFO("Loading %s Microcode\n", chip_name);
++
++	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
++	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
++	if (err)
++		goto out;
++	if (rdev->pfp_fw->size != pfp_req_size) {
++		printk(KERN_ERR
++		       "si_cp: Bogus length %zu in firmware \"%s\"\n",
++		       rdev->pfp_fw->size, fw_name);
++		err = -EINVAL;
++		goto out;
++	}
++
++	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
++	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
++	if (err)
++		goto out;
++	if (rdev->me_fw->size != me_req_size) {
++		printk(KERN_ERR
++		       "si_cp: Bogus length %zu in firmware \"%s\"\n",
++		       rdev->me_fw->size, fw_name);
++		err = -EINVAL;
++	}
++
++	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
++	err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
++	if (err)
++		goto out;
++	if (rdev->ce_fw->size != ce_req_size) {
++		printk(KERN_ERR
++		       "si_cp: Bogus length %zu in firmware \"%s\"\n",
++		       rdev->ce_fw->size, fw_name);
++		err = -EINVAL;
++	}
++
++	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
++	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
++	if (err)
++		goto out;
++	if (rdev->rlc_fw->size != rlc_req_size) {
++		printk(KERN_ERR
++		       "si_rlc: Bogus length %zu in firmware \"%s\"\n",
++		       rdev->rlc_fw->size, fw_name);
++		err = -EINVAL;
++	}
++
++	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
++	err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
++	if (err)
++		goto out;
++	if (rdev->mc_fw->size != mc_req_size) {
++		printk(KERN_ERR
++		       "si_mc: Bogus length %zu in firmware \"%s\"\n",
++		       rdev->mc_fw->size, fw_name);
++		err = -EINVAL;
++	}
++
++out:
++	platform_device_unregister(pdev);
++
++	if (err) {
++		if (err != -EINVAL)
++			printk(KERN_ERR
++			       "si_cp: Failed to load firmware \"%s\"\n",
++			       fw_name);
++		release_firmware(rdev->pfp_fw);
++		rdev->pfp_fw = NULL;
++		release_firmware(rdev->me_fw);
++		rdev->me_fw = NULL;
++		release_firmware(rdev->ce_fw);
++		rdev->ce_fw = NULL;
++		release_firmware(rdev->rlc_fw);
++		rdev->rlc_fw = NULL;
++		release_firmware(rdev->mc_fw);
++		rdev->mc_fw = NULL;
++	}
++	return err;
++}
++
++/* watermark setup */
++static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
++				   struct radeon_crtc *radeon_crtc,
++				   struct drm_display_mode *mode,
++				   struct drm_display_mode *other_mode)
++{
++	u32 tmp;
++	/*
++	 * Line Buffer Setup
++	 * There are 3 line buffers, each one shared by 2 display controllers.
++	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
++	 * the display controllers.  The partitioning is done via one of four
++	 * preset allocations specified in bits 21:20:
++	 *  0 - half lb
++	 *  2 - whole lb, other crtc must be disabled
++	 */
++	/* this can get tricky if we have two large displays on a paired group
++	 * of crtcs.  Ideally for multiple large displays we'd assign them to
++	 * non-linked crtcs for maximum line buffer allocation.
++	 */
++	if (radeon_crtc->base.enabled && mode) {
++		if (other_mode)
++			tmp = 0; /* 1/2 */
++		else
++			tmp = 2; /* whole */
++	} else
++		tmp = 0;
++
++	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
++	       DC_LB_MEMORY_CONFIG(tmp));
++
++	if (radeon_crtc->base.enabled && mode) {
++		switch (tmp) {
++		case 0:
++		default:
++			return 4096 * 2;
++		case 2:
++			return 8192 * 2;
++		}
++	}
++
++	/* controller not enabled, so no lb used */
++	return 0;
++}
++
++static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
++{
++	u32 tmp = RREG32(MC_SHARED_CHMAP);
++
++	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
++	case 0:
++	default:
++		return 1;
++	case 1:
++		return 2;
++	case 2:
++		return 4;
++	case 3:
++		return 8;
++	case 4:
++		return 3;
++	case 5:
++		return 6;
++	case 6:
++		return 10;
++	case 7:
++		return 12;
++	case 8:
++		return 16;
++	}
++}
++
++struct dce6_wm_params {
++	u32 dram_channels; /* number of dram channels */
++	u32 yclk;          /* bandwidth per dram data pin in kHz */
++	u32 sclk;          /* engine clock in kHz */
++	u32 disp_clk;      /* display clock in kHz */
++	u32 src_width;     /* viewport width */
++	u32 active_time;   /* active display time in ns */
++	u32 blank_time;    /* blank time in ns */
++	bool interlaced;    /* mode is interlaced */
++	fixed20_12 vsc;    /* vertical scale ratio */
++	u32 num_heads;     /* number of active crtcs */
++	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
++	u32 lb_size;       /* line buffer allocated to pipe */
++	u32 vtaps;         /* vertical scaler taps */
++};
++
++static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
++{
++	/* Calculate raw DRAM Bandwidth */
++	fixed20_12 dram_efficiency; /* 0.7 */
++	fixed20_12 yclk, dram_channels, bandwidth;
++	fixed20_12 a;
++
++	a.full = dfixed_const(1000);
++	yclk.full = dfixed_const(wm->yclk);
++	yclk.full = dfixed_div(yclk, a);
++	dram_channels.full = dfixed_const(wm->dram_channels * 4);
++	a.full = dfixed_const(10);
++	dram_efficiency.full = dfixed_const(7);
++	dram_efficiency.full = dfixed_div(dram_efficiency, a);
++	bandwidth.full = dfixed_mul(dram_channels, yclk);
++	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
++
++	return dfixed_trunc(bandwidth);
++}
++
++static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
++{
++	/* Calculate DRAM Bandwidth and the part allocated to display. */
++	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
++	fixed20_12 yclk, dram_channels, bandwidth;
++	fixed20_12 a;
++
++	a.full = dfixed_const(1000);
++	yclk.full = dfixed_const(wm->yclk);
++	yclk.full = dfixed_div(yclk, a);
++	dram_channels.full = dfixed_const(wm->dram_channels * 4);
++	a.full = dfixed_const(10);
++	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
++	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
++	bandwidth.full = dfixed_mul(dram_channels, yclk);
++	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
++
++	return dfixed_trunc(bandwidth);
++}
++
++static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
++{
++	/* Calculate the display Data return Bandwidth */
++	fixed20_12 return_efficiency; /* 0.8 */
++	fixed20_12 sclk, bandwidth;
++	fixed20_12 a;
++
++	a.full = dfixed_const(1000);
++	sclk.full = dfixed_const(wm->sclk);
++	sclk.full = dfixed_div(sclk, a);
++	a.full = dfixed_const(10);
++	return_efficiency.full = dfixed_const(8);
++	return_efficiency.full = dfixed_div(return_efficiency, a);
++	a.full = dfixed_const(32);
++	bandwidth.full = dfixed_mul(a, sclk);
++	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
++
++	return dfixed_trunc(bandwidth);
++}
++
++static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
++{
++	return 32;
++}
++
++static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
++{
++	/* Calculate the DMIF Request Bandwidth */
++	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
++	fixed20_12 disp_clk, sclk, bandwidth;
++	fixed20_12 a, b1, b2;
++	u32 min_bandwidth;
++
++	a.full = dfixed_const(1000);
++	disp_clk.full = dfixed_const(wm->disp_clk);
++	disp_clk.full = dfixed_div(disp_clk, a);
++	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
++	b1.full = dfixed_mul(a, disp_clk);
++
++	a.full = dfixed_const(1000);
++	sclk.full = dfixed_const(wm->sclk);
++	sclk.full = dfixed_div(sclk, a);
++	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
++	b2.full = dfixed_mul(a, sclk);
++
++	a.full = dfixed_const(10);
++	disp_clk_request_efficiency.full = dfixed_const(8);
++	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
++
++	min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));
++
++	a.full = dfixed_const(min_bandwidth);
++	bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);
++
++	return dfixed_trunc(bandwidth);
++}
++
++static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
++{
++	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
++	u32 dram_bandwidth = dce6_dram_bandwidth(wm);
++	u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
++	u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
++
++	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
++}
++
++static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
++{
++	/* Calculate the display mode Average Bandwidth
++	 * DisplayMode should contain the source and destination dimensions,
++	 * timing, etc.
++	 */
++	fixed20_12 bpp;
++	fixed20_12 line_time;
++	fixed20_12 src_width;
++	fixed20_12 bandwidth;
++	fixed20_12 a;
++
++	a.full = dfixed_const(1000);
++	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
++	line_time.full = dfixed_div(line_time, a);
++	bpp.full = dfixed_const(wm->bytes_per_pixel);
++	src_width.full = dfixed_const(wm->src_width);
++	bandwidth.full = dfixed_mul(src_width, bpp);
++	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
++	bandwidth.full = dfixed_div(bandwidth, line_time);
++
++	return dfixed_trunc(bandwidth);
++}
++
++static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
++{
++	/* First calculate the latency in ns */
++	u32 mc_latency = 2000; /* 2000 ns. */
++	u32 available_bandwidth = dce6_available_bandwidth(wm);
++	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
++	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
++	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
++	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
++		(wm->num_heads * cursor_line_pair_return_time);
++	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
++	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
++	u32 tmp, dmif_size = 12288;
++	fixed20_12 a, b, c;
++
++	if (wm->num_heads == 0)
++		return 0;
++
++	a.full = dfixed_const(2);
++	b.full = dfixed_const(1);
++	if ((wm->vsc.full > a.full) ||
++	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
++	    (wm->vtaps >= 5) ||
++	    ((wm->vsc.full >= a.full) && wm->interlaced))
++		max_src_lines_per_dst_line = 4;
++	else
++		max_src_lines_per_dst_line = 2;
++
++	a.full = dfixed_const(available_bandwidth);
++	b.full = dfixed_const(wm->num_heads);
++	a.full = dfixed_div(a, b);
++
++	b.full = dfixed_const(mc_latency + 512);
++	c.full = dfixed_const(wm->disp_clk);
++	b.full = dfixed_div(b, c);
++
++	c.full = dfixed_const(dmif_size);
++	b.full = dfixed_div(c, b);
++
++	tmp = min(dfixed_trunc(a), dfixed_trunc(b));
++
++	b.full = dfixed_const(1000);
++	c.full = dfixed_const(wm->disp_clk);
++	b.full = dfixed_div(c, b);
++	c.full = dfixed_const(wm->bytes_per_pixel);
++	b.full = dfixed_mul(b, c);
++
++	lb_fill_bw = min(tmp, dfixed_trunc(b));
++
++	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
++	b.full = dfixed_const(1000);
++	c.full = dfixed_const(lb_fill_bw);
++	b.full = dfixed_div(c, b);
++	a.full = dfixed_div(a, b);
++	line_fill_time = dfixed_trunc(a);
++
++	if (line_fill_time < wm->active_time)
++		return latency;
++	else
++		return latency + (line_fill_time - wm->active_time);
++
++}
++
++static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
++{
++	if (dce6_average_bandwidth(wm) <=
++	    (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
++		return true;
++	else
++		return false;
++};
++
++static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
++{
++	if (dce6_average_bandwidth(wm) <=
++	    (dce6_available_bandwidth(wm) / wm->num_heads))
++		return true;
++	else
++		return false;
++};
++
++static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
++{
++	u32 lb_partitions = wm->lb_size / wm->src_width;
++	u32 line_time = wm->active_time + wm->blank_time;
++	u32 latency_tolerant_lines;
++	u32 latency_hiding;
++	fixed20_12 a;
++
++	a.full = dfixed_const(1);
++	if (wm->vsc.full > a.full)
++		latency_tolerant_lines = 1;
++	else {
++		if (lb_partitions <= (wm->vtaps + 1))
++			latency_tolerant_lines = 1;
++		else
++			latency_tolerant_lines = 2;
++	}
++
++	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
++
++	if (dce6_latency_watermark(wm) <= latency_hiding)
++		return true;
++	else
++		return false;
++}
++
++static void dce6_program_watermarks(struct radeon_device *rdev,
++					 struct radeon_crtc *radeon_crtc,
++					 u32 lb_size, u32 num_heads)
++{
++	struct drm_display_mode *mode = &radeon_crtc->base.mode;
++	struct dce6_wm_params wm;
++	u32 pixel_period;
++	u32 line_time = 0;
++	u32 latency_watermark_a = 0, latency_watermark_b = 0;
++	u32 priority_a_mark = 0, priority_b_mark = 0;
++	u32 priority_a_cnt = PRIORITY_OFF;
++	u32 priority_b_cnt = PRIORITY_OFF;
++	u32 tmp, arb_control3;
++	fixed20_12 a, b, c;
++
++	if (radeon_crtc->base.enabled && num_heads && mode) {
++		pixel_period = 1000000 / (u32)mode->clock;
++		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
++		priority_a_cnt = 0;
++		priority_b_cnt = 0;
++
++		wm.yclk = rdev->pm.current_mclk * 10;
++		wm.sclk = rdev->pm.current_sclk * 10;
++		wm.disp_clk = mode->clock;
++		wm.src_width = mode->crtc_hdisplay;
++		wm.active_time = mode->crtc_hdisplay * pixel_period;
++		wm.blank_time = line_time - wm.active_time;
++		wm.interlaced = false;
++		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++			wm.interlaced = true;
++		wm.vsc = radeon_crtc->vsc;
++		wm.vtaps = 1;
++		if (radeon_crtc->rmx_type != RMX_OFF)
++			wm.vtaps = 2;
++		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
++		wm.lb_size = lb_size;
++		if (rdev->family == CHIP_ARUBA)
++			wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
++		else
++			wm.dram_channels = si_get_number_of_dram_channels(rdev);
++		wm.num_heads = num_heads;
++
++		/* set for high clocks */
++		latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535);
++		/* set for low clocks */
++		/* wm.yclk = low clk; wm.sclk = low clk */
++		latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);
++
++		/* possibly force display priority to high */
++		/* should really do this at mode validation time... */
++		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
++		    !dce6_average_bandwidth_vs_available_bandwidth(&wm) ||
++		    !dce6_check_latency_hiding(&wm) ||
++		    (rdev->disp_priority == 2)) {
++			DRM_DEBUG_KMS("force priority to high\n");
++			priority_a_cnt |= PRIORITY_ALWAYS_ON;
++			priority_b_cnt |= PRIORITY_ALWAYS_ON;
++		}
++
++		a.full = dfixed_const(1000);
++		b.full = dfixed_const(mode->clock);
++		b.full = dfixed_div(b, a);
++		c.full = dfixed_const(latency_watermark_a);
++		c.full = dfixed_mul(c, b);
++		c.full = dfixed_mul(c, radeon_crtc->hsc);
++		c.full = dfixed_div(c, a);
++		a.full = dfixed_const(16);
++		c.full = dfixed_div(c, a);
++		priority_a_mark = dfixed_trunc(c);
++		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
++
++		a.full = dfixed_const(1000);
++		b.full = dfixed_const(mode->clock);
++		b.full = dfixed_div(b, a);
++		c.full = dfixed_const(latency_watermark_b);
++		c.full = dfixed_mul(c, b);
++		c.full = dfixed_mul(c, radeon_crtc->hsc);
++		c.full = dfixed_div(c, a);
++		a.full = dfixed_const(16);
++		c.full = dfixed_div(c, a);
++		priority_b_mark = dfixed_trunc(c);
++		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
++	}
++
++	/* select wm A */
++	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
++	tmp = arb_control3;
++	tmp &= ~LATENCY_WATERMARK_MASK(3);
++	tmp |= LATENCY_WATERMARK_MASK(1);
++	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
++	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
++	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
++		LATENCY_HIGH_WATERMARK(line_time)));
++	/* select wm B */
++	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
++	tmp &= ~LATENCY_WATERMARK_MASK(3);
++	tmp |= LATENCY_WATERMARK_MASK(2);
++	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
++	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
++	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
++		LATENCY_HIGH_WATERMARK(line_time)));
++	/* restore original selection */
++	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);
++
++	/* write the priority marks */
++	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
++	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
++
++}
++
++void dce6_bandwidth_update(struct radeon_device *rdev)
++{
++	struct drm_display_mode *mode0 = NULL;
++	struct drm_display_mode *mode1 = NULL;
++	u32 num_heads = 0, lb_size;
++	int i;
++
++	radeon_update_display_priority(rdev);
++
++	for (i = 0; i < rdev->num_crtc; i++) {
++		if (rdev->mode_info.crtcs[i]->base.enabled)
++			num_heads++;
++	}
++	for (i = 0; i < rdev->num_crtc; i += 2) {
++		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
++		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
++		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
++		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
++		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
++		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
++	}
++}
++
++/*
++ * Core functions
++ */
++static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
++					   u32 num_tile_pipes,
++					   u32 num_backends_per_asic,
++					   u32 *backend_disable_mask_per_asic,
++					   u32 num_shader_engines)
++{
++	u32 backend_map = 0;
++	u32 enabled_backends_mask = 0;
++	u32 enabled_backends_count = 0;
++	u32 num_backends_per_se;
++	u32 cur_pipe;
++	u32 swizzle_pipe[SI_MAX_PIPES];
++	u32 cur_backend = 0;
++	u32 i;
++	bool force_no_swizzle;
++
++	/* force legal values */
++	if (num_tile_pipes < 1)
++		num_tile_pipes = 1;
++	if (num_tile_pipes > rdev->config.si.max_tile_pipes)
++		num_tile_pipes = rdev->config.si.max_tile_pipes;
++	if (num_shader_engines < 1)
++		num_shader_engines = 1;
++	if (num_shader_engines > rdev->config.si.max_shader_engines)
++		num_shader_engines = rdev->config.si.max_shader_engines;
++	if (num_backends_per_asic < num_shader_engines)
++		num_backends_per_asic = num_shader_engines;
++	if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines))
++		num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines;
++
++	/* make sure we have the same number of backends per se */
++	num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
++	/* set up the number of backends per se */
++	num_backends_per_se = num_backends_per_asic / num_shader_engines;
++	if (num_backends_per_se > rdev->config.si.max_backends_per_se) {
++		num_backends_per_se = rdev->config.si.max_backends_per_se;
++		num_backends_per_asic = num_backends_per_se * num_shader_engines;
++	}
++
++	/* create enable mask and count for enabled backends */
++	for (i = 0; i < SI_MAX_BACKENDS; ++i) {
++		if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
++			enabled_backends_mask |= (1 << i);
++			++enabled_backends_count;
++		}
++		if (enabled_backends_count == num_backends_per_asic)
++			break;
++	}
++
++	/* force the backends mask to match the current number of backends */
++	if (enabled_backends_count != num_backends_per_asic) {
++		u32 this_backend_enabled;
++		u32 shader_engine;
++		u32 backend_per_se;
++
++		enabled_backends_mask = 0;
++		enabled_backends_count = 0;
++		*backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK;
++		for (i = 0; i < SI_MAX_BACKENDS; ++i) {
++			/* calc the current se */
++			shader_engine = i / rdev->config.si.max_backends_per_se;
++			/* calc the backend per se */
++			backend_per_se = i % rdev->config.si.max_backends_per_se;
++			/* default to not enabled */
++			this_backend_enabled = 0;
++			if ((shader_engine < num_shader_engines) &&
++			    (backend_per_se < num_backends_per_se))
++				this_backend_enabled = 1;
++			if (this_backend_enabled) {
++				enabled_backends_mask |= (1 << i);
++				*backend_disable_mask_per_asic &= ~(1 << i);
++				++enabled_backends_count;
++			}
++		}
++	}
++
++
++	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES);
++	switch (rdev->family) {
++	case CHIP_TAHITI:
++	case CHIP_PITCAIRN:
++	case CHIP_VERDE:
++		force_no_swizzle = true;
++		break;
++	default:
++		force_no_swizzle = false;
++		break;
++	}
++	if (force_no_swizzle) {
++		bool last_backend_enabled = false;
++
++		force_no_swizzle = false;
++		for (i = 0; i < SI_MAX_BACKENDS; ++i) {
++			if (((enabled_backends_mask >> i) & 1) == 1) {
++				if (last_backend_enabled)
++					force_no_swizzle = true;
++				last_backend_enabled = true;
++			} else
++				last_backend_enabled = false;
++		}
++	}
++
++	switch (num_tile_pipes) {
++	case 1:
++	case 3:
++	case 5:
++	case 7:
++		DRM_ERROR("odd number of pipes!\n");
++		break;
++	case 2:
++		swizzle_pipe[0] = 0;
++		swizzle_pipe[1] = 1;
++		break;
++	case 4:
++		if (force_no_swizzle) {
++			swizzle_pipe[0] = 0;
++			swizzle_pipe[1] = 1;
++			swizzle_pipe[2] = 2;
++			swizzle_pipe[3] = 3;
++		} else {
++			swizzle_pipe[0] = 0;
++			swizzle_pipe[1] = 2;
++			swizzle_pipe[2] = 1;
++			swizzle_pipe[3] = 3;
++		}
++		break;
++	case 6:
++		if (force_no_swizzle) {
++			swizzle_pipe[0] = 0;
++			swizzle_pipe[1] = 1;
++			swizzle_pipe[2] = 2;
++			swizzle_pipe[3] = 3;
++			swizzle_pipe[4] = 4;
++			swizzle_pipe[5] = 5;
++		} else {
++			swizzle_pipe[0] = 0;
++			swizzle_pipe[1] = 2;
++			swizzle_pipe[2] = 4;
++			swizzle_pipe[3] = 1;
++			swizzle_pipe[4] = 3;
++			swizzle_pipe[5] = 5;
++		}
++		break;
++	case 8:
++		if (force_no_swizzle) {
++			swizzle_pipe[0] = 0;
++			swizzle_pipe[1] = 1;
++			swizzle_pipe[2] = 2;
++			swizzle_pipe[3] = 3;
++			swizzle_pipe[4] = 4;
++			swizzle_pipe[5] = 5;
++			swizzle_pipe[6] = 6;
++			swizzle_pipe[7] = 7;
++		} else {
++			swizzle_pipe[0] = 0;
++			swizzle_pipe[1] = 2;
++			swizzle_pipe[2] = 4;
++			swizzle_pipe[3] = 6;
++			swizzle_pipe[4] = 1;
++			swizzle_pipe[5] = 3;
++			swizzle_pipe[6] = 5;
++			swizzle_pipe[7] = 7;
++		}
++		break;
++	}
++
++	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
++		while (((1 << cur_backend) & enabled_backends_mask) == 0)
++			cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
++
++		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
++
++		cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
++	}
++
++	return backend_map;
++}
++
++static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev,
++					u32 disable_mask_per_se,
++					u32 max_disable_mask_per_se,
++					u32 num_shader_engines)
++{
++	u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
++	u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
++
++	if (num_shader_engines == 1)
++		return disable_mask_per_asic;
++	else if (num_shader_engines == 2)
++		return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
++	else
++		return 0xffffffff;
++}
++
++static void si_tiling_mode_table_init(struct radeon_device *rdev)
++{
++	const u32 num_tile_mode_states = 32;
++	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
++
++	switch (rdev->config.si.mem_row_size_in_kb) {
++	case 1:
++		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
++		break;
++	case 2:
++	default:
++		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
++		break;
++	case 4:
++		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
++		break;
++	}
++
++	if ((rdev->family == CHIP_TAHITI) ||
++	    (rdev->family == CHIP_PITCAIRN)) {
++		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
++			switch (reg_offset) {
++			case 0:  /* non-AA compressed depth or any compressed stencil */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 1:  /* 2xAA/4xAA compressed depth only */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 2:  /* 8xAA compressed depth only */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(split_equal_to_row_size) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(split_equal_to_row_size) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
++				break;
++			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(split_equal_to_row_size) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 8:  /* 1D and 1D Array Surfaces */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
++						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 9:  /* Displayable maps. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 10:  /* Display 8bpp. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 11:  /* Display 16bpp. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 12:  /* Display 32bpp. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
++				break;
++			case 13:  /* Thin. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 14:  /* Thin 8 bpp. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
++				break;
++			case 15:  /* Thin 16 bpp. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
++				break;
++			case 16:  /* Thin 32 bpp. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
++				break;
++			case 17:  /* Thin 64 bpp. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(split_equal_to_row_size) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
++				break;
++			case 21:  /* 8 bpp PRT. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 22:  /* 16 bpp PRT */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
++				break;
++			case 23:  /* 32 bpp PRT */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 24:  /* 64 bpp PRT */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 25:  /* 128 bpp PRT */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
++						 NUM_BANKS(ADDR_SURF_8_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
++				break;
++			default:
++				gb_tile_moden = 0;
++				break;
++			}
++			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
++		}
++	} else if (rdev->family == CHIP_VERDE) {
++		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
++			switch (reg_offset) {
++			case 0:  /* non-AA compressed depth or any compressed stencil */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
++				break;
++			case 1:  /* 2xAA/4xAA compressed depth only */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
++				break;
++			case 2:  /* 8xAA compressed depth only */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
++				break;
++			case 3:  /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
++				break;
++			case 4:  /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 5:  /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(split_equal_to_row_size) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 6:  /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(split_equal_to_row_size) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 7:  /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(split_equal_to_row_size) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
++				break;
++			case 8:  /* 1D and 1D Array Surfaces */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
++						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 9:  /* Displayable maps. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 10:  /* Display 8bpp. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
++				break;
++			case 11:  /* Display 16bpp. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 12:  /* Display 32bpp. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 13:  /* Thin. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 14:  /* Thin 8 bpp. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 15:  /* Thin 16 bpp. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 16:  /* Thin 32 bpp. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 17:  /* Thin 64 bpp. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
++						 TILE_SPLIT(split_equal_to_row_size) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 21:  /* 8 bpp PRT. */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 22:  /* 16 bpp PRT */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
++				break;
++			case 23:  /* 32 bpp PRT */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 24:  /* 64 bpp PRT */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
++						 NUM_BANKS(ADDR_SURF_16_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
++				break;
++			case 25:  /* 128 bpp PRT */
++				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
++						 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
++						 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
++						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
++						 NUM_BANKS(ADDR_SURF_8_BANK) |
++						 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
++						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
++						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
++				break;
++			default:
++				gb_tile_moden = 0;
++				break;
++			}
++			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
++		}
++	} else
++		DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
++}
++
++static void si_gpu_init(struct radeon_device *rdev)
++{
++	u32 cc_rb_backend_disable = 0;
++	u32 cc_gc_shader_array_config;
++	u32 gb_addr_config = 0;
++	u32 mc_shared_chmap, mc_arb_ramcfg;
++	u32 gb_backend_map;
++	u32 cgts_tcc_disable;
++	u32 sx_debug_1;
++	u32 gc_user_shader_array_config;
++	u32 gc_user_rb_backend_disable;
++	u32 cgts_user_tcc_disable;
++	u32 hdp_host_path_cntl;
++	u32 tmp;
++	int i, j;
++
++	switch (rdev->family) {
++	case CHIP_TAHITI:
++		rdev->config.si.max_shader_engines = 2;
++		rdev->config.si.max_pipes_per_simd = 4;
++		rdev->config.si.max_tile_pipes = 12;
++		rdev->config.si.max_simds_per_se = 8;
++		rdev->config.si.max_backends_per_se = 4;
++		rdev->config.si.max_texture_channel_caches = 12;
++		rdev->config.si.max_gprs = 256;
++		rdev->config.si.max_gs_threads = 32;
++		rdev->config.si.max_hw_contexts = 8;
++
++		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
++		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
++		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
++		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
++		break;
++	case CHIP_PITCAIRN:
++		rdev->config.si.max_shader_engines = 2;
++		rdev->config.si.max_pipes_per_simd = 4;
++		rdev->config.si.max_tile_pipes = 8;
++		rdev->config.si.max_simds_per_se = 5;
++		rdev->config.si.max_backends_per_se = 4;
++		rdev->config.si.max_texture_channel_caches = 8;
++		rdev->config.si.max_gprs = 256;
++		rdev->config.si.max_gs_threads = 32;
++		rdev->config.si.max_hw_contexts = 8;
++
++		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
++		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
++		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
++		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
++		break;
++	case CHIP_VERDE:
++	default:
++		rdev->config.si.max_shader_engines = 1;
++		rdev->config.si.max_pipes_per_simd = 4;
++		rdev->config.si.max_tile_pipes = 4;
++		rdev->config.si.max_simds_per_se = 2;
++		rdev->config.si.max_backends_per_se = 4;
++		rdev->config.si.max_texture_channel_caches = 4;
++		rdev->config.si.max_gprs = 256;
++		rdev->config.si.max_gs_threads = 32;
++		rdev->config.si.max_hw_contexts = 8;
++
++		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
++		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
++		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
++		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
++		break;
++	}
++
++	/* Initialize HDP */
++	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
++		WREG32((0x2c14 + j), 0x00000000);
++		WREG32((0x2c18 + j), 0x00000000);
++		WREG32((0x2c1c + j), 0x00000000);
++		WREG32((0x2c20 + j), 0x00000000);
++		WREG32((0x2c24 + j), 0x00000000);
++	}
++
++	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
++
++	evergreen_fix_pci_max_read_req_size(rdev);
++
++	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
++
++	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
++	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
++
++	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
++	cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
++	cgts_tcc_disable = 0xffff0000;
++	for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++)
++		cgts_tcc_disable &= ~(1 << (16 + i));
++	gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
++	gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
++	cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
++
++	rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines;
++	rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
++	tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
++	rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp);
++	tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
++	rdev->config.si.backend_disable_mask_per_asic =
++		si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK,
++					     rdev->config.si.num_shader_engines);
++	rdev->config.si.backend_map =
++		si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
++						rdev->config.si.num_backends_per_se *
++						rdev->config.si.num_shader_engines,
++						&rdev->config.si.backend_disable_mask_per_asic,
++						rdev->config.si.num_shader_engines);
++	tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
++	rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp);
++	rdev->config.si.mem_max_burst_length_bytes = 256;
++	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
++	rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
++	if (rdev->config.si.mem_row_size_in_kb > 4)
++		rdev->config.si.mem_row_size_in_kb = 4;
++	/* XXX use MC settings? */
++	rdev->config.si.shader_engine_tile_size = 32;
++	rdev->config.si.num_gpus = 1;
++	rdev->config.si.multi_gpu_tile_size = 64;
++
++	gb_addr_config = 0;
++	switch (rdev->config.si.num_tile_pipes) {
++	case 1:
++		gb_addr_config |= NUM_PIPES(0);
++		break;
++	case 2:
++		gb_addr_config |= NUM_PIPES(1);
++		break;
++	case 4:
++		gb_addr_config |= NUM_PIPES(2);
++		break;
++	case 8:
++	default:
++		gb_addr_config |= NUM_PIPES(3);
++		break;
++	}
++
++	tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1;
++	gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
++	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1);
++	tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1;
++	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
++	switch (rdev->config.si.num_gpus) {
++	case 1:
++	default:
++		gb_addr_config |= NUM_GPUS(0);
++		break;
++	case 2:
++		gb_addr_config |= NUM_GPUS(1);
++		break;
++	case 4:
++		gb_addr_config |= NUM_GPUS(2);
++		break;
++	}
++	switch (rdev->config.si.multi_gpu_tile_size) {
++	case 16:
++		gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
++		break;
++	case 32:
++	default:
++		gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
++		break;
++	case 64:
++		gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
++		break;
++	case 128:
++		gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
++		break;
++	}
++	switch (rdev->config.si.mem_row_size_in_kb) {
++	case 1:
++	default:
++		gb_addr_config |= ROW_SIZE(0);
++		break;
++	case 2:
++		gb_addr_config |= ROW_SIZE(1);
++		break;
++	case 4:
++		gb_addr_config |= ROW_SIZE(2);
++		break;
++	}
++
++	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
++	rdev->config.si.num_tile_pipes = (1 << tmp);
++	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
++	rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256;
++	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
++	rdev->config.si.num_shader_engines = tmp + 1;
++	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
++	rdev->config.si.num_gpus = tmp + 1;
++	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
++	rdev->config.si.multi_gpu_tile_size = 1 << tmp;
++	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
++	rdev->config.si.mem_row_size_in_kb = 1 << tmp;
++
++	gb_backend_map =
++		si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
++						rdev->config.si.num_backends_per_se *
++						rdev->config.si.num_shader_engines,
++						&rdev->config.si.backend_disable_mask_per_asic,
++						rdev->config.si.num_shader_engines);
++
++	/* setup tiling info dword.  gb_addr_config is not adequate since it does
++	 * not have bank info, so create a custom tiling dword.
++	 * bits 3:0   num_pipes
++	 * bits 7:4   num_banks
++	 * bits 11:8  group_size
++	 * bits 15:12 row_size
++	 */
++	rdev->config.si.tile_config = 0;
++	switch (rdev->config.si.num_tile_pipes) {
++	case 1:
++		rdev->config.si.tile_config |= (0 << 0);
++		break;
++	case 2:
++		rdev->config.si.tile_config |= (1 << 0);
++		break;
++	case 4:
++		rdev->config.si.tile_config |= (2 << 0);
++		break;
++	case 8:
++	default:
++		/* XXX what about 12? */
++		rdev->config.si.tile_config |= (3 << 0);
++		break;
++	}
++	rdev->config.si.tile_config |=
++		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
++	rdev->config.si.tile_config |=
++		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
++	rdev->config.si.tile_config |=
++		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
++
++	rdev->config.si.backend_map = gb_backend_map;
++	WREG32(GB_ADDR_CONFIG, gb_addr_config);
++	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
++	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
++
++	/* primary versions */
++	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
++	WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
++	WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
++
++	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
++
++	/* user versions */
++	WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
++	WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
++	WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
++
++	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
++
++	si_tiling_mode_table_init(rdev);
++
++	/* set HW defaults for 3D engine */
++	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
++				     ROQ_IB2_START(0x2b)));
++	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
++
++	sx_debug_1 = RREG32(SX_DEBUG_1);
++	WREG32(SX_DEBUG_1, sx_debug_1);
++
++	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
++
++	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
++				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
++				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
++				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));
++
++	WREG32(VGT_NUM_INSTANCES, 1);
++
++	WREG32(CP_PERFMON_CNTL, 0);
++
++	WREG32(SQ_CONFIG, 0);
++
++	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
++					  FORCE_EOV_MAX_REZ_CNT(255)));
++
++	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
++	       AUTO_INVLD_EN(ES_AND_GS_AUTO));
++
++	WREG32(VGT_GS_VERTEX_REUSE, 16);
++	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
++
++	WREG32(CB_PERFCOUNTER0_SELECT0, 0);
++	WREG32(CB_PERFCOUNTER0_SELECT1, 0);
++	WREG32(CB_PERFCOUNTER1_SELECT0, 0);
++	WREG32(CB_PERFCOUNTER1_SELECT1, 0);
++	WREG32(CB_PERFCOUNTER2_SELECT0, 0);
++	WREG32(CB_PERFCOUNTER2_SELECT1, 0);
++	WREG32(CB_PERFCOUNTER3_SELECT0, 0);
++	WREG32(CB_PERFCOUNTER3_SELECT1, 0);
++
++	tmp = RREG32(HDP_MISC_CNTL);
++	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
++	WREG32(HDP_MISC_CNTL, tmp);
++
++	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
++	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
++
++	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
++
++	udelay(50);
++}
++
++/*
++ * GPU scratch registers helpers function.
++ */
++static void si_scratch_init(struct radeon_device *rdev)
++{
++	int i;
++
++	rdev->scratch.num_reg = 7;
++	rdev->scratch.reg_base = SCRATCH_REG0;
++	for (i = 0; i < rdev->scratch.num_reg; i++) {
++		rdev->scratch.free[i] = true;
++		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
++	}
++}
++
++void si_fence_ring_emit(struct radeon_device *rdev,
++			struct radeon_fence *fence)
++{
++	struct radeon_ring *ring = &rdev->ring[fence->ring];
++	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
++
++	/* flush read cache over gart */
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
++	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
++	radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
++			  PACKET3_TC_ACTION_ENA |
++			  PACKET3_SH_KCACHE_ACTION_ENA |
++			  PACKET3_SH_ICACHE_ACTION_ENA);
++	radeon_ring_write(ring, 0xFFFFFFFF);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 10); /* poll interval */
++	/* EVENT_WRITE_EOP - flush caches, send int */
++	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
++	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
++	radeon_ring_write(ring, addr & 0xffffffff);
++	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
++	radeon_ring_write(ring, fence->seq);
++	radeon_ring_write(ring, 0);
++}
++
++/*
++ * IB stuff
++ */
++void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
++{
++	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
++	u32 header;
++
++	if (ib->is_const_ib)
++		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
++	else
++		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
++
++	radeon_ring_write(ring, header);
++	radeon_ring_write(ring,
++#ifdef __BIG_ENDIAN
++			  (2 << 0) |
++#endif
++			  (ib->gpu_addr & 0xFFFFFFFC));
++	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
++	radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
++
++	/* flush read cache over gart for this vmid */
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
++	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
++	radeon_ring_write(ring, ib->vm_id);
++	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
++	radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
++			  PACKET3_TC_ACTION_ENA |
++			  PACKET3_SH_KCACHE_ACTION_ENA |
++			  PACKET3_SH_ICACHE_ACTION_ENA);
++	radeon_ring_write(ring, 0xFFFFFFFF);
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 10); /* poll interval */
++}
++
++/*
++ * CP.
++ */
++static void si_cp_enable(struct radeon_device *rdev, bool enable)
++{
++	if (enable)
++		WREG32(CP_ME_CNTL, 0);
++	else {
++		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
++		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
++		WREG32(SCRATCH_UMSK, 0);
++	}
++	udelay(50);
++}
++
++static int si_cp_load_microcode(struct radeon_device *rdev)
++{
++	const __be32 *fw_data;
++	int i;
++
++	if (!rdev->me_fw || !rdev->pfp_fw)
++		return -EINVAL;
++
++	si_cp_enable(rdev, false);
++
++	/* PFP */
++	fw_data = (const __be32 *)rdev->pfp_fw->data;
++	WREG32(CP_PFP_UCODE_ADDR, 0);
++	for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
++		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
++	WREG32(CP_PFP_UCODE_ADDR, 0);
++
++	/* CE */
++	fw_data = (const __be32 *)rdev->ce_fw->data;
++	WREG32(CP_CE_UCODE_ADDR, 0);
++	for (i = 0; i < SI_CE_UCODE_SIZE; i++)
++		WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
++	WREG32(CP_CE_UCODE_ADDR, 0);
++
++	/* ME */
++	fw_data = (const __be32 *)rdev->me_fw->data;
++	WREG32(CP_ME_RAM_WADDR, 0);
++	for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
++		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
++	WREG32(CP_ME_RAM_WADDR, 0);
++
++	WREG32(CP_PFP_UCODE_ADDR, 0);
++	WREG32(CP_CE_UCODE_ADDR, 0);
++	WREG32(CP_ME_RAM_WADDR, 0);
++	WREG32(CP_ME_RAM_RADDR, 0);
++	return 0;
++}
++
++static int si_cp_start(struct radeon_device *rdev)
++{
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
++	int r, i;
++
++	r = radeon_ring_lock(rdev, ring, 7 + 4);
++	if (r) {
++		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
++		return r;
++	}
++	/* init the CP */
++	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
++	radeon_ring_write(ring, 0x1);
++	radeon_ring_write(ring, 0x0);
++	radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
++	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
++	radeon_ring_write(ring, 0);
++	radeon_ring_write(ring, 0);
++
++	/* init the CE partitions */
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
++	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
++	radeon_ring_write(ring, 0xc000);
++	radeon_ring_write(ring, 0xe000);
++	radeon_ring_unlock_commit(rdev, ring);
++
++	si_cp_enable(rdev, true);
++
++	r = radeon_ring_lock(rdev, ring, si_default_size + 10);
++	if (r) {
++		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
++		return r;
++	}
++
++	/* setup clear context state */
++	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
++	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
++
++	for (i = 0; i < si_default_size; i++)
++		radeon_ring_write(ring, si_default_state[i]);
++
++	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
++	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
++
++	/* set clear context state */
++	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
++	radeon_ring_write(ring, 0);
++
++	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
++	radeon_ring_write(ring, 0x00000316);
++	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
++	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
++
++	radeon_ring_unlock_commit(rdev, ring);
++
++	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
++		ring = &rdev->ring[i];
++		r = radeon_ring_lock(rdev, ring, 2);
++
++		/* clear the compute context state */
++		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
++		radeon_ring_write(ring, 0);
++
++		radeon_ring_unlock_commit(rdev, ring);
++	}
++
++	return 0;
++}
++
++static void si_cp_fini(struct radeon_device *rdev)
++{
++	si_cp_enable(rdev, false);
++	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
++	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
++	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
++}
++
++static int si_cp_resume(struct radeon_device *rdev)
++{
++	struct radeon_ring *ring;
++	u32 tmp;
++	u32 rb_bufsz;
++	int r;
++
++	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
++	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
++				 SOFT_RESET_PA |
++				 SOFT_RESET_VGT |
++				 SOFT_RESET_SPI |
++				 SOFT_RESET_SX));
++	RREG32(GRBM_SOFT_RESET);
++	mdelay(15);
++	WREG32(GRBM_SOFT_RESET, 0);
++	RREG32(GRBM_SOFT_RESET);
++
++	WREG32(CP_SEM_WAIT_TIMER, 0x0);
++	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
++
++	/* Set the write pointer delay */
++	WREG32(CP_RB_WPTR_DELAY, 0);
++
++	WREG32(CP_DEBUG, 0);
++	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
++
++	/* ring 0 - compute and gfx */
++	/* Set ring buffer size */
++	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
++	rb_bufsz = drm_order(ring->ring_size / 8);
++	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
++#ifdef __BIG_ENDIAN
++	tmp |= BUF_SWAP_32BIT;
++#endif
++	WREG32(CP_RB0_CNTL, tmp);
++
++	/* Initialize the ring buffer's read and write pointers */
++	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
++	ring->wptr = 0;
++	WREG32(CP_RB0_WPTR, ring->wptr);
++
++	/* set the wb address wether it's enabled or not */
++	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
++	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
++
++	if (rdev->wb.enabled)
++		WREG32(SCRATCH_UMSK, 0xff);
++	else {
++		tmp |= RB_NO_UPDATE;
++		WREG32(SCRATCH_UMSK, 0);
++	}
++
++	mdelay(1);
++	WREG32(CP_RB0_CNTL, tmp);
++
++	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
++
++	ring->rptr = RREG32(CP_RB0_RPTR);
++
++	/* ring1  - compute only */
++	/* Set ring buffer size */
++	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
++	rb_bufsz = drm_order(ring->ring_size / 8);
++	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
++#ifdef __BIG_ENDIAN
++	tmp |= BUF_SWAP_32BIT;
++#endif
++	WREG32(CP_RB1_CNTL, tmp);
++
++	/* Initialize the ring buffer's read and write pointers */
++	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
++	ring->wptr = 0;
++	WREG32(CP_RB1_WPTR, ring->wptr);
++
++	/* set the wb address wether it's enabled or not */
++	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
++	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
++
++	mdelay(1);
++	WREG32(CP_RB1_CNTL, tmp);
++
++	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
++
++	ring->rptr = RREG32(CP_RB1_RPTR);
++
++	/* ring2 - compute only */
++	/* Set ring buffer size */
++	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
++	rb_bufsz = drm_order(ring->ring_size / 8);
++	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
++#ifdef __BIG_ENDIAN
++	tmp |= BUF_SWAP_32BIT;
++#endif
++	WREG32(CP_RB2_CNTL, tmp);
++
++	/* Initialize the ring buffer's read and write pointers */
++	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
++	ring->wptr = 0;
++	WREG32(CP_RB2_WPTR, ring->wptr);
++
++	/* set the wb address wether it's enabled or not */
++	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
++	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
++
++	mdelay(1);
++	WREG32(CP_RB2_CNTL, tmp);
++
++	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
++
++	ring->rptr = RREG32(CP_RB2_RPTR);
++
++	/* start the rings */
++	si_cp_start(rdev);
++	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
++	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
++	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
++	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
++	if (r) {
++		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
++		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
++		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
++		return r;
++	}
++	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
++	if (r) {
++		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
++	}
++	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
++	if (r) {
++		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
++	}
++
++	return 0;
++}
++
++bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
++{
++	u32 srbm_status;
++	u32 grbm_status, grbm_status2;
++	u32 grbm_status_se0, grbm_status_se1;
++	struct r100_gpu_lockup *lockup = &rdev->config.si.lockup;
++	int r;
++
++	srbm_status = RREG32(SRBM_STATUS);
++	grbm_status = RREG32(GRBM_STATUS);
++	grbm_status2 = RREG32(GRBM_STATUS2);
++	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
++	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
++	if (!(grbm_status & GUI_ACTIVE)) {
++		r100_gpu_lockup_update(lockup, ring);
++		return false;
++	}
++	/* force CP activities */
++	r = radeon_ring_lock(rdev, ring, 2);
++	if (!r) {
++		/* PACKET2 NOP */
++		radeon_ring_write(ring, 0x80000000);
++		radeon_ring_write(ring, 0x80000000);
++		radeon_ring_unlock_commit(rdev, ring);
++	}
++	/* XXX deal with CP0,1,2 */
++	ring->rptr = RREG32(ring->rptr_reg);
++	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
++}
++
++static int si_gpu_soft_reset(struct radeon_device *rdev)
++{
++	struct evergreen_mc_save save;
++	u32 grbm_reset = 0;
++
++	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
++		return 0;
++
++	dev_info(rdev->dev, "GPU softreset \n");
++	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
++		RREG32(GRBM_STATUS));
++	dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
++		RREG32(GRBM_STATUS2));
++	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
++		RREG32(GRBM_STATUS_SE0));
++	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
++		RREG32(GRBM_STATUS_SE1));
++	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
++		RREG32(SRBM_STATUS));
++	evergreen_mc_stop(rdev, &save);
++	if (radeon_mc_wait_for_idle(rdev)) {
++		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
++	}
++	/* Disable CP parsing/prefetching */
++	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
++
++	/* reset all the gfx blocks */
++	grbm_reset = (SOFT_RESET_CP |
++		      SOFT_RESET_CB |
++		      SOFT_RESET_DB |
++		      SOFT_RESET_GDS |
++		      SOFT_RESET_PA |
++		      SOFT_RESET_SC |
++		      SOFT_RESET_SPI |
++		      SOFT_RESET_SX |
++		      SOFT_RESET_TC |
++		      SOFT_RESET_TA |
++		      SOFT_RESET_VGT |
++		      SOFT_RESET_IA);
++
++	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
++	WREG32(GRBM_SOFT_RESET, grbm_reset);
++	(void)RREG32(GRBM_SOFT_RESET);
++	udelay(50);
++	WREG32(GRBM_SOFT_RESET, 0);
++	(void)RREG32(GRBM_SOFT_RESET);
++	/* Wait a little for things to settle down */
++	udelay(50);
++	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
++		RREG32(GRBM_STATUS));
++	dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
++		RREG32(GRBM_STATUS2));
++	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
++		RREG32(GRBM_STATUS_SE0));
++	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
++		RREG32(GRBM_STATUS_SE1));
++	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
++		RREG32(SRBM_STATUS));
++	evergreen_mc_resume(rdev, &save);
++	return 0;
++}
++
++int si_asic_reset(struct radeon_device *rdev)
++{
++	return si_gpu_soft_reset(rdev);
++}
++
++/* MC */
++static void si_mc_program(struct radeon_device *rdev)
++{
++	struct evergreen_mc_save save;
++	u32 tmp;
++	int i, j;
++
++	/* Initialize HDP */
++	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
++		WREG32((0x2c14 + j), 0x00000000);
++		WREG32((0x2c18 + j), 0x00000000);
++		WREG32((0x2c1c + j), 0x00000000);
++		WREG32((0x2c20 + j), 0x00000000);
++		WREG32((0x2c24 + j), 0x00000000);
++	}
++	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
++
++	evergreen_mc_stop(rdev, &save);
++	if (radeon_mc_wait_for_idle(rdev)) {
++		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
++	}
++	/* Lockout access through VGA aperture*/
++	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
++	/* Update configuration */
++	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
++	       rdev->mc.vram_start >> 12);
++	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
++	       rdev->mc.vram_end >> 12);
++	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
++	       rdev->vram_scratch.gpu_addr >> 12);
++	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
++	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
++	WREG32(MC_VM_FB_LOCATION, tmp);
++	/* XXX double check these! */
++	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
++	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
++	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
++	WREG32(MC_VM_AGP_BASE, 0);
++	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
++	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
++	if (radeon_mc_wait_for_idle(rdev)) {
++		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
++	}
++	evergreen_mc_resume(rdev, &save);
++	/* we need to own VRAM, so turn off the VGA renderer here
++	 * to stop it overwriting our objects */
++	rv515_vga_render_disable(rdev);
++}
++
++/* SI MC address space is 40 bits */
++static void si_vram_location(struct radeon_device *rdev,
++			     struct radeon_mc *mc, u64 base)
++{
++	mc->vram_start = base;
++	if (mc->mc_vram_size > (0xFFFFFFFFFFULL - base + 1)) {
++		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
++		mc->real_vram_size = mc->aper_size;
++		mc->mc_vram_size = mc->aper_size;
++	}
++	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
++	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
++			mc->mc_vram_size >> 20, mc->vram_start,
++			mc->vram_end, mc->real_vram_size >> 20);
++}
++
++static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
++{
++	u64 size_af, size_bf;
++
++	size_af = ((0xFFFFFFFFFFULL - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
++	size_bf = mc->vram_start & ~mc->gtt_base_align;
++	if (size_bf > size_af) {
++		if (mc->gtt_size > size_bf) {
++			dev_warn(rdev->dev, "limiting GTT\n");
++			mc->gtt_size = size_bf;
++		}
++		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
++	} else {
++		if (mc->gtt_size > size_af) {
++			dev_warn(rdev->dev, "limiting GTT\n");
++			mc->gtt_size = size_af;
++		}
++		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
++	}
++	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
++	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
++			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
++}
++
++static void si_vram_gtt_location(struct radeon_device *rdev,
++				 struct radeon_mc *mc)
++{
++	if (mc->mc_vram_size > 0xFFC0000000ULL) {
++		/* leave room for at least 1024M GTT */
++		dev_warn(rdev->dev, "limiting VRAM\n");
++		mc->real_vram_size = 0xFFC0000000ULL;
++		mc->mc_vram_size = 0xFFC0000000ULL;
++	}
++	si_vram_location(rdev, &rdev->mc, 0);
++	rdev->mc.gtt_base_align = 0;
++	si_gtt_location(rdev, mc);
++}
++
++static int si_mc_init(struct radeon_device *rdev)
++{
++	u32 tmp;
++	int chansize, numchan;
++
++	/* Get VRAM informations */
++	rdev->mc.vram_is_ddr = true;
++	tmp = RREG32(MC_ARB_RAMCFG);
++	if (tmp & CHANSIZE_OVERRIDE) {
++		chansize = 16;
++	} else if (tmp & CHANSIZE_MASK) {
++		chansize = 64;
++	} else {
++		chansize = 32;
++	}
++	tmp = RREG32(MC_SHARED_CHMAP);
++	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
++	case 0:
++	default:
++		numchan = 1;
++		break;
++	case 1:
++		numchan = 2;
++		break;
++	case 2:
++		numchan = 4;
++		break;
++	case 3:
++		numchan = 8;
++		break;
++	case 4:
++		numchan = 3;
++		break;
++	case 5:
++		numchan = 6;
++		break;
++	case 6:
++		numchan = 10;
++		break;
++	case 7:
++		numchan = 12;
++		break;
++	case 8:
++		numchan = 16;
++		break;
++	}
++	rdev->mc.vram_width = numchan * chansize;
++	/* Could aper size report 0 ? */
++	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
++	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
++	/* size in MB on si */
++	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
++	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
++	rdev->mc.visible_vram_size = rdev->mc.aper_size;
++	si_vram_gtt_location(rdev, &rdev->mc);
++	radeon_update_bandwidth_info(rdev);
++
++	return 0;
++}
++
++/*
++ * GART
++ */
++void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
++{
++	/* flush hdp cache */
++	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
++
++	/* bits 0-15 are the VM contexts0-15 */
++	WREG32(VM_INVALIDATE_REQUEST, 1);
++}
++
++int si_pcie_gart_enable(struct radeon_device *rdev)
++{
++	int r, i;
++
++	if (rdev->gart.robj == NULL) {
++		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
++		return -EINVAL;
++	}
++	r = radeon_gart_table_vram_pin(rdev);
++	if (r)
++		return r;
++	radeon_gart_restore(rdev);
++	/* Setup TLB control */
++	WREG32(MC_VM_MX_L1_TLB_CNTL,
++	       (0xA << 7) |
++	       ENABLE_L1_TLB |
++	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
++	       ENABLE_ADVANCED_DRIVER_MODEL |
++	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
++	/* Setup L2 cache */
++	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
++	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
++	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
++	       EFFECTIVE_L2_QUEUE_SIZE(7) |
++	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
++	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
++	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
++	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
++	/* setup context0 */
++	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
++	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
++	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
++	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
++			(u32)(rdev->dummy_page.addr >> 12));
++	WREG32(VM_CONTEXT0_CNTL2, 0);
++	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
++				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
++
++	WREG32(0x15D4, 0);
++	WREG32(0x15D8, 0);
++	WREG32(0x15DC, 0);
++
++	/* empty context1-15 */
++	/* FIXME start with 4G, once using 2 level pt switch to full
++	 * vm size space
++	 */
++	/* set vm size, must be a multiple of 4 */
++	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
++	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
++	for (i = 1; i < 16; i++) {
++		if (i < 8)
++			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
++			       rdev->gart.table_addr >> 12);
++		else
++			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
++			       rdev->gart.table_addr >> 12);
++	}
++
++	/* enable context1-15 */
++	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
++	       (u32)(rdev->dummy_page.addr >> 12));
++	WREG32(VM_CONTEXT1_CNTL2, 0);
++	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
++				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
++
++	si_pcie_gart_tlb_flush(rdev);
++	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
++		 (unsigned)(rdev->mc.gtt_size >> 20),
++		 (unsigned long long)rdev->gart.table_addr);
++	rdev->gart.ready = true;
++	return 0;
++}
++
++void si_pcie_gart_disable(struct radeon_device *rdev)
++{
++	/* Disable all tables */
++	WREG32(VM_CONTEXT0_CNTL, 0);
++	WREG32(VM_CONTEXT1_CNTL, 0);
++	/* Setup TLB control */
++	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
++	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
++	/* Setup L2 cache */
++	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
++	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
++	       EFFECTIVE_L2_QUEUE_SIZE(7) |
++	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
++	WREG32(VM_L2_CNTL2, 0);
++	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
++	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
++	radeon_gart_table_vram_unpin(rdev);
++}
++
++void si_pcie_gart_fini(struct radeon_device *rdev)
++{
++	si_pcie_gart_disable(rdev);
++	radeon_gart_table_vram_free(rdev);
++	radeon_gart_fini(rdev);
++}
++
++/* vm parser */
++static bool si_vm_reg_valid(u32 reg)
++{
++	/* context regs are fine */
++	if (reg >= 0x28000)
++		return true;
++
++	/* check config regs */
++	switch (reg) {
++	case GRBM_GFX_INDEX:
++	case CP_STRMOUT_CNTL:
++	case VGT_VTX_VECT_EJECT_REG:
++	case VGT_CACHE_INVALIDATION:
++	case VGT_ESGS_RING_SIZE:
++	case VGT_GSVS_RING_SIZE:
++	case VGT_GS_VERTEX_REUSE:
++	case VGT_PRIMITIVE_TYPE:
++	case VGT_INDEX_TYPE:
++	case VGT_NUM_INDICES:
++	case VGT_NUM_INSTANCES:
++	case VGT_TF_RING_SIZE:
++	case VGT_HS_OFFCHIP_PARAM:
++	case VGT_TF_MEMORY_BASE:
++	case PA_CL_ENHANCE:
++	case PA_SU_LINE_STIPPLE_VALUE:
++	case PA_SC_LINE_STIPPLE_STATE:
++	case PA_SC_ENHANCE:
++	case SQC_CACHES:
++	case SPI_STATIC_THREAD_MGMT_1:
++	case SPI_STATIC_THREAD_MGMT_2:
++	case SPI_STATIC_THREAD_MGMT_3:
++	case SPI_PS_MAX_WAVE_ID:
++	case SPI_CONFIG_CNTL:
++	case SPI_CONFIG_CNTL_1:
++	case TA_CNTL_AUX:
++		return true;
++	default:
++		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
++		return false;
++	}
++}
++
++static int si_vm_packet3_ce_check(struct radeon_device *rdev,
++				  u32 *ib, struct radeon_cs_packet *pkt)
++{
++	switch (pkt->opcode) {
++	case PACKET3_NOP:
++	case PACKET3_SET_BASE:
++	case PACKET3_SET_CE_DE_COUNTERS:
++	case PACKET3_LOAD_CONST_RAM:
++	case PACKET3_WRITE_CONST_RAM:
++	case PACKET3_WRITE_CONST_RAM_OFFSET:
++	case PACKET3_DUMP_CONST_RAM:
++	case PACKET3_INCREMENT_CE_COUNTER:
++	case PACKET3_WAIT_ON_DE_COUNTER:
++	case PACKET3_CE_WRITE:
++		break;
++	default:
++		DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
++		return -EINVAL;
++	}
++	return 0;
++}
++
++static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
++				   u32 *ib, struct radeon_cs_packet *pkt)
++{
++	u32 idx = pkt->idx + 1;
++	u32 idx_value = ib[idx];
++	u32 start_reg, end_reg, reg, i;
++
++	switch (pkt->opcode) {
++	case PACKET3_NOP:
++	case PACKET3_SET_BASE:
++	case PACKET3_CLEAR_STATE:
++	case PACKET3_INDEX_BUFFER_SIZE:
++	case PACKET3_DISPATCH_DIRECT:
++	case PACKET3_DISPATCH_INDIRECT:
++	case PACKET3_ALLOC_GDS:
++	case PACKET3_WRITE_GDS_RAM:
++	case PACKET3_ATOMIC_GDS:
++	case PACKET3_ATOMIC:
++	case PACKET3_OCCLUSION_QUERY:
++	case PACKET3_SET_PREDICATION:
++	case PACKET3_COND_EXEC:
++	case PACKET3_PRED_EXEC:
++	case PACKET3_DRAW_INDIRECT:
++	case PACKET3_DRAW_INDEX_INDIRECT:
++	case PACKET3_INDEX_BASE:
++	case PACKET3_DRAW_INDEX_2:
++	case PACKET3_CONTEXT_CONTROL:
++	case PACKET3_INDEX_TYPE:
++	case PACKET3_DRAW_INDIRECT_MULTI:
++	case PACKET3_DRAW_INDEX_AUTO:
++	case PACKET3_DRAW_INDEX_IMMD:
++	case PACKET3_NUM_INSTANCES:
++	case PACKET3_DRAW_INDEX_MULTI_AUTO:
++	case PACKET3_STRMOUT_BUFFER_UPDATE:
++	case PACKET3_DRAW_INDEX_OFFSET_2:
++	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
++	case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
++	case PACKET3_MPEG_INDEX:
++	case PACKET3_WAIT_REG_MEM:
++	case PACKET3_MEM_WRITE:
++	case PACKET3_PFP_SYNC_ME:
++	case PACKET3_SURFACE_SYNC:
++	case PACKET3_EVENT_WRITE:
++	case PACKET3_EVENT_WRITE_EOP:
++	case PACKET3_EVENT_WRITE_EOS:
++	case PACKET3_SET_CONTEXT_REG:
++	case PACKET3_SET_CONTEXT_REG_INDIRECT:
++	case PACKET3_SET_SH_REG:
++	case PACKET3_SET_SH_REG_OFFSET:
++	case PACKET3_INCREMENT_DE_COUNTER:
++	case PACKET3_WAIT_ON_CE_COUNTER:
++	case PACKET3_WAIT_ON_AVAIL_BUFFER:
++	case PACKET3_ME_WRITE:
++		break;
++	case PACKET3_COPY_DATA:
++		if ((idx_value & 0xf00) == 0) {
++			reg = ib[idx + 3] * 4;
++			if (!si_vm_reg_valid(reg))
++				return -EINVAL;
++		}
++		break;
++	case PACKET3_WRITE_DATA:
++		if ((idx_value & 0xf00) == 0) {
++			start_reg = ib[idx + 1] * 4;
++			if (idx_value & 0x10000) {
++				if (!si_vm_reg_valid(start_reg))
++					return -EINVAL;
++			} else {
++				for (i = 0; i < (pkt->count - 2); i++) {
++					reg = start_reg + (4 * i);
++					if (!si_vm_reg_valid(reg))
++						return -EINVAL;
++				}
++			}
++		}
++		break;
++	case PACKET3_COND_WRITE:
++		if (idx_value & 0x100) {
++			reg = ib[idx + 5] * 4;
++			if (!si_vm_reg_valid(reg))
++				return -EINVAL;
++		}
++		break;
++	case PACKET3_COPY_DW:
++		if (idx_value & 0x2) {
++			reg = ib[idx + 3] * 4;
++			if (!si_vm_reg_valid(reg))
++				return -EINVAL;
++		}
++		break;
++	case PACKET3_SET_CONFIG_REG:
++		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
++		end_reg = 4 * pkt->count + start_reg - 4;
++		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
++		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
++		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
++			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
++			return -EINVAL;
++		}
++		for (i = 0; i < pkt->count; i++) {
++			reg = start_reg + (4 * i);
++			if (!si_vm_reg_valid(reg))
++				return -EINVAL;
++		}
++		break;
++	default:
++		DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
++		return -EINVAL;
++	}
++	return 0;
++}
++
++static int si_vm_packet3_compute_check(struct radeon_device *rdev,
++				       u32 *ib, struct radeon_cs_packet *pkt)
++{
++	u32 idx = pkt->idx + 1;
++	u32 idx_value = ib[idx];
++	u32 start_reg, reg, i;
++
++	switch (pkt->opcode) {
++	case PACKET3_NOP:
++	case PACKET3_SET_BASE:
++	case PACKET3_CLEAR_STATE:
++	case PACKET3_DISPATCH_DIRECT:
++	case PACKET3_DISPATCH_INDIRECT:
++	case PACKET3_ALLOC_GDS:
++	case PACKET3_WRITE_GDS_RAM:
++	case PACKET3_ATOMIC_GDS:
++	case PACKET3_ATOMIC:
++	case PACKET3_OCCLUSION_QUERY:
++	case PACKET3_SET_PREDICATION:
++	case PACKET3_COND_EXEC:
++	case PACKET3_PRED_EXEC:
++	case PACKET3_CONTEXT_CONTROL:
++	case PACKET3_STRMOUT_BUFFER_UPDATE:
++	case PACKET3_WAIT_REG_MEM:
++	case PACKET3_MEM_WRITE:
++	case PACKET3_PFP_SYNC_ME:
++	case PACKET3_SURFACE_SYNC:
++	case PACKET3_EVENT_WRITE:
++	case PACKET3_EVENT_WRITE_EOP:
++	case PACKET3_EVENT_WRITE_EOS:
++	case PACKET3_SET_CONTEXT_REG:
++	case PACKET3_SET_CONTEXT_REG_INDIRECT:
++	case PACKET3_SET_SH_REG:
++	case PACKET3_SET_SH_REG_OFFSET:
++	case PACKET3_INCREMENT_DE_COUNTER:
++	case PACKET3_WAIT_ON_CE_COUNTER:
++	case PACKET3_WAIT_ON_AVAIL_BUFFER:
++	case PACKET3_ME_WRITE:
++		break;
++	case PACKET3_COPY_DATA:
++		if ((idx_value & 0xf00) == 0) {
++			reg = ib[idx + 3] * 4;
++			if (!si_vm_reg_valid(reg))
++				return -EINVAL;
++		}
++		break;
++	case PACKET3_WRITE_DATA:
++		if ((idx_value & 0xf00) == 0) {
++			start_reg = ib[idx + 1] * 4;
++			if (idx_value & 0x10000) {
++				if (!si_vm_reg_valid(start_reg))
++					return -EINVAL;
++			} else {
++				for (i = 0; i < (pkt->count - 2); i++) {
++					reg = start_reg + (4 * i);
++					if (!si_vm_reg_valid(reg))
++						return -EINVAL;
++				}
++			}
++		}
++		break;
++	case PACKET3_COND_WRITE:
++		if (idx_value & 0x100) {
++			reg = ib[idx + 5] * 4;
++			if (!si_vm_reg_valid(reg))
++				return -EINVAL;
++		}
++		break;
++	case PACKET3_COPY_DW:
++		if (idx_value & 0x2) {
++			reg = ib[idx + 3] * 4;
++			if (!si_vm_reg_valid(reg))
++				return -EINVAL;
++		}
++		break;
++	default:
++		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
++		return -EINVAL;
++	}
++	return 0;
++}
++
++int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
++{
++	int ret = 0;
++	u32 idx = 0;
++	struct radeon_cs_packet pkt;
++
++	do {
++		pkt.idx = idx;
++		pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
++		pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
++		pkt.one_reg_wr = 0;
++		switch (pkt.type) {
++		case PACKET_TYPE0:
++			dev_err(rdev->dev, "Packet0 not allowed!\n");
++			ret = -EINVAL;
++			break;
++		case PACKET_TYPE2:
++			idx += 1;
++			break;
++		case PACKET_TYPE3:
++			pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
++			if (ib->is_const_ib)
++				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
++			else {
++				switch (ib->fence->ring) {
++				case RADEON_RING_TYPE_GFX_INDEX:
++					ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
++					break;
++				case CAYMAN_RING_TYPE_CP1_INDEX:
++				case CAYMAN_RING_TYPE_CP2_INDEX:
++					ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
++					break;
++				default:
++					dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->fence->ring);
++					ret = -EINVAL;
++					break;
++				}
++			}
++			idx += pkt.count + 2;
++			break;
++		default:
++			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
++			ret = -EINVAL;
++			break;
++		}
++		if (ret)
++			break;
++	} while (idx < ib->length_dw);
++
++	return ret;
++}
++
++/*
++ * vm
++ */
++int si_vm_init(struct radeon_device *rdev)
++{
++	/* number of VMs */
++	rdev->vm_manager.nvm = 16;
++	/* base offset of vram pages */
++	rdev->vm_manager.vram_base_offset = 0;
++
++	return 0;
++}
++
++void si_vm_fini(struct radeon_device *rdev)
++{
++}
++
++int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
++{
++	if (id < 8)
++		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
++	else
++		WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((id - 8) << 2),
++		       vm->pt_gpu_addr >> 12);
++	/* flush hdp cache */
++	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
++	/* bits 0-15 are the VM contexts0-15 */
++	WREG32(VM_INVALIDATE_REQUEST, 1 << id);
++	return 0;
++}
++
++void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
++{
++	if (vm->id < 8)
++		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
++	else
++		WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2), 0);
++	/* flush hdp cache */
++	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
++	/* bits 0-15 are the VM contexts0-15 */
++	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
++}
++
++/* si_vm_tlb_flush - flush the TLB entries of @vm's hardware context.
++ * A vm->id of -1 means the VM is not bound to a context, so there is
++ * nothing to flush.
++ */
++void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
++{
++	if (vm->id == -1)
++		return;
++
++	/* flush hdp cache */
++	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
++	/* bits 0-15 are the VM contexts0-15 */
++	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
++}
++
++/*
++ * RLC
++ */
++/* si_rlc_fini - free the RLC buffer objects (save/restore block and
++ * clear state block).  Each BO is reserved, unpinned, unreserved and
++ * unreferenced in that order; a failed reserve is only warned about
++ * so teardown always completes.  Safe to call on partially-initialized
++ * state (NULL objects are skipped) - the init error paths rely on that.
++ */
++void si_rlc_fini(struct radeon_device *rdev)
++{
++	int r;
++
++	/* save restore block */
++	if (rdev->rlc.save_restore_obj) {
++		r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
++		if (unlikely(r != 0))
++			dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
++		radeon_bo_unpin(rdev->rlc.save_restore_obj);
++		radeon_bo_unreserve(rdev->rlc.save_restore_obj);
++
++		radeon_bo_unref(&rdev->rlc.save_restore_obj);
++		rdev->rlc.save_restore_obj = NULL;
++	}
++
++	/* clear state block */
++	if (rdev->rlc.clear_state_obj) {
++		r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
++		if (unlikely(r != 0))
++			dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
++		radeon_bo_unpin(rdev->rlc.clear_state_obj);
++		radeon_bo_unreserve(rdev->rlc.clear_state_obj);
++
++		radeon_bo_unref(&rdev->rlc.clear_state_obj);
++		rdev->rlc.clear_state_obj = NULL;
++	}
++}
++
++/* si_rlc_init - allocate and pin the RLC buffer objects in VRAM.
++ * Creates the save/restore and clear-state BOs (one GPU page each)
++ * only if not already present, so repeated calls (e.g. on resume)
++ * reuse existing objects.  On any failure everything allocated so
++ * far is torn down via si_rlc_fini() and the error is returned.
++ */
++int si_rlc_init(struct radeon_device *rdev)
++{
++	int r;
++
++	/* save restore block */
++	if (rdev->rlc.save_restore_obj == NULL) {
++		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
++				RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.save_restore_obj);
++		if (r) {
++			dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
++			return r;
++		}
++	}
++
++	r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
++	if (unlikely(r != 0)) {
++		si_rlc_fini(rdev);
++		return r;
++	}
++	r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
++			  &rdev->rlc.save_restore_gpu_addr);
++	radeon_bo_unreserve(rdev->rlc.save_restore_obj);
++	if (r) {
++		dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
++		si_rlc_fini(rdev);
++		return r;
++	}
++
++	/* clear state block */
++	if (rdev->rlc.clear_state_obj == NULL) {
++		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
++				RADEON_GEM_DOMAIN_VRAM, &rdev->rlc.clear_state_obj);
++		if (r) {
++			dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
++			si_rlc_fini(rdev);
++			return r;
++		}
++	}
++	r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
++	if (unlikely(r != 0)) {
++		si_rlc_fini(rdev);
++		return r;
++	}
++	r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
++			  &rdev->rlc.clear_state_gpu_addr);
++	radeon_bo_unreserve(rdev->rlc.clear_state_obj);
++	if (r) {
++		dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
++		si_rlc_fini(rdev);
++		return r;
++	}
++
++	return 0;
++}
++
++/* si_rlc_stop - halt the RLC by clearing its control register. */
++static void si_rlc_stop(struct radeon_device *rdev)
++{
++	WREG32(RLC_CNTL, 0);
++}
++
++/* si_rlc_start - enable the RLC (run-list controller). */
++static void si_rlc_start(struct radeon_device *rdev)
++{
++	WREG32(RLC_CNTL, RLC_ENABLE);
++}
++
++/* si_rlc_resume - stop the RLC, program its buffer bases, upload the
++ * RLC microcode word-by-word (firmware is stored big-endian), then
++ * restart it.  Returns -EINVAL if no RLC firmware has been loaded.
++ */
++static int si_rlc_resume(struct radeon_device *rdev)
++{
++	u32 i;
++	const __be32 *fw_data;
++
++	if (!rdev->rlc_fw)
++		return -EINVAL;
++
++	si_rlc_stop(rdev);
++
++	WREG32(RLC_RL_BASE, 0);
++	WREG32(RLC_RL_SIZE, 0);
++	WREG32(RLC_LB_CNTL, 0);
++	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
++	WREG32(RLC_LB_CNTR_INIT, 0);
++
++	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
++	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
++
++	WREG32(RLC_MC_CNTL, 0);
++	WREG32(RLC_UCODE_CNTL, 0);
++
++	/* ucode words are big-endian in the firmware blob */
++	fw_data = (const __be32 *)rdev->rlc_fw->data;
++	for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
++		WREG32(RLC_UCODE_ADDR, i);
++		WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
++	}
++	WREG32(RLC_UCODE_ADDR, 0);
++
++	si_rlc_start(rdev);
++
++	return 0;
++}
++
++/* si_enable_interrupts - turn on the interrupt handler (IH) block and
++ * its ring buffer, and record the enabled state in rdev->ih.
++ */
++static void si_enable_interrupts(struct radeon_device *rdev)
++{
++	u32 ih_cntl = RREG32(IH_CNTL);
++	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
++
++	ih_cntl |= ENABLE_INTR;
++	ih_rb_cntl |= IH_RB_ENABLE;
++	WREG32(IH_CNTL, ih_cntl);
++	WREG32(IH_RB_CNTL, ih_rb_cntl);
++	rdev->ih.enabled = true;
++}
++
++/* si_disable_interrupts - disable the IH block and its ring buffer,
++ * reset the hardware ring pointers to 0 and mirror that in rdev->ih.
++ */
++static void si_disable_interrupts(struct radeon_device *rdev)
++{
++	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
++	u32 ih_cntl = RREG32(IH_CNTL);
++
++	ih_rb_cntl &= ~IH_RB_ENABLE;
++	ih_cntl &= ~ENABLE_INTR;
++	WREG32(IH_RB_CNTL, ih_rb_cntl);
++	WREG32(IH_CNTL, ih_cntl);
++	/* set rptr, wptr to 0 */
++	WREG32(IH_RB_RPTR, 0);
++	WREG32(IH_RB_WPTR, 0);
++	rdev->ih.enabled = false;
++	rdev->ih.wptr = 0;
++	rdev->ih.rptr = 0;
++}
++
++/* si_disable_interrupt_state - force every interrupt source (CP rings,
++ * GRBM, per-CRTC vblank and pageflip, DAC detect, HPD) into the
++ * disabled state.  CRTC registers beyond the first pair are only
++ * touched when the ASIC actually has 4 or 6 CRTCs.  The HPD writes
++ * preserve only the polarity bit of each control register.
++ */
++static void si_disable_interrupt_state(struct radeon_device *rdev)
++{
++	u32 tmp;
++
++	WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
++	WREG32(CP_INT_CNTL_RING1, 0);
++	WREG32(CP_INT_CNTL_RING2, 0);
++	WREG32(GRBM_INT_CNTL, 0);
++	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
++	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
++	if (rdev->num_crtc >= 4) {
++		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
++		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
++	}
++	if (rdev->num_crtc >= 6) {
++		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
++		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
++	}
++
++	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
++	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
++	if (rdev->num_crtc >= 4) {
++		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
++		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
++	}
++	if (rdev->num_crtc >= 6) {
++		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
++		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
++	}
++
++	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
++
++	/* keep only the polarity bit; clears the enable bits */
++	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
++	WREG32(DC_HPD1_INT_CONTROL, tmp);
++	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
++	WREG32(DC_HPD2_INT_CONTROL, tmp);
++	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
++	WREG32(DC_HPD3_INT_CONTROL, tmp);
++	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
++	WREG32(DC_HPD4_INT_CONTROL, tmp);
++	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
++	WREG32(DC_HPD5_INT_CONTROL, tmp);
++	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
++	WREG32(DC_HPD6_INT_CONTROL, tmp);
++
++}
++
++/* si_irq_init - one-time bring-up of the interrupt handler block:
++ * allocate the IH ring, load/start the RLC, program the IH ring
++ * buffer (base, size, optional writeback of the write pointer),
++ * then enable interrupts with all sources masked off.  Returns 0
++ * on success or a negative errno; on RLC failure the IH ring is
++ * freed again.
++ */
++static int si_irq_init(struct radeon_device *rdev)
++{
++	int ret = 0;
++	int rb_bufsz;
++	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
++
++	/* allocate ring */
++	ret = r600_ih_ring_alloc(rdev);
++	if (ret)
++		return ret;
++
++	/* disable irqs */
++	si_disable_interrupts(rdev);
++
++	/* init rlc */
++	ret = si_rlc_resume(rdev);
++	if (ret) {
++		r600_ih_ring_fini(rdev);
++		return ret;
++	}
++
++	/* setup interrupt control */
++	/* set dummy read address to ring address */
++	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
++	interrupt_cntl = RREG32(INTERRUPT_CNTL);
++	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
++	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
++	 */
++	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
++	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
++	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
++	WREG32(INTERRUPT_CNTL, interrupt_cntl);
++
++	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
++	/* ring size field is log2 of the size in dwords */
++	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
++
++	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
++		      IH_WPTR_OVERFLOW_CLEAR |
++		      (rb_bufsz << 1));
++
++	if (rdev->wb.enabled)
++		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
++
++	/* set the writeback address whether it's enabled or not */
++	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
++	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
++
++	WREG32(IH_RB_CNTL, ih_rb_cntl);
++
++	/* set rptr, wptr to 0 */
++	WREG32(IH_RB_RPTR, 0);
++	WREG32(IH_RB_WPTR, 0);
++
++	/* Default settings for IH_CNTL (disabled at first) */
++	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
++	/* RPTR_REARM only works if msi's are enabled */
++	if (rdev->msi_enabled)
++		ih_cntl |= RPTR_REARM;
++	WREG32(IH_CNTL, ih_cntl);
++
++	/* force the active interrupt state to all disabled */
++	si_disable_interrupt_state(rdev);
++
++	/* enable irqs */
++	si_enable_interrupts(rdev);
++
++	return ret;
++}
++
++/* si_irq_set - program the hardware interrupt enables from the
++ * software state in rdev->irq: CP time-stamp interrupts per ring,
++ * per-CRTC vblank, HPD hotplug and GUI-idle.  All masks are built
++ * first and written to the registers in one pass at the end.
++ * Returns 0, or -EINVAL if no IRQ handler is installed; if the IH
++ * is disabled, everything is forced off and 0 is returned.
++ */
++int si_irq_set(struct radeon_device *rdev)
++{
++	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
++	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
++	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
++	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
++	u32 grbm_int_cntl = 0;
++	/* GRPH (pageflip) interrupt masks stay 0: left disabled here */
++	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
++
++	if (!rdev->irq.installed) {
++		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
++		return -EINVAL;
++	}
++	/* don't enable anything if the ih is disabled */
++	if (!rdev->ih.enabled) {
++		si_disable_interrupts(rdev);
++		/* force the active interrupt state to all disabled */
++		si_disable_interrupt_state(rdev);
++		return 0;
++	}
++
++	/* start from current HPD state with the enable bit cleared */
++	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
++	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
++	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
++	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
++	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
++	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
++
++	/* enable CP interrupts on all rings */
++	if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
++		DRM_DEBUG("si_irq_set: sw int gfx\n");
++		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
++	}
++	if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
++		DRM_DEBUG("si_irq_set: sw int cp1\n");
++		cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
++	}
++	if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
++		DRM_DEBUG("si_irq_set: sw int cp2\n");
++		cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
++	}
++	if (rdev->irq.crtc_vblank_int[0] ||
++	    rdev->irq.pflip[0]) {
++		DRM_DEBUG("si_irq_set: vblank 0\n");
++		crtc1 |= VBLANK_INT_MASK;
++	}
++	if (rdev->irq.crtc_vblank_int[1] ||
++	    rdev->irq.pflip[1]) {
++		DRM_DEBUG("si_irq_set: vblank 1\n");
++		crtc2 |= VBLANK_INT_MASK;
++	}
++	if (rdev->irq.crtc_vblank_int[2] ||
++	    rdev->irq.pflip[2]) {
++		DRM_DEBUG("si_irq_set: vblank 2\n");
++		crtc3 |= VBLANK_INT_MASK;
++	}
++	if (rdev->irq.crtc_vblank_int[3] ||
++	    rdev->irq.pflip[3]) {
++		DRM_DEBUG("si_irq_set: vblank 3\n");
++		crtc4 |= VBLANK_INT_MASK;
++	}
++	if (rdev->irq.crtc_vblank_int[4] ||
++	    rdev->irq.pflip[4]) {
++		DRM_DEBUG("si_irq_set: vblank 4\n");
++		crtc5 |= VBLANK_INT_MASK;
++	}
++	if (rdev->irq.crtc_vblank_int[5] ||
++	    rdev->irq.pflip[5]) {
++		DRM_DEBUG("si_irq_set: vblank 5\n");
++		crtc6 |= VBLANK_INT_MASK;
++	}
++	if (rdev->irq.hpd[0]) {
++		DRM_DEBUG("si_irq_set: hpd 1\n");
++		hpd1 |= DC_HPDx_INT_EN;
++	}
++	if (rdev->irq.hpd[1]) {
++		DRM_DEBUG("si_irq_set: hpd 2\n");
++		hpd2 |= DC_HPDx_INT_EN;
++	}
++	if (rdev->irq.hpd[2]) {
++		DRM_DEBUG("si_irq_set: hpd 3\n");
++		hpd3 |= DC_HPDx_INT_EN;
++	}
++	if (rdev->irq.hpd[3]) {
++		DRM_DEBUG("si_irq_set: hpd 4\n");
++		hpd4 |= DC_HPDx_INT_EN;
++	}
++	if (rdev->irq.hpd[4]) {
++		DRM_DEBUG("si_irq_set: hpd 5\n");
++		hpd5 |= DC_HPDx_INT_EN;
++	}
++	if (rdev->irq.hpd[5]) {
++		DRM_DEBUG("si_irq_set: hpd 6\n");
++		hpd6 |= DC_HPDx_INT_EN;
++	}
++	if (rdev->irq.gui_idle) {
++		DRM_DEBUG("gui idle\n");
++		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
++	}
++
++	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
++	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
++	WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
++
++	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
++
++	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
++	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
++	if (rdev->num_crtc >= 4) {
++		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
++		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
++	}
++	if (rdev->num_crtc >= 6) {
++		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
++		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
++	}
++
++	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
++	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
++	if (rdev->num_crtc >= 4) {
++		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
++		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
++	}
++	if (rdev->num_crtc >= 6) {
++		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
++		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
++	}
++
++	WREG32(DC_HPD1_INT_CONTROL, hpd1);
++	WREG32(DC_HPD2_INT_CONTROL, hpd2);
++	WREG32(DC_HPD3_INT_CONTROL, hpd3);
++	WREG32(DC_HPD4_INT_CONTROL, hpd4);
++	WREG32(DC_HPD5_INT_CONTROL, hpd5);
++	WREG32(DC_HPD6_INT_CONTROL, hpd6);
++
++	return 0;
++}
++
++/* si_irq_ack - snapshot all display interrupt status registers into
++ * rdev->irq.stat_regs.evergreen, then acknowledge every pending
++ * pageflip, vblank, vline and HPD interrupt in the hardware.
++ * CRTC 2/3 and 4/5 registers are only touched on ASICs with 4 or 6
++ * CRTCs.  The HPD acks are read-modify-write so the enable/polarity
++ * bits of each DC_HPDx_INT_CONTROL register are preserved.
++ */
++static inline void si_irq_ack(struct radeon_device *rdev)
++{
++	u32 tmp;
++
++	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
++	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
++	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
++	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
++	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
++	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
++	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
++	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
++	if (rdev->num_crtc >= 4) {
++		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
++		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
++	}
++	if (rdev->num_crtc >= 6) {
++		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
++		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
++	}
++
++	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
++		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
++	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
++		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
++	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
++		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
++	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
++		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
++	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
++		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
++	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
++		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
++
++	if (rdev->num_crtc >= 4) {
++		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
++			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
++		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
++			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
++		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
++			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
++		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
++			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
++		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
++			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
++		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
++			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
++	}
++
++	if (rdev->num_crtc >= 6) {
++		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
++			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
++		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
++			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
++		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
++			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
++		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
++			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
++		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
++			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
++		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
++			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
++	}
++
++	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
++		tmp = RREG32(DC_HPD1_INT_CONTROL);
++		tmp |= DC_HPDx_INT_ACK;
++		WREG32(DC_HPD1_INT_CONTROL, tmp);
++	}
++	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
++		tmp = RREG32(DC_HPD2_INT_CONTROL);
++		tmp |= DC_HPDx_INT_ACK;
++		WREG32(DC_HPD2_INT_CONTROL, tmp);
++	}
++	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
++		tmp = RREG32(DC_HPD3_INT_CONTROL);
++		tmp |= DC_HPDx_INT_ACK;
++		WREG32(DC_HPD3_INT_CONTROL, tmp);
++	}
++	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
++		tmp = RREG32(DC_HPD4_INT_CONTROL);
++		tmp |= DC_HPDx_INT_ACK;
++		WREG32(DC_HPD4_INT_CONTROL, tmp);
++	}
++	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
++		tmp = RREG32(DC_HPD5_INT_CONTROL);
++		tmp |= DC_HPDx_INT_ACK;
++		WREG32(DC_HPD5_INT_CONTROL, tmp);
++	}
++	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
++		/* was RREG32(DC_HPD5_INT_CONTROL): copy-paste typo that made
++		 * the HPD6 ack overwrite HPD6's control with HPD5's bits */
++		tmp = RREG32(DC_HPD6_INT_CONTROL);
++		tmp |= DC_HPDx_INT_ACK;
++		WREG32(DC_HPD6_INT_CONTROL, tmp);
++	}
++}
++
++/* si_irq_disable - turn the IH off, give in-flight interrupts 1 ms to
++ * land, ack anything still pending, then force all sources disabled.
++ */
++static void si_irq_disable(struct radeon_device *rdev)
++{
++	si_disable_interrupts(rdev);
++	/* Wait and acknowledge irq */
++	mdelay(1);
++	si_irq_ack(rdev);
++	si_disable_interrupt_state(rdev);
++}
++
++/* si_irq_suspend - disable interrupts and halt the RLC for suspend. */
++static void si_irq_suspend(struct radeon_device *rdev)
++{
++	si_irq_disable(rdev);
++	si_rlc_stop(rdev);
++}
++
++/* si_irq_fini - suspend the interrupt hardware and free the IH ring. */
++static void si_irq_fini(struct radeon_device *rdev)
++{
++	si_irq_suspend(rdev);
++	r600_ih_ring_fini(rdev);
++}
++
++/* si_get_ih_wptr - read the current IH ring write pointer, from the
++ * writeback buffer when enabled, otherwise from the register.  On
++ * ring overflow, resynchronize rptr past the overwritten entries and
++ * clear the overflow flag.  Returns the wptr masked to the ring size.
++ */
++static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
++{
++	u32 wptr, tmp;
++
++	if (rdev->wb.enabled)
++		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
++	else
++		wptr = RREG32(IH_RB_WPTR);
++
++	if (wptr & RB_OVERFLOW) {
++		/* When a ring buffer overflow happen start parsing interrupt
++		 * from the last not overwritten vector (wptr + 16). Hopefully
++		 * this should allow us to catchup.
++		 */
++		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
++			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
++		/* the logged value above now matches this assignment; it
++		 * previously printed (wptr + 16) + ptr_mask ('+' typo) */
++		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
++		tmp = RREG32(IH_RB_CNTL);
++		tmp |= IH_WPTR_OVERFLOW_CLEAR;
++		WREG32(IH_RB_CNTL, tmp);
++	}
++	return (wptr & rdev->ih.ptr_mask);
++}
++
++/*        SI IV Ring
++ * Each IV ring entry is 128 bits:
++ * [7:0]    - interrupt source id
++ * [31:8]   - reserved
++ * [59:32]  - interrupt source data
++ * [63:60]  - reserved
++ * [71:64]  - RINGID
++ * [79:72]  - VMID
++ * [127:80] - reserved
++ */
++/* si_irq_process - top-half interrupt processing for SI.
++ * Drains the IH ring under rdev->ih.lock: for each 16-byte IV ring
++ * entry it decodes source id / source data / ring id and dispatches
++ * vblank & vline (sources 1-6), HPD hotplug (42), CP interrupts
++ * (176-178), CP EOP (181, demuxed by ring_id) and GUI idle (233).
++ * After draining it re-reads wptr and restarts if more entries
++ * arrived meanwhile.  Hotplug work is deferred to hotplug_work.
++ * Returns IRQ_HANDLED if anything was processed, IRQ_NONE otherwise.
++ */
++int si_irq_process(struct radeon_device *rdev)
++{
++	u32 wptr;
++	u32 rptr;
++	u32 src_id, src_data, ring_id;
++	u32 ring_index;
++	unsigned long flags;
++	bool queue_hotplug = false;
++
++	if (!rdev->ih.enabled || rdev->shutdown)
++		return IRQ_NONE;
++
++	wptr = si_get_ih_wptr(rdev);
++	rptr = rdev->ih.rptr;
++	DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
++
++	spin_lock_irqsave(&rdev->ih.lock, flags);
++	if (rptr == wptr) {
++		spin_unlock_irqrestore(&rdev->ih.lock, flags);
++		return IRQ_NONE;
++	}
++restart_ih:
++	/* Order reading of wptr vs. reading of IH ring data */
++	rmb();
++
++	/* display interrupts */
++	si_irq_ack(rdev);
++
++	rdev->ih.wptr = wptr;
++	while (rptr != wptr) {
++		/* wptr/rptr are in bytes! */
++		ring_index = rptr / 4;
++		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
++		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
++		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
++
++		switch (src_id) {
++		case 1: /* D1 vblank/vline */
++			switch (src_data) {
++			case 0: /* D1 vblank */
++				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
++					if (rdev->irq.crtc_vblank_int[0]) {
++						drm_handle_vblank(rdev->ddev, 0);
++						rdev->pm.vblank_sync = true;
++						wake_up(&rdev->irq.vblank_queue);
++					}
++					if (rdev->irq.pflip[0])
++						radeon_crtc_handle_flip(rdev, 0);
++					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
++					DRM_DEBUG("IH: D1 vblank\n");
++				}
++				break;
++			case 1: /* D1 vline */
++				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
++					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
++					DRM_DEBUG("IH: D1 vline\n");
++				}
++				break;
++			default:
++				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
++				break;
++			}
++			break;
++		case 2: /* D2 vblank/vline */
++			switch (src_data) {
++			case 0: /* D2 vblank */
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
++					if (rdev->irq.crtc_vblank_int[1]) {
++						drm_handle_vblank(rdev->ddev, 1);
++						rdev->pm.vblank_sync = true;
++						wake_up(&rdev->irq.vblank_queue);
++					}
++					if (rdev->irq.pflip[1])
++						radeon_crtc_handle_flip(rdev, 1);
++					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
++					DRM_DEBUG("IH: D2 vblank\n");
++				}
++				break;
++			case 1: /* D2 vline */
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
++					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
++					DRM_DEBUG("IH: D2 vline\n");
++				}
++				break;
++			default:
++				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
++				break;
++			}
++			break;
++		case 3: /* D3 vblank/vline */
++			switch (src_data) {
++			case 0: /* D3 vblank */
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
++					if (rdev->irq.crtc_vblank_int[2]) {
++						drm_handle_vblank(rdev->ddev, 2);
++						rdev->pm.vblank_sync = true;
++						wake_up(&rdev->irq.vblank_queue);
++					}
++					if (rdev->irq.pflip[2])
++						radeon_crtc_handle_flip(rdev, 2);
++					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
++					DRM_DEBUG("IH: D3 vblank\n");
++				}
++				break;
++			case 1: /* D3 vline */
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
++					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
++					DRM_DEBUG("IH: D3 vline\n");
++				}
++				break;
++			default:
++				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
++				break;
++			}
++			break;
++		case 4: /* D4 vblank/vline */
++			switch (src_data) {
++			case 0: /* D4 vblank */
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
++					if (rdev->irq.crtc_vblank_int[3]) {
++						drm_handle_vblank(rdev->ddev, 3);
++						rdev->pm.vblank_sync = true;
++						wake_up(&rdev->irq.vblank_queue);
++					}
++					if (rdev->irq.pflip[3])
++						radeon_crtc_handle_flip(rdev, 3);
++					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
++					DRM_DEBUG("IH: D4 vblank\n");
++				}
++				break;
++			case 1: /* D4 vline */
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
++					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
++					DRM_DEBUG("IH: D4 vline\n");
++				}
++				break;
++			default:
++				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
++				break;
++			}
++			break;
++		case 5: /* D5 vblank/vline */
++			switch (src_data) {
++			case 0: /* D5 vblank */
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
++					if (rdev->irq.crtc_vblank_int[4]) {
++						drm_handle_vblank(rdev->ddev, 4);
++						rdev->pm.vblank_sync = true;
++						wake_up(&rdev->irq.vblank_queue);
++					}
++					if (rdev->irq.pflip[4])
++						radeon_crtc_handle_flip(rdev, 4);
++					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
++					DRM_DEBUG("IH: D5 vblank\n");
++				}
++				break;
++			case 1: /* D5 vline */
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
++					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
++					DRM_DEBUG("IH: D5 vline\n");
++				}
++				break;
++			default:
++				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
++				break;
++			}
++			break;
++		case 6: /* D6 vblank/vline */
++			switch (src_data) {
++			case 0: /* D6 vblank */
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
++					if (rdev->irq.crtc_vblank_int[5]) {
++						drm_handle_vblank(rdev->ddev, 5);
++						rdev->pm.vblank_sync = true;
++						wake_up(&rdev->irq.vblank_queue);
++					}
++					if (rdev->irq.pflip[5])
++						radeon_crtc_handle_flip(rdev, 5);
++					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
++					DRM_DEBUG("IH: D6 vblank\n");
++				}
++				break;
++			case 1: /* D6 vline */
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
++					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
++					DRM_DEBUG("IH: D6 vline\n");
++				}
++				break;
++			default:
++				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
++				break;
++			}
++			break;
++		case 42: /* HPD hotplug */
++			switch (src_data) {
++			case 0:
++				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
++					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
++					queue_hotplug = true;
++					DRM_DEBUG("IH: HPD1\n");
++				}
++				break;
++			case 1:
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
++					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
++					queue_hotplug = true;
++					DRM_DEBUG("IH: HPD2\n");
++				}
++				break;
++			case 2:
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
++					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
++					queue_hotplug = true;
++					DRM_DEBUG("IH: HPD3\n");
++				}
++				break;
++			case 3:
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
++					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
++					queue_hotplug = true;
++					DRM_DEBUG("IH: HPD4\n");
++				}
++				break;
++			case 4:
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
++					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
++					queue_hotplug = true;
++					DRM_DEBUG("IH: HPD5\n");
++				}
++				break;
++			case 5:
++				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
++					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
++					queue_hotplug = true;
++					DRM_DEBUG("IH: HPD6\n");
++				}
++				break;
++			default:
++				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
++				break;
++			}
++			break;
++		case 176: /* RINGID0 CP_INT */
++			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
++			break;
++		case 177: /* RINGID1 CP_INT */
++			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
++			break;
++		case 178: /* RINGID2 CP_INT */
++			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
++			break;
++		case 181: /* CP EOP event */
++			DRM_DEBUG("IH: CP EOP\n");
++			switch (ring_id) {
++			case 0:
++				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
++				break;
++			case 1:
++				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
++				break;
++			case 2:
++				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
++				break;
++			}
++			break;
++		case 233: /* GUI IDLE */
++			DRM_DEBUG("IH: GUI idle\n");
++			rdev->pm.gui_idle = true;
++			wake_up(&rdev->irq.idle_queue);
++			break;
++		default:
++			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
++			break;
++		}
++
++		/* wptr/rptr are in bytes! */
++		rptr += 16;
++		rptr &= rdev->ih.ptr_mask;
++	}
++	/* make sure wptr hasn't changed while processing */
++	wptr = si_get_ih_wptr(rdev);
++	if (wptr != rdev->ih.wptr)
++		goto restart_ih;
++	if (queue_hotplug)
++		schedule_work(&rdev->hotplug_work);
++	rdev->ih.rptr = rptr;
++	WREG32(IH_RB_RPTR, rdev->ih.rptr);
++	spin_unlock_irqrestore(&rdev->ih.lock, flags);
++	return IRQ_HANDLED;
++}
++
++/*
++ * startup/shutdown callbacks
++ */
++/* si_startup - bring the ASIC to a working state (shared by init and
++ * resume): load missing firmware, program the MC, enable GART, init
++ * the GPU, allocate RLC and writeback buffers, start fences on all
++ * three CP rings, enable IRQs, initialize/resume the rings, load and
++ * start the CP, then run an IB test on each ring and start the VM
++ * manager.  Order matters throughout.  Returns 0 or a negative errno;
++ * failed IB tests also clear rdev->accel_working.
++ */
++static int si_startup(struct radeon_device *rdev)
++{
++	struct radeon_ring *ring;
++	int r;
++
++	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
++	    !rdev->rlc_fw || !rdev->mc_fw) {
++		r = si_init_microcode(rdev);
++		if (r) {
++			DRM_ERROR("Failed to load firmware!\n");
++			return r;
++		}
++	}
++
++	r = si_mc_load_microcode(rdev);
++	if (r) {
++		DRM_ERROR("Failed to load MC firmware!\n");
++		return r;
++	}
++
++	r = r600_vram_scratch_init(rdev);
++	if (r)
++		return r;
++
++	si_mc_program(rdev);
++	r = si_pcie_gart_enable(rdev);
++	if (r)
++		return r;
++	si_gpu_init(rdev);
++
++#if 0
++	r = evergreen_blit_init(rdev);
++	if (r) {
++		r600_blit_fini(rdev);
++		rdev->asic->copy = NULL;
++		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
++	}
++#endif
++	/* allocate rlc buffers */
++	r = si_rlc_init(rdev);
++	if (r) {
++		DRM_ERROR("Failed to init rlc BOs!\n");
++		return r;
++	}
++
++	/* allocate wb buffer */
++	r = radeon_wb_init(rdev);
++	if (r)
++		return r;
++
++	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
++	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
++	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
++	if (r) {
++		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
++		return r;
++	}
++
++	/* Enable IRQ */
++	r = si_irq_init(rdev);
++	if (r) {
++		DRM_ERROR("radeon: IH init failed (%d).\n", r);
++		radeon_irq_kms_fini(rdev);
++		return r;
++	}
++	si_irq_set(rdev);
++
++	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
++	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
++			     CP_RB0_RPTR, CP_RB0_WPTR,
++			     0, 0xfffff, RADEON_CP_PACKET2);
++	if (r)
++		return r;
++
++	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
++	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
++			     CP_RB1_RPTR, CP_RB1_WPTR,
++			     0, 0xfffff, RADEON_CP_PACKET2);
++	if (r)
++		return r;
++
++	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
++	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
++			     CP_RB2_RPTR, CP_RB2_WPTR,
++			     0, 0xfffff, RADEON_CP_PACKET2);
++	if (r)
++		return r;
++
++	r = si_cp_load_microcode(rdev);
++	if (r)
++		return r;
++	r = si_cp_resume(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_pool_start(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
++	if (r) {
++		DRM_ERROR("radeon: failed testing IB (%d) on CP ring 0\n", r);
++		rdev->accel_working = false;
++		return r;
++	}
++
++	r = radeon_ib_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
++	if (r) {
++		DRM_ERROR("radeon: failed testing IB (%d) on CP ring 1\n", r);
++		rdev->accel_working = false;
++		return r;
++	}
++
++	r = radeon_ib_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
++	if (r) {
++		DRM_ERROR("radeon: failed testing IB (%d) on CP ring 2\n", r);
++		rdev->accel_working = false;
++		return r;
++	}
++
++	r = radeon_vm_manager_start(rdev);
++	if (r)
++		return r;
++
++	return 0;
++}
++
++int si_resume(struct radeon_device *rdev)
++{
++	int r;
++
++	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
++	 * posting will perform necessary task to bring back GPU into good
++	 * shape.
++	 */
++	/* post card */
++	atom_asic_init(rdev->mode_info.atom_context);
++
++	rdev->accel_working = true;
++	r = si_startup(rdev);
++	if (r) {
++		DRM_ERROR("si startup failed on resume\n");
++		rdev->accel_working = false;
++		return r;
++	}
++
++	return r;
++
++}
++
++int si_suspend(struct radeon_device *rdev)
++{
++	/* FIXME: we should wait for ring to be empty */
++	radeon_ib_pool_suspend(rdev);
++	radeon_vm_manager_suspend(rdev);
++#if 0
++	r600_blit_suspend(rdev);
++#endif
++	si_cp_enable(rdev, false);
++	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
++	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
++	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
++	si_irq_suspend(rdev);
++	radeon_wb_disable(rdev);
++	si_pcie_gart_disable(rdev);
++	return 0;
++}
++
++/* Plan is to move initialization in that function and use
++ * helper function so that radeon_device_init pretty much
++ * do nothing more than calling asic specific function. This
++ * should also allow to remove a bunch of callback function
++ * like vram_info.
++ */
++int si_init(struct radeon_device *rdev)
++{
++	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
++	int r;
++
++	/* This don't do much */
++	r = radeon_gem_init(rdev);
++	if (r)
++		return r;
++	/* Read BIOS */
++	if (!radeon_get_bios(rdev)) {
++		if (ASIC_IS_AVIVO(rdev))
++			return -EINVAL;
++	}
++	/* Must be an ATOMBIOS */
++	if (!rdev->is_atom_bios) {
++		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
++		return -EINVAL;
++	}
++	r = radeon_atombios_init(rdev);
++	if (r)
++		return r;
++
++	/* Post card if necessary */
++	if (!radeon_card_posted(rdev)) {
++		if (!rdev->bios) {
++			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
++			return -EINVAL;
++		}
++		DRM_INFO("GPU not posted. posting now...\n");
++		atom_asic_init(rdev->mode_info.atom_context);
++	}
++	/* Initialize scratch registers */
++	si_scratch_init(rdev);
++	/* Initialize surface registers */
++	radeon_surface_init(rdev);
++	/* Initialize clocks */
++	radeon_get_clock_info(rdev->ddev);
++
++	/* Fence driver */
++	r = radeon_fence_driver_init(rdev);
++	if (r)
++		return r;
++
++	/* initialize memory controller */
++	r = si_mc_init(rdev);
++	if (r)
++		return r;
++	/* Memory manager */
++	r = radeon_bo_init(rdev);
++	if (r)
++		return r;
++
++	r = radeon_irq_kms_init(rdev);
++	if (r)
++		return r;
++
++	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
++	ring->ring_obj = NULL;
++	r600_ring_init(rdev, ring, 1024 * 1024);
++
++	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
++	ring->ring_obj = NULL;
++	r600_ring_init(rdev, ring, 1024 * 1024);
++
++	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
++	ring->ring_obj = NULL;
++	r600_ring_init(rdev, ring, 1024 * 1024);
++
++	rdev->ih.ring_obj = NULL;
++	r600_ih_ring_init(rdev, 64 * 1024);
++
++	r = r600_pcie_gart_init(rdev);
++	if (r)
++		return r;
++
++	r = radeon_ib_pool_init(rdev);
++	rdev->accel_working = true;
++	if (r) {
++		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
++		rdev->accel_working = false;
++	}
++	r = radeon_vm_manager_init(rdev);
++	if (r) {
++		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
++	}
++
++	r = si_startup(rdev);
++	if (r) {
++		dev_err(rdev->dev, "disabling GPU acceleration\n");
++		si_cp_fini(rdev);
++		si_irq_fini(rdev);
++		si_rlc_fini(rdev);
++		radeon_wb_fini(rdev);
++		r100_ib_fini(rdev);
++		radeon_vm_manager_fini(rdev);
++		radeon_irq_kms_fini(rdev);
++		si_pcie_gart_fini(rdev);
++		rdev->accel_working = false;
++	}
++
++	/* Don't start up if the MC ucode is missing.
++	 * The default clocks and voltages before the MC ucode
++	 * is loaded are not suffient for advanced operations.
++	 */
++	if (!rdev->mc_fw) {
++		DRM_ERROR("radeon: MC ucode required for NI+.\n");
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++void si_fini(struct radeon_device *rdev)
++{
++#if 0
++	r600_blit_fini(rdev);
++#endif
++	si_cp_fini(rdev);
++	si_irq_fini(rdev);
++	si_rlc_fini(rdev);
++	radeon_wb_fini(rdev);
++	radeon_vm_manager_fini(rdev);
++	r100_ib_fini(rdev);
++	radeon_irq_kms_fini(rdev);
++	si_pcie_gart_fini(rdev);
++	r600_vram_scratch_fini(rdev);
++	radeon_gem_fini(rdev);
++	radeon_semaphore_driver_fini(rdev);
++	radeon_fence_driver_fini(rdev);
++	radeon_bo_fini(rdev);
++	radeon_atombios_fini(rdev);
++	kfree(rdev->bios);
++	rdev->bios = NULL;
++}
++
+diff --git a/drivers/gpu/drm/radeon/si_blit_shaders.c b/drivers/gpu/drm/radeon/si_blit_shaders.c
+new file mode 100644
+index 0000000..ec415e7
+--- /dev/null
++++ b/drivers/gpu/drm/radeon/si_blit_shaders.c
+@@ -0,0 +1,253 @@
++/*
++ * Copyright 2011 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *     Alex Deucher <alexander.deucher at amd.com>
++ */
++
++#include <linux/types.h>
++#include <linux/bug.h>
++#include <linux/kernel.h>
++
++const u32 si_default_state[] =
++{
++	0xc0066900,
++	0x00000000,
++	0x00000060, /* DB_RENDER_CONTROL */
++	0x00000000, /* DB_COUNT_CONTROL */
++	0x00000000, /* DB_DEPTH_VIEW */
++	0x0000002a, /* DB_RENDER_OVERRIDE */
++	0x00000000, /* DB_RENDER_OVERRIDE2 */
++	0x00000000, /* DB_HTILE_DATA_BASE */
++
++	0xc0046900,
++	0x00000008,
++	0x00000000, /* DB_DEPTH_BOUNDS_MIN */
++	0x00000000, /* DB_DEPTH_BOUNDS_MAX */
++	0x00000000, /* DB_STENCIL_CLEAR */
++	0x00000000, /* DB_DEPTH_CLEAR */
++
++	0xc0036900,
++	0x0000000f,
++	0x00000000, /* DB_DEPTH_INFO */
++	0x00000000, /* DB_Z_INFO */
++	0x00000000, /* DB_STENCIL_INFO */
++
++	0xc0016900,
++	0x00000080,
++	0x00000000, /* PA_SC_WINDOW_OFFSET */
++
++	0xc00d6900,
++	0x00000083,
++	0x0000ffff, /* PA_SC_CLIPRECT_RULE */
++	0x00000000, /* PA_SC_CLIPRECT_0_TL */
++	0x20002000, /* PA_SC_CLIPRECT_0_BR */
++	0x00000000,
++	0x20002000,
++	0x00000000,
++	0x20002000,
++	0x00000000,
++	0x20002000,
++	0xaaaaaaaa, /* PA_SC_EDGERULE */
++	0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
++	0x0000000f, /* CB_TARGET_MASK */
++	0x0000000f, /* CB_SHADER_MASK */
++
++	0xc0226900,
++	0x00000094,
++	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
++	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
++	0x80000000,
++	0x20002000,
++	0x80000000,
++	0x20002000,
++	0x80000000,
++	0x20002000,
++	0x80000000,
++	0x20002000,
++	0x80000000,
++	0x20002000,
++	0x80000000,
++	0x20002000,
++	0x80000000,
++	0x20002000,
++	0x80000000,
++	0x20002000,
++	0x80000000,
++	0x20002000,
++	0x80000000,
++	0x20002000,
++	0x80000000,
++	0x20002000,
++	0x80000000,
++	0x20002000,
++	0x80000000,
++	0x20002000,
++	0x80000000,
++	0x20002000,
++	0x80000000,
++	0x20002000,
++	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
++	0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
++
++	0xc0026900,
++	0x000000d9,
++	0x00000000, /* CP_RINGID */
++	0x00000000, /* CP_VMID */
++
++	0xc0046900,
++	0x00000100,
++	0xffffffff, /* VGT_MAX_VTX_INDX */
++	0x00000000, /* VGT_MIN_VTX_INDX */
++	0x00000000, /* VGT_INDX_OFFSET */
++	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
++
++	0xc0046900,
++	0x00000105,
++	0x00000000, /* CB_BLEND_RED */
++	0x00000000, /* CB_BLEND_GREEN */
++	0x00000000, /* CB_BLEND_BLUE */
++	0x00000000, /* CB_BLEND_ALPHA */
++
++	0xc0016900,
++	0x000001e0,
++	0x00000000, /* CB_BLEND0_CONTROL */
++
++	0xc00e6900,
++	0x00000200,
++	0x00000000, /* DB_DEPTH_CONTROL */
++	0x00000000, /* DB_EQAA */
++	0x00cc0010, /* CB_COLOR_CONTROL */
++	0x00000210, /* DB_SHADER_CONTROL */
++	0x00010000, /* PA_CL_CLIP_CNTL */
++	0x00000004, /* PA_SU_SC_MODE_CNTL */
++	0x00000100, /* PA_CL_VTE_CNTL */
++	0x00000000, /* PA_CL_VS_OUT_CNTL */
++	0x00000000, /* PA_CL_NANINF_CNTL */
++	0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
++	0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
++	0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
++	0x00000000, /*  */
++	0x00000000, /*  */
++
++	0xc0116900,
++	0x00000280,
++	0x00000000, /* PA_SU_POINT_SIZE */
++	0x00000000, /* PA_SU_POINT_MINMAX */
++	0x00000008, /* PA_SU_LINE_CNTL */
++	0x00000000, /* PA_SC_LINE_STIPPLE */
++	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
++	0x00000000, /* VGT_HOS_CNTL */
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000, /* VGT_GS_MODE */
++
++	0xc0026900,
++	0x00000292,
++	0x00000000, /* PA_SC_MODE_CNTL_0 */
++	0x00000000, /* PA_SC_MODE_CNTL_1 */
++
++	0xc0016900,
++	0x000002a1,
++	0x00000000, /* VGT_PRIMITIVEID_EN */
++
++	0xc0016900,
++	0x000002a5,
++	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
++
++	0xc0026900,
++	0x000002a8,
++	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
++	0x00000000,
++
++	0xc0026900,
++	0x000002ad,
++	0x00000000, /* VGT_REUSE_OFF */
++	0x00000000,
++
++	0xc0016900,
++	0x000002d5,
++	0x00000000, /* VGT_SHADER_STAGES_EN */
++
++	0xc0016900,
++	0x000002dc,
++	0x0000aa00, /* DB_ALPHA_TO_MASK */
++
++	0xc0066900,
++	0x000002de,
++	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++
++	0xc0026900,
++	0x000002e5,
++	0x00000000, /* VGT_STRMOUT_CONFIG */
++	0x00000000,
++
++	0xc01b6900,
++	0x000002f5,
++	0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
++	0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
++	0x00000000, /* PA_SC_LINE_CNTL */
++	0x00000000, /* PA_SC_AA_CONFIG */
++	0x00000005, /* PA_SU_VTX_CNTL */
++	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
++	0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
++	0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
++	0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
++	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0x00000000,
++	0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
++	0xffffffff,
++
++	0xc0026900,
++	0x00000316,
++	0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
++	0x00000010, /*  */
++};
++
++const u32 si_default_size = ARRAY_SIZE(si_default_state);
+diff --git a/drivers/gpu/drm/radeon/si_blit_shaders.h b/drivers/gpu/drm/radeon/si_blit_shaders.h
+new file mode 100644
+index 0000000..c739e51
+--- /dev/null
++++ b/drivers/gpu/drm/radeon/si_blit_shaders.h
+@@ -0,0 +1,32 @@
++/*
++ * Copyright 2011 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef SI_BLIT_SHADERS_H
++#define SI_BLIT_SHADERS_H
++
++extern const u32 si_default_state[];
++
++extern const u32 si_default_size;
++
++#endif
+diff --git a/drivers/gpu/drm/radeon/si_reg.h b/drivers/gpu/drm/radeon/si_reg.h
+new file mode 100644
+index 0000000..eda938a
+--- /dev/null
++++ b/drivers/gpu/drm/radeon/si_reg.h
+@@ -0,0 +1,33 @@
++/*
++ * Copyright 2010 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Alex Deucher
++ */
++#ifndef __SI_REG_H__
++#define __SI_REG_H__
++
++/* SI */
++#define SI_DC_GPIO_HPD_MASK                      0x65b0
++#define SI_DC_GPIO_HPD_A                         0x65b4
++#define SI_DC_GPIO_HPD_EN                        0x65b8
++#define SI_DC_GPIO_HPD_Y                         0x65bc
++
++#endif
+diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
+new file mode 100644
+index 0000000..2c2bc63
+--- /dev/null
++++ b/drivers/gpu/drm/radeon/sid.h
+@@ -0,0 +1,887 @@
++/*
++ * Copyright 2011 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Alex Deucher
++ */
++#ifndef SI_H
++#define SI_H
++
++#define	CG_MULT_THERMAL_STATUS					0x714
++#define		ASIC_MAX_TEMP(x)				((x) << 0)
++#define		ASIC_MAX_TEMP_MASK				0x000001ff
++#define		ASIC_MAX_TEMP_SHIFT				0
++#define		CTF_TEMP(x)					((x) << 9)
++#define		CTF_TEMP_MASK					0x0003fe00
++#define		CTF_TEMP_SHIFT					9
++
++#define SI_MAX_SH_GPRS           256
++#define SI_MAX_TEMP_GPRS         16
++#define SI_MAX_SH_THREADS        256
++#define SI_MAX_SH_STACK_ENTRIES  4096
++#define SI_MAX_FRC_EOV_CNT       16384
++#define SI_MAX_BACKENDS          8
++#define SI_MAX_BACKENDS_MASK     0xFF
++#define SI_MAX_BACKENDS_PER_SE_MASK     0x0F
++#define SI_MAX_SIMDS             12
++#define SI_MAX_SIMDS_MASK        0x0FFF
++#define SI_MAX_SIMDS_PER_SE_MASK        0x00FF
++#define SI_MAX_PIPES             8
++#define SI_MAX_PIPES_MASK        0xFF
++#define SI_MAX_PIPES_PER_SIMD_MASK      0x3F
++#define SI_MAX_LDS_NUM           0xFFFF
++#define SI_MAX_TCC               16
++#define SI_MAX_TCC_MASK          0xFFFF
++
++#define VGA_HDP_CONTROL  				0x328
++#define		VGA_MEMORY_DISABLE				(1 << 4)
++
++#define DMIF_ADDR_CONFIG  				0xBD4
++
++#define	SRBM_STATUS				        0xE50
++
++#define	CC_SYS_RB_BACKEND_DISABLE			0xe80
++#define	GC_USER_SYS_RB_BACKEND_DISABLE			0xe84
++
++#define VM_L2_CNTL					0x1400
++#define		ENABLE_L2_CACHE					(1 << 0)
++#define		ENABLE_L2_FRAGMENT_PROCESSING			(1 << 1)
++#define		L2_CACHE_PTE_ENDIAN_SWAP_MODE(x)		((x) << 2)
++#define		L2_CACHE_PDE_ENDIAN_SWAP_MODE(x)		((x) << 4)
++#define		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE		(1 << 9)
++#define		ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE	(1 << 10)
++#define		EFFECTIVE_L2_QUEUE_SIZE(x)			(((x) & 7) << 15)
++#define		CONTEXT1_IDENTITY_ACCESS_MODE(x)		(((x) & 3) << 19)
++#define VM_L2_CNTL2					0x1404
++#define		INVALIDATE_ALL_L1_TLBS				(1 << 0)
++#define		INVALIDATE_L2_CACHE				(1 << 1)
++#define		INVALIDATE_CACHE_MODE(x)			((x) << 26)
++#define			INVALIDATE_PTE_AND_PDE_CACHES		0
++#define			INVALIDATE_ONLY_PTE_CACHES		1
++#define			INVALIDATE_ONLY_PDE_CACHES		2
++#define VM_L2_CNTL3					0x1408
++#define		BANK_SELECT(x)					((x) << 0)
++#define		L2_CACHE_UPDATE_MODE(x)				((x) << 6)
++#define		L2_CACHE_BIGK_FRAGMENT_SIZE(x)			((x) << 15)
++#define		L2_CACHE_BIGK_ASSOCIATIVITY			(1 << 20)
++#define	VM_L2_STATUS					0x140C
++#define		L2_BUSY						(1 << 0)
++#define VM_CONTEXT0_CNTL				0x1410
++#define		ENABLE_CONTEXT					(1 << 0)
++#define		PAGE_TABLE_DEPTH(x)				(((x) & 3) << 1)
++#define		RANGE_PROTECTION_FAULT_ENABLE_DEFAULT		(1 << 4)
++#define VM_CONTEXT1_CNTL				0x1414
++#define VM_CONTEXT0_CNTL2				0x1430
++#define VM_CONTEXT1_CNTL2				0x1434
++#define	VM_CONTEXT8_PAGE_TABLE_BASE_ADDR		0x1438
++#define	VM_CONTEXT9_PAGE_TABLE_BASE_ADDR		0x143c
++#define	VM_CONTEXT10_PAGE_TABLE_BASE_ADDR		0x1440
++#define	VM_CONTEXT11_PAGE_TABLE_BASE_ADDR		0x1444
++#define	VM_CONTEXT12_PAGE_TABLE_BASE_ADDR		0x1448
++#define	VM_CONTEXT13_PAGE_TABLE_BASE_ADDR		0x144c
++#define	VM_CONTEXT14_PAGE_TABLE_BASE_ADDR		0x1450
++#define	VM_CONTEXT15_PAGE_TABLE_BASE_ADDR		0x1454
++
++#define VM_INVALIDATE_REQUEST				0x1478
++#define VM_INVALIDATE_RESPONSE				0x147c
++
++#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR	0x1518
++#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR	0x151c
++
++#define	VM_CONTEXT0_PAGE_TABLE_BASE_ADDR		0x153c
++#define	VM_CONTEXT1_PAGE_TABLE_BASE_ADDR		0x1540
++#define	VM_CONTEXT2_PAGE_TABLE_BASE_ADDR		0x1544
++#define	VM_CONTEXT3_PAGE_TABLE_BASE_ADDR		0x1548
++#define	VM_CONTEXT4_PAGE_TABLE_BASE_ADDR		0x154c
++#define	VM_CONTEXT5_PAGE_TABLE_BASE_ADDR		0x1550
++#define	VM_CONTEXT6_PAGE_TABLE_BASE_ADDR		0x1554
++#define	VM_CONTEXT7_PAGE_TABLE_BASE_ADDR		0x1558
++#define	VM_CONTEXT0_PAGE_TABLE_START_ADDR		0x155c
++#define	VM_CONTEXT1_PAGE_TABLE_START_ADDR		0x1560
++
++#define	VM_CONTEXT0_PAGE_TABLE_END_ADDR			0x157C
++#define	VM_CONTEXT1_PAGE_TABLE_END_ADDR			0x1580
++
++#define MC_SHARED_CHMAP						0x2004
++#define		NOOFCHAN_SHIFT					12
++#define		NOOFCHAN_MASK					0x0000f000
++#define MC_SHARED_CHREMAP					0x2008
++
++#define	MC_VM_FB_LOCATION				0x2024
++#define	MC_VM_AGP_TOP					0x2028
++#define	MC_VM_AGP_BOT					0x202C
++#define	MC_VM_AGP_BASE					0x2030
++#define	MC_VM_SYSTEM_APERTURE_LOW_ADDR			0x2034
++#define	MC_VM_SYSTEM_APERTURE_HIGH_ADDR			0x2038
++#define	MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR		0x203C
++
++#define	MC_VM_MX_L1_TLB_CNTL				0x2064
++#define		ENABLE_L1_TLB					(1 << 0)
++#define		ENABLE_L1_FRAGMENT_PROCESSING			(1 << 1)
++#define		SYSTEM_ACCESS_MODE_PA_ONLY			(0 << 3)
++#define		SYSTEM_ACCESS_MODE_USE_SYS_MAP			(1 << 3)
++#define		SYSTEM_ACCESS_MODE_IN_SYS			(2 << 3)
++#define		SYSTEM_ACCESS_MODE_NOT_IN_SYS			(3 << 3)
++#define		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU	(0 << 5)
++#define		ENABLE_ADVANCED_DRIVER_MODEL			(1 << 6)
++
++#define MC_SHARED_BLACKOUT_CNTL           		0x20ac
++
++#define	MC_ARB_RAMCFG					0x2760
++#define		NOOFBANK_SHIFT					0
++#define		NOOFBANK_MASK					0x00000003
++#define		NOOFRANK_SHIFT					2
++#define		NOOFRANK_MASK					0x00000004
++#define		NOOFROWS_SHIFT					3
++#define		NOOFROWS_MASK					0x00000038
++#define		NOOFCOLS_SHIFT					6
++#define		NOOFCOLS_MASK					0x000000C0
++#define		CHANSIZE_SHIFT					8
++#define		CHANSIZE_MASK					0x00000100
++#define		CHANSIZE_OVERRIDE				(1 << 11)
++#define		NOOFGROUPS_SHIFT				12
++#define		NOOFGROUPS_MASK					0x00001000
++
++#define	MC_SEQ_TRAIN_WAKEUP_CNTL			0x2808
++#define		TRAIN_DONE_D0      			(1 << 30)
++#define		TRAIN_DONE_D1      			(1 << 31)
++
++#define MC_SEQ_SUP_CNTL           			0x28c8
++#define		RUN_MASK      				(1 << 0)
++#define MC_SEQ_SUP_PGM           			0x28cc
++
++#define MC_IO_PAD_CNTL_D0           			0x29d0
++#define		MEM_FALL_OUT_CMD      			(1 << 8)
++
++#define MC_SEQ_IO_DEBUG_INDEX           		0x2a44
++#define MC_SEQ_IO_DEBUG_DATA           			0x2a48
++
++#define	HDP_HOST_PATH_CNTL				0x2C00
++#define	HDP_NONSURFACE_BASE				0x2C04
++#define	HDP_NONSURFACE_INFO				0x2C08
++#define	HDP_NONSURFACE_SIZE				0x2C0C
++
++#define HDP_ADDR_CONFIG  				0x2F48
++#define HDP_MISC_CNTL					0x2F4C
++#define 	HDP_FLUSH_INVALIDATE_CACHE			(1 << 0)
++
++#define IH_RB_CNTL                                        0x3e00
++#       define IH_RB_ENABLE                               (1 << 0)
++#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
++#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
++#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
++#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
++#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
++#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
++#define IH_RB_BASE                                        0x3e04
++#define IH_RB_RPTR                                        0x3e08
++#define IH_RB_WPTR                                        0x3e0c
++#       define RB_OVERFLOW                                (1 << 0)
++#       define WPTR_OFFSET_MASK                           0x3fffc
++#define IH_RB_WPTR_ADDR_HI                                0x3e10
++#define IH_RB_WPTR_ADDR_LO                                0x3e14
++#define IH_CNTL                                           0x3e18
++#       define ENABLE_INTR                                (1 << 0)
++#       define IH_MC_SWAP(x)                              ((x) << 1)
++#       define IH_MC_SWAP_NONE                            0
++#       define IH_MC_SWAP_16BIT                           1
++#       define IH_MC_SWAP_32BIT                           2
++#       define IH_MC_SWAP_64BIT                           3
++#       define RPTR_REARM                                 (1 << 4)
++#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
++#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
++#       define MC_VMID(x)                                 ((x) << 25)
++
++#define	CONFIG_MEMSIZE					0x5428
++
++#define INTERRUPT_CNTL                                    0x5468
++#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
++#       define IH_DUMMY_RD_EN                             (1 << 1)
++#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
++#       define GEN_IH_INT_EN                              (1 << 8)
++#define INTERRUPT_CNTL2                                   0x546c
++
++#define HDP_MEM_COHERENCY_FLUSH_CNTL			0x5480
++
++#define	BIF_FB_EN						0x5490
++#define		FB_READ_EN					(1 << 0)
++#define		FB_WRITE_EN					(1 << 1)
++
++#define HDP_REG_COHERENCY_FLUSH_CNTL			0x54A0
++
++#define	DC_LB_MEMORY_SPLIT					0x6b0c
++#define		DC_LB_MEMORY_CONFIG(x)				((x) << 20)
++
++#define	PRIORITY_A_CNT						0x6b18
++#define		PRIORITY_MARK_MASK				0x7fff
++#define		PRIORITY_OFF					(1 << 16)
++#define		PRIORITY_ALWAYS_ON				(1 << 20)
++#define	PRIORITY_B_CNT						0x6b1c
++
++#define	DPG_PIPE_ARBITRATION_CONTROL3				0x6cc8
++#       define LATENCY_WATERMARK_MASK(x)			((x) << 16)
++#define	DPG_PIPE_LATENCY_CONTROL				0x6ccc
++#       define LATENCY_LOW_WATERMARK(x)				((x) << 0)
++#       define LATENCY_HIGH_WATERMARK(x)			((x) << 16)
++
++/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
++#define VLINE_STATUS                                    0x6bb8
++#       define VLINE_OCCURRED                           (1 << 0)
++#       define VLINE_ACK                                (1 << 4)
++#       define VLINE_STAT                               (1 << 12)
++#       define VLINE_INTERRUPT                          (1 << 16)
++#       define VLINE_INTERRUPT_TYPE                     (1 << 17)
++/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
++#define VBLANK_STATUS                                   0x6bbc
++#       define VBLANK_OCCURRED                          (1 << 0)
++#       define VBLANK_ACK                               (1 << 4)
++#       define VBLANK_STAT                              (1 << 12)
++#       define VBLANK_INTERRUPT                         (1 << 16)
++#       define VBLANK_INTERRUPT_TYPE                    (1 << 17)
++
++/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
++#define INT_MASK                                        0x6b40
++#       define VBLANK_INT_MASK                          (1 << 0)
++#       define VLINE_INT_MASK                           (1 << 4)
++
++#define DISP_INTERRUPT_STATUS                           0x60f4
++#       define LB_D1_VLINE_INTERRUPT                    (1 << 2)
++#       define LB_D1_VBLANK_INTERRUPT                   (1 << 3)
++#       define DC_HPD1_INTERRUPT                        (1 << 17)
++#       define DC_HPD1_RX_INTERRUPT                     (1 << 18)
++#       define DACA_AUTODETECT_INTERRUPT                (1 << 22)
++#       define DACB_AUTODETECT_INTERRUPT                (1 << 23)
++#       define DC_I2C_SW_DONE_INTERRUPT                 (1 << 24)
++#       define DC_I2C_HW_DONE_INTERRUPT                 (1 << 25)
++#define DISP_INTERRUPT_STATUS_CONTINUE                  0x60f8
++#       define LB_D2_VLINE_INTERRUPT                    (1 << 2)
++#       define LB_D2_VBLANK_INTERRUPT                   (1 << 3)
++#       define DC_HPD2_INTERRUPT                        (1 << 17)
++#       define DC_HPD2_RX_INTERRUPT                     (1 << 18)
++#       define DISP_TIMER_INTERRUPT                     (1 << 24)
++#define DISP_INTERRUPT_STATUS_CONTINUE2                 0x60fc
++#       define LB_D3_VLINE_INTERRUPT                    (1 << 2)
++#       define LB_D3_VBLANK_INTERRUPT                   (1 << 3)
++#       define DC_HPD3_INTERRUPT                        (1 << 17)
++#       define DC_HPD3_RX_INTERRUPT                     (1 << 18)
++#define DISP_INTERRUPT_STATUS_CONTINUE3                 0x6100
++#       define LB_D4_VLINE_INTERRUPT                    (1 << 2)
++#       define LB_D4_VBLANK_INTERRUPT                   (1 << 3)
++#       define DC_HPD4_INTERRUPT                        (1 << 17)
++#       define DC_HPD4_RX_INTERRUPT                     (1 << 18)
++#define DISP_INTERRUPT_STATUS_CONTINUE4                 0x614c
++#       define LB_D5_VLINE_INTERRUPT                    (1 << 2)
++#       define LB_D5_VBLANK_INTERRUPT                   (1 << 3)
++#       define DC_HPD5_INTERRUPT                        (1 << 17)
++#       define DC_HPD5_RX_INTERRUPT                     (1 << 18)
++#define DISP_INTERRUPT_STATUS_CONTINUE5                 0x6150
++#       define LB_D6_VLINE_INTERRUPT                    (1 << 2)
++#       define LB_D6_VBLANK_INTERRUPT                   (1 << 3)
++#       define DC_HPD6_INTERRUPT                        (1 << 17)
++#       define DC_HPD6_RX_INTERRUPT                     (1 << 18)
++
++/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
++#define GRPH_INT_STATUS                                 0x6858
++#       define GRPH_PFLIP_INT_OCCURRED                  (1 << 0)
++#       define GRPH_PFLIP_INT_CLEAR                     (1 << 8)
++/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
++#define	GRPH_INT_CONTROL			        0x685c
++#       define GRPH_PFLIP_INT_MASK                      (1 << 0)
++#       define GRPH_PFLIP_INT_TYPE                      (1 << 8)
++
++#define	DACA_AUTODETECT_INT_CONTROL			0x66c8
++
++#define DC_HPD1_INT_STATUS                              0x601c
++#define DC_HPD2_INT_STATUS                              0x6028
++#define DC_HPD3_INT_STATUS                              0x6034
++#define DC_HPD4_INT_STATUS                              0x6040
++#define DC_HPD5_INT_STATUS                              0x604c
++#define DC_HPD6_INT_STATUS                              0x6058
++#       define DC_HPDx_INT_STATUS                       (1 << 0)
++#       define DC_HPDx_SENSE                            (1 << 1)
++#       define DC_HPDx_RX_INT_STATUS                    (1 << 8)
++
++#define DC_HPD1_INT_CONTROL                             0x6020
++#define DC_HPD2_INT_CONTROL                             0x602c
++#define DC_HPD3_INT_CONTROL                             0x6038
++#define DC_HPD4_INT_CONTROL                             0x6044
++#define DC_HPD5_INT_CONTROL                             0x6050
++#define DC_HPD6_INT_CONTROL                             0x605c
++#       define DC_HPDx_INT_ACK                          (1 << 0)
++#       define DC_HPDx_INT_POLARITY                     (1 << 8)
++#       define DC_HPDx_INT_EN                           (1 << 16)
++#       define DC_HPDx_RX_INT_ACK                       (1 << 20)
++#       define DC_HPDx_RX_INT_EN                        (1 << 24)
++
++#define DC_HPD1_CONTROL                                   0x6024
++#define DC_HPD2_CONTROL                                   0x6030
++#define DC_HPD3_CONTROL                                   0x603c
++#define DC_HPD4_CONTROL                                   0x6048
++#define DC_HPD5_CONTROL                                   0x6054
++#define DC_HPD6_CONTROL                                   0x6060
++#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
++#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
++#       define DC_HPDx_EN                                 (1 << 28)
++
++/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
++#define CRTC_STATUS_FRAME_COUNT                         0x6e98
++
++#define	GRBM_CNTL					0x8000
++#define		GRBM_READ_TIMEOUT(x)				((x) << 0)
++
++#define	GRBM_STATUS2					0x8008
++#define		RLC_RQ_PENDING 					(1 << 0)
++#define		RLC_BUSY 					(1 << 8)
++#define		TC_BUSY 					(1 << 9)
++
++#define	GRBM_STATUS					0x8010
++#define		CMDFIFO_AVAIL_MASK				0x0000000F
++#define		RING2_RQ_PENDING				(1 << 4)
++#define		SRBM_RQ_PENDING					(1 << 5)
++#define		RING1_RQ_PENDING				(1 << 6)
++#define		CF_RQ_PENDING					(1 << 7)
++#define		PF_RQ_PENDING					(1 << 8)
++#define		GDS_DMA_RQ_PENDING				(1 << 9)
++#define		GRBM_EE_BUSY					(1 << 10)
++#define		DB_CLEAN					(1 << 12)
++#define		CB_CLEAN					(1 << 13)
++#define		TA_BUSY 					(1 << 14)
++#define		GDS_BUSY 					(1 << 15)
++#define		VGT_BUSY					(1 << 17)
++#define		IA_BUSY_NO_DMA					(1 << 18)
++#define		IA_BUSY						(1 << 19)
++#define		SX_BUSY 					(1 << 20)
++#define		SPI_BUSY					(1 << 22)
++#define		BCI_BUSY					(1 << 23)
++#define		SC_BUSY 					(1 << 24)
++#define		PA_BUSY 					(1 << 25)
++#define		DB_BUSY 					(1 << 26)
++#define		CP_COHERENCY_BUSY      				(1 << 28)
++#define		CP_BUSY 					(1 << 29)
++#define		CB_BUSY 					(1 << 30)
++#define		GUI_ACTIVE					(1 << 31)
++#define	GRBM_STATUS_SE0					0x8014
++#define	GRBM_STATUS_SE1					0x8018
++#define		SE_DB_CLEAN					(1 << 1)
++#define		SE_CB_CLEAN					(1 << 2)
++#define		SE_BCI_BUSY					(1 << 22)
++#define		SE_VGT_BUSY					(1 << 23)
++#define		SE_PA_BUSY					(1 << 24)
++#define		SE_TA_BUSY					(1 << 25)
++#define		SE_SX_BUSY					(1 << 26)
++#define		SE_SPI_BUSY					(1 << 27)
++#define		SE_SC_BUSY					(1 << 29)
++#define		SE_DB_BUSY					(1 << 30)
++#define		SE_CB_BUSY					(1 << 31)
++
++#define	GRBM_SOFT_RESET					0x8020
++#define		SOFT_RESET_CP					(1 << 0)
++#define		SOFT_RESET_CB					(1 << 1)
++#define		SOFT_RESET_RLC					(1 << 2)
++#define		SOFT_RESET_DB					(1 << 3)
++#define		SOFT_RESET_GDS					(1 << 4)
++#define		SOFT_RESET_PA					(1 << 5)
++#define		SOFT_RESET_SC					(1 << 6)
++#define		SOFT_RESET_BCI					(1 << 7)
++#define		SOFT_RESET_SPI					(1 << 8)
++#define		SOFT_RESET_SX					(1 << 10)
++#define		SOFT_RESET_TC					(1 << 11)
++#define		SOFT_RESET_TA					(1 << 12)
++#define		SOFT_RESET_VGT					(1 << 14)
++#define		SOFT_RESET_IA					(1 << 15)
++
++#define GRBM_GFX_INDEX          			0x802C
++
++#define GRBM_INT_CNTL                                   0x8060
++#       define RDERR_INT_ENABLE                         (1 << 0)
++#       define GUI_IDLE_INT_ENABLE                      (1 << 19)
++
++#define	CP_STRMOUT_CNTL					0x84FC
++#define	SCRATCH_REG0					0x8500
++#define	SCRATCH_REG1					0x8504
++#define	SCRATCH_REG2					0x8508
++#define	SCRATCH_REG3					0x850C
++#define	SCRATCH_REG4					0x8510
++#define	SCRATCH_REG5					0x8514
++#define	SCRATCH_REG6					0x8518
++#define	SCRATCH_REG7					0x851C
++
++#define	SCRATCH_UMSK					0x8540
++#define	SCRATCH_ADDR					0x8544
++
++#define	CP_SEM_WAIT_TIMER				0x85BC
++
++#define	CP_SEM_INCOMPLETE_TIMER_CNTL			0x85C8
++
++#define CP_ME_CNTL					0x86D8
++#define		CP_CE_HALT					(1 << 24)
++#define		CP_PFP_HALT					(1 << 26)
++#define		CP_ME_HALT					(1 << 28)
++
++#define	CP_COHER_CNTL2					0x85E8
++
++#define	CP_RB2_RPTR					0x86f8
++#define	CP_RB1_RPTR					0x86fc
++#define	CP_RB0_RPTR					0x8700
++#define	CP_RB_WPTR_DELAY				0x8704
++
++#define	CP_QUEUE_THRESHOLDS				0x8760
++#define		ROQ_IB1_START(x)				((x) << 0)
++#define		ROQ_IB2_START(x)				((x) << 8)
++#define CP_MEQ_THRESHOLDS				0x8764
++#define		MEQ1_START(x)				((x) << 0)
++#define		MEQ2_START(x)				((x) << 8)
++
++#define	CP_PERFMON_CNTL					0x87FC
++
++#define	VGT_VTX_VECT_EJECT_REG				0x88B0
++
++#define	VGT_CACHE_INVALIDATION				0x88C4
++#define		CACHE_INVALIDATION(x)				((x) << 0)
++#define			VC_ONLY						0
++#define			TC_ONLY						1
++#define			VC_AND_TC					2
++#define		AUTO_INVLD_EN(x)				((x) << 6)
++#define			NO_AUTO						0
++#define			ES_AUTO						1
++#define			GS_AUTO						2
++#define			ES_AND_GS_AUTO					3
++#define	VGT_ESGS_RING_SIZE				0x88C8
++#define	VGT_GSVS_RING_SIZE				0x88CC
++
++#define	VGT_GS_VERTEX_REUSE				0x88D4
++
++#define	VGT_PRIMITIVE_TYPE				0x8958
++#define	VGT_INDEX_TYPE					0x895C
++
++#define	VGT_NUM_INDICES					0x8970
++#define	VGT_NUM_INSTANCES				0x8974
++
++#define	VGT_TF_RING_SIZE				0x8988
++
++#define	VGT_HS_OFFCHIP_PARAM				0x89B0
++
++#define	VGT_TF_MEMORY_BASE				0x89B8
++
++#define CC_GC_SHADER_ARRAY_CONFIG			0x89bc
++#define GC_USER_SHADER_ARRAY_CONFIG			0x89c0
++
++#define	PA_CL_ENHANCE					0x8A14
++#define		CLIP_VTX_REORDER_ENA				(1 << 0)
++#define		NUM_CLIP_SEQ(x)					((x) << 1)
++
++#define	PA_SU_LINE_STIPPLE_VALUE			0x8A60
++
++#define	PA_SC_LINE_STIPPLE_STATE			0x8B10
++
++#define	PA_SC_FORCE_EOV_MAX_CNTS			0x8B24
++#define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
++#define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
++
++#define	PA_SC_FIFO_SIZE					0x8BCC
++#define		SC_FRONTEND_PRIM_FIFO_SIZE(x)			((x) << 0)
++#define		SC_BACKEND_PRIM_FIFO_SIZE(x)			((x) << 6)
++#define		SC_HIZ_TILE_FIFO_SIZE(x)			((x) << 15)
++#define		SC_EARLYZ_TILE_FIFO_SIZE(x)			((x) << 23)
++
++#define	PA_SC_ENHANCE					0x8BF0
++
++#define	SQ_CONFIG					0x8C00
++
++#define	SQC_CACHES					0x8C08
++
++#define	SX_DEBUG_1					0x9060
++
++#define	SPI_STATIC_THREAD_MGMT_1			0x90E0
++#define	SPI_STATIC_THREAD_MGMT_2			0x90E4
++#define	SPI_STATIC_THREAD_MGMT_3			0x90E8
++#define	SPI_PS_MAX_WAVE_ID				0x90EC
++
++#define	SPI_CONFIG_CNTL					0x9100
++
++#define	SPI_CONFIG_CNTL_1				0x913C
++#define		VTX_DONE_DELAY(x)				((x) << 0)
++#define		INTERP_ONE_PRIM_PER_ROW				(1 << 4)
++
++#define	CGTS_TCC_DISABLE				0x9148
++#define	CGTS_USER_TCC_DISABLE				0x914C
++#define		TCC_DISABLE_MASK				0xFFFF0000
++#define		TCC_DISABLE_SHIFT				16
++
++#define	TA_CNTL_AUX					0x9508
++
++#define CC_RB_BACKEND_DISABLE				0x98F4
++#define		BACKEND_DISABLE(x)     			((x) << 16)
++#define GB_ADDR_CONFIG  				0x98F8
++#define		NUM_PIPES(x)				((x) << 0)
++#define		NUM_PIPES_MASK				0x00000007
++#define		NUM_PIPES_SHIFT				0
++#define		PIPE_INTERLEAVE_SIZE(x)			((x) << 4)
++#define		PIPE_INTERLEAVE_SIZE_MASK		0x00000070
++#define		PIPE_INTERLEAVE_SIZE_SHIFT		4
++#define		NUM_SHADER_ENGINES(x)			((x) << 12)
++#define		NUM_SHADER_ENGINES_MASK			0x00003000
++#define		NUM_SHADER_ENGINES_SHIFT		12
++#define		SHADER_ENGINE_TILE_SIZE(x)     		((x) << 16)
++#define		SHADER_ENGINE_TILE_SIZE_MASK		0x00070000
++#define		SHADER_ENGINE_TILE_SIZE_SHIFT		16
++#define		NUM_GPUS(x)     			((x) << 20)
++#define		NUM_GPUS_MASK				0x00700000
++#define		NUM_GPUS_SHIFT				20
++#define		MULTI_GPU_TILE_SIZE(x)     		((x) << 24)
++#define		MULTI_GPU_TILE_SIZE_MASK		0x03000000
++#define		MULTI_GPU_TILE_SIZE_SHIFT		24
++#define		ROW_SIZE(x)             		((x) << 28)
++#define		ROW_SIZE_MASK				0x30000000
++#define		ROW_SIZE_SHIFT				28
++
++#define	GB_TILE_MODE0					0x9910
++#       define MICRO_TILE_MODE(x)				((x) << 0)
++#              define	ADDR_SURF_DISPLAY_MICRO_TILING		0
++#              define	ADDR_SURF_THIN_MICRO_TILING		1
++#              define	ADDR_SURF_DEPTH_MICRO_TILING		2
++#       define ARRAY_MODE(x)					((x) << 2)
++#              define	ARRAY_LINEAR_GENERAL			0
++#              define	ARRAY_LINEAR_ALIGNED			1
++#              define	ARRAY_1D_TILED_THIN1			2
++#              define	ARRAY_2D_TILED_THIN1			4
++#       define PIPE_CONFIG(x)					((x) << 6)
++#              define	ADDR_SURF_P2				0
++#              define	ADDR_SURF_P4_8x16			4
++#              define	ADDR_SURF_P4_16x16			5
++#              define	ADDR_SURF_P4_16x32			6
++#              define	ADDR_SURF_P4_32x32			7
++#              define	ADDR_SURF_P8_16x16_8x16			8
++#              define	ADDR_SURF_P8_16x32_8x16			9
++#              define	ADDR_SURF_P8_32x32_8x16			10
++#              define	ADDR_SURF_P8_16x32_16x16		11
++#              define	ADDR_SURF_P8_32x32_16x16		12
++#              define	ADDR_SURF_P8_32x32_16x32		13
++#              define	ADDR_SURF_P8_32x64_32x32		14
++#       define TILE_SPLIT(x)					((x) << 11)
++#              define	ADDR_SURF_TILE_SPLIT_64B		0
++#              define	ADDR_SURF_TILE_SPLIT_128B		1
++#              define	ADDR_SURF_TILE_SPLIT_256B		2
++#              define	ADDR_SURF_TILE_SPLIT_512B		3
++#              define	ADDR_SURF_TILE_SPLIT_1KB		4
++#              define	ADDR_SURF_TILE_SPLIT_2KB		5
++#              define	ADDR_SURF_TILE_SPLIT_4KB		6
++#       define BANK_WIDTH(x)					((x) << 14)
++#              define	ADDR_SURF_BANK_WIDTH_1			0
++#              define	ADDR_SURF_BANK_WIDTH_2			1
++#              define	ADDR_SURF_BANK_WIDTH_4			2
++#              define	ADDR_SURF_BANK_WIDTH_8			3
++#       define BANK_HEIGHT(x)					((x) << 16)
++#              define	ADDR_SURF_BANK_HEIGHT_1			0
++#              define	ADDR_SURF_BANK_HEIGHT_2			1
++#              define	ADDR_SURF_BANK_HEIGHT_4			2
++#              define	ADDR_SURF_BANK_HEIGHT_8			3
++#       define MACRO_TILE_ASPECT(x)				((x) << 18)
++#              define	ADDR_SURF_MACRO_ASPECT_1		0
++#              define	ADDR_SURF_MACRO_ASPECT_2		1
++#              define	ADDR_SURF_MACRO_ASPECT_4		2
++#              define	ADDR_SURF_MACRO_ASPECT_8		3
++#       define NUM_BANKS(x)					((x) << 20)
++#              define	ADDR_SURF_2_BANK			0
++#              define	ADDR_SURF_4_BANK			1
++#              define	ADDR_SURF_8_BANK			2
++#              define	ADDR_SURF_16_BANK			3
++
++#define	CB_PERFCOUNTER0_SELECT0				0x9a20
++#define	CB_PERFCOUNTER0_SELECT1				0x9a24
++#define	CB_PERFCOUNTER1_SELECT0				0x9a28
++#define	CB_PERFCOUNTER1_SELECT1				0x9a2c
++#define	CB_PERFCOUNTER2_SELECT0				0x9a30
++#define	CB_PERFCOUNTER2_SELECT1				0x9a34
++#define	CB_PERFCOUNTER3_SELECT0				0x9a38
++#define	CB_PERFCOUNTER3_SELECT1				0x9a3c
++
++#define	GC_USER_RB_BACKEND_DISABLE			0x9B7C
++#define		BACKEND_DISABLE_MASK			0x00FF0000
++#define		BACKEND_DISABLE_SHIFT			16
++
++#define	TCP_CHAN_STEER_LO				0xac0c
++#define	TCP_CHAN_STEER_HI				0xac10
++
++#define	CP_RB0_BASE					0xC100
++#define	CP_RB0_CNTL					0xC104
++#define		RB_BUFSZ(x)					((x) << 0)
++#define		RB_BLKSZ(x)					((x) << 8)
++#define		BUF_SWAP_32BIT					(2 << 16)
++#define		RB_NO_UPDATE					(1 << 27)
++#define		RB_RPTR_WR_ENA					(1 << 31)
++
++#define	CP_RB0_RPTR_ADDR				0xC10C
++#define	CP_RB0_RPTR_ADDR_HI				0xC110
++#define	CP_RB0_WPTR					0xC114
++
++#define	CP_PFP_UCODE_ADDR				0xC150
++#define	CP_PFP_UCODE_DATA				0xC154
++#define	CP_ME_RAM_RADDR					0xC158
++#define	CP_ME_RAM_WADDR					0xC15C
++#define	CP_ME_RAM_DATA					0xC160
++
++#define	CP_CE_UCODE_ADDR				0xC168
++#define	CP_CE_UCODE_DATA				0xC16C
++
++#define	CP_RB1_BASE					0xC180
++#define	CP_RB1_CNTL					0xC184
++#define	CP_RB1_RPTR_ADDR				0xC188
++#define	CP_RB1_RPTR_ADDR_HI				0xC18C
++#define	CP_RB1_WPTR					0xC190
++#define	CP_RB2_BASE					0xC194
++#define	CP_RB2_CNTL					0xC198
++#define	CP_RB2_RPTR_ADDR				0xC19C
++#define	CP_RB2_RPTR_ADDR_HI				0xC1A0
++#define	CP_RB2_WPTR					0xC1A4
++#define CP_INT_CNTL_RING0                               0xC1A8
++#define CP_INT_CNTL_RING1                               0xC1AC
++#define CP_INT_CNTL_RING2                               0xC1B0
++#       define CNTX_BUSY_INT_ENABLE                     (1 << 19)
++#       define CNTX_EMPTY_INT_ENABLE                    (1 << 20)
++#       define WAIT_MEM_SEM_INT_ENABLE                  (1 << 21)
++#       define TIME_STAMP_INT_ENABLE                    (1 << 26)
++#       define CP_RINGID2_INT_ENABLE                    (1 << 29)
++#       define CP_RINGID1_INT_ENABLE                    (1 << 30)
++#       define CP_RINGID0_INT_ENABLE                    (1 << 31)
++#define CP_INT_STATUS_RING0                             0xC1B4
++#define CP_INT_STATUS_RING1                             0xC1B8
++#define CP_INT_STATUS_RING2                             0xC1BC
++#       define WAIT_MEM_SEM_INT_STAT                    (1 << 21)
++#       define TIME_STAMP_INT_STAT                      (1 << 26)
++#       define CP_RINGID2_INT_STAT                      (1 << 29)
++#       define CP_RINGID1_INT_STAT                      (1 << 30)
++#       define CP_RINGID0_INT_STAT                      (1 << 31)
++
++#define	CP_DEBUG					0xC1FC
++
++#define RLC_CNTL                                          0xC300
++#       define RLC_ENABLE                                 (1 << 0)
++#define RLC_RL_BASE                                       0xC304
++#define RLC_RL_SIZE                                       0xC308
++#define RLC_LB_CNTL                                       0xC30C
++#define RLC_SAVE_AND_RESTORE_BASE                         0xC310
++#define RLC_LB_CNTR_MAX                                   0xC314
++#define RLC_LB_CNTR_INIT                                  0xC318
++
++#define RLC_CLEAR_STATE_RESTORE_BASE                      0xC320
++
++#define RLC_UCODE_ADDR                                    0xC32C
++#define RLC_UCODE_DATA                                    0xC330
++
++#define RLC_MC_CNTL                                       0xC344
++#define RLC_UCODE_CNTL                                    0xC348
++
++#define VGT_EVENT_INITIATOR                             0x28a90
++#       define SAMPLE_STREAMOUTSTATS1                   (1 << 0)
++#       define SAMPLE_STREAMOUTSTATS2                   (2 << 0)
++#       define SAMPLE_STREAMOUTSTATS3                   (3 << 0)
++#       define CACHE_FLUSH_TS                           (4 << 0)
++#       define CACHE_FLUSH                              (6 << 0)
++#       define CS_PARTIAL_FLUSH                         (7 << 0)
++#       define VGT_STREAMOUT_RESET                      (10 << 0)
++#       define END_OF_PIPE_INCR_DE                      (11 << 0)
++#       define END_OF_PIPE_IB_END                       (12 << 0)
++#       define RST_PIX_CNT                              (13 << 0)
++#       define VS_PARTIAL_FLUSH                         (15 << 0)
++#       define PS_PARTIAL_FLUSH                         (16 << 0)
++#       define CACHE_FLUSH_AND_INV_TS_EVENT             (20 << 0)
++#       define ZPASS_DONE                               (21 << 0)
++#       define CACHE_FLUSH_AND_INV_EVENT                (22 << 0)
++#       define PERFCOUNTER_START                        (23 << 0)
++#       define PERFCOUNTER_STOP                         (24 << 0)
++#       define PIPELINESTAT_START                       (25 << 0)
++#       define PIPELINESTAT_STOP                        (26 << 0)
++#       define PERFCOUNTER_SAMPLE                       (27 << 0)
++#       define SAMPLE_PIPELINESTAT                      (30 << 0)
++#       define SAMPLE_STREAMOUTSTATS                    (32 << 0)
++#       define RESET_VTX_CNT                            (33 << 0)
++#       define VGT_FLUSH                                (36 << 0)
++#       define BOTTOM_OF_PIPE_TS                        (40 << 0)
++#       define DB_CACHE_FLUSH_AND_INV                   (42 << 0)
++#       define FLUSH_AND_INV_DB_DATA_TS                 (43 << 0)
++#       define FLUSH_AND_INV_DB_META                    (44 << 0)
++#       define FLUSH_AND_INV_CB_DATA_TS                 (45 << 0)
++#       define FLUSH_AND_INV_CB_META                    (46 << 0)
++#       define CS_DONE                                  (47 << 0)
++#       define PS_DONE                                  (48 << 0)
++#       define FLUSH_AND_INV_CB_PIXEL_DATA              (49 << 0)
++#       define THREAD_TRACE_START                       (51 << 0)
++#       define THREAD_TRACE_STOP                        (52 << 0)
++#       define THREAD_TRACE_FLUSH                       (54 << 0)
++#       define THREAD_TRACE_FINISH                      (55 << 0)
++
++/*
++ * PM4
++ */
++#define	PACKET_TYPE0	0
++#define	PACKET_TYPE1	1
++#define	PACKET_TYPE2	2
++#define	PACKET_TYPE3	3
++
++#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
++#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
++#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
++#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
++#define PACKET0(reg, n)	((PACKET_TYPE0 << 30) |				\
++			 (((reg) >> 2) & 0xFFFF) |			\
++			 ((n) & 0x3FFF) << 16)
++#define CP_PACKET2			0x80000000
++#define		PACKET2_PAD_SHIFT		0
++#define		PACKET2_PAD_MASK		(0x3fffffff << 0)
++
++#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
++
++#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |				\
++			 (((op) & 0xFF) << 8) |				\
++			 ((n) & 0x3FFF) << 16)
++
++#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
++
++/* Packet 3 types */
++#define	PACKET3_NOP					0x10
++#define	PACKET3_SET_BASE				0x11
++#define		PACKET3_BASE_INDEX(x)                  ((x) << 0)
++#define			GDS_PARTITION_BASE		2
++#define			CE_PARTITION_BASE		3
++#define	PACKET3_CLEAR_STATE				0x12
++#define	PACKET3_INDEX_BUFFER_SIZE			0x13
++#define	PACKET3_DISPATCH_DIRECT				0x15
++#define	PACKET3_DISPATCH_INDIRECT			0x16
++#define	PACKET3_ALLOC_GDS				0x1B
++#define	PACKET3_WRITE_GDS_RAM				0x1C
++#define	PACKET3_ATOMIC_GDS				0x1D
++#define	PACKET3_ATOMIC					0x1E
++#define	PACKET3_OCCLUSION_QUERY				0x1F
++#define	PACKET3_SET_PREDICATION				0x20
++#define	PACKET3_REG_RMW					0x21
++#define	PACKET3_COND_EXEC				0x22
++#define	PACKET3_PRED_EXEC				0x23
++#define	PACKET3_DRAW_INDIRECT				0x24
++#define	PACKET3_DRAW_INDEX_INDIRECT			0x25
++#define	PACKET3_INDEX_BASE				0x26
++#define	PACKET3_DRAW_INDEX_2				0x27
++#define	PACKET3_CONTEXT_CONTROL				0x28
++#define	PACKET3_INDEX_TYPE				0x2A
++#define	PACKET3_DRAW_INDIRECT_MULTI			0x2C
++#define	PACKET3_DRAW_INDEX_AUTO				0x2D
++#define	PACKET3_DRAW_INDEX_IMMD				0x2E
++#define	PACKET3_NUM_INSTANCES				0x2F
++#define	PACKET3_DRAW_INDEX_MULTI_AUTO			0x30
++#define	PACKET3_INDIRECT_BUFFER_CONST			0x31
++#define	PACKET3_INDIRECT_BUFFER				0x32
++#define	PACKET3_STRMOUT_BUFFER_UPDATE			0x34
++#define	PACKET3_DRAW_INDEX_OFFSET_2			0x35
++#define	PACKET3_DRAW_INDEX_MULTI_ELEMENT		0x36
++#define	PACKET3_WRITE_DATA				0x37
++#define	PACKET3_DRAW_INDEX_INDIRECT_MULTI		0x38
++#define	PACKET3_MEM_SEMAPHORE				0x39
++#define	PACKET3_MPEG_INDEX				0x3A
++#define	PACKET3_COPY_DW					0x3B
++#define	PACKET3_WAIT_REG_MEM				0x3C
++#define	PACKET3_MEM_WRITE				0x3D
++#define	PACKET3_COPY_DATA				0x40
++#define	PACKET3_PFP_SYNC_ME				0x42
++#define	PACKET3_SURFACE_SYNC				0x43
++#              define PACKET3_DEST_BASE_0_ENA      (1 << 0)
++#              define PACKET3_DEST_BASE_1_ENA      (1 << 1)
++#              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
++#              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
++#              define PACKET3_CB2_DEST_BASE_ENA    (1 << 8)
++#              define PACKET3_CB3_DEST_BASE_ENA    (1 << 9)
++#              define PACKET3_CB4_DEST_BASE_ENA    (1 << 10)
++#              define PACKET3_CB5_DEST_BASE_ENA    (1 << 11)
++#              define PACKET3_CB6_DEST_BASE_ENA    (1 << 12)
++#              define PACKET3_CB7_DEST_BASE_ENA    (1 << 13)
++#              define PACKET3_DB_DEST_BASE_ENA     (1 << 14)
++#              define PACKET3_DEST_BASE_2_ENA      (1 << 19)
++#              define PACKET3_DEST_BASE_3_ENA      (1 << 21)
++#              define PACKET3_TCL1_ACTION_ENA      (1 << 22)
++#              define PACKET3_TC_ACTION_ENA        (1 << 23)
++#              define PACKET3_CB_ACTION_ENA        (1 << 25)
++#              define PACKET3_DB_ACTION_ENA        (1 << 26)
++#              define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
++#              define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
++#define	PACKET3_ME_INITIALIZE				0x44
++#define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
++#define	PACKET3_COND_WRITE				0x45
++#define	PACKET3_EVENT_WRITE				0x46
++#define		EVENT_TYPE(x)                           ((x) << 0)
++#define		EVENT_INDEX(x)                          ((x) << 8)
++                /* 0 - any non-TS event
++		 * 1 - ZPASS_DONE
++		 * 2 - SAMPLE_PIPELINESTAT
++		 * 3 - SAMPLE_STREAMOUTSTAT*
++		 * 4 - *S_PARTIAL_FLUSH
++		 * 5 - EOP events
++		 * 6 - EOS events
++		 * 7 - CACHE_FLUSH, CACHE_FLUSH_AND_INV_EVENT
++		 */
++#define		INV_L2                                  (1 << 20)
++                /* INV TC L2 cache when EVENT_INDEX = 7 */
++#define	PACKET3_EVENT_WRITE_EOP				0x47
++#define		DATA_SEL(x)                             ((x) << 29)
++                /* 0 - discard
++		 * 1 - send low 32bit data
++		 * 2 - send 64bit data
++		 * 3 - send 64bit counter value
++		 */
++#define		INT_SEL(x)                              ((x) << 24)
++                /* 0 - none
++		 * 1 - interrupt only (DATA_SEL = 0)
++		 * 2 - interrupt when data write is confirmed
++		 */
++#define	PACKET3_EVENT_WRITE_EOS				0x48
++#define	PACKET3_PREAMBLE_CNTL				0x4A
++#              define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE     (2 << 28)
++#              define PACKET3_PREAMBLE_END_CLEAR_STATE       (3 << 28)
++#define	PACKET3_ONE_REG_WRITE				0x57
++#define	PACKET3_LOAD_CONFIG_REG				0x5F
++#define	PACKET3_LOAD_CONTEXT_REG			0x60
++#define	PACKET3_LOAD_SH_REG				0x61
++#define	PACKET3_SET_CONFIG_REG				0x68
++#define		PACKET3_SET_CONFIG_REG_START			0x00008000
++#define		PACKET3_SET_CONFIG_REG_END			0x0000b000
++#define	PACKET3_SET_CONTEXT_REG				0x69
++#define		PACKET3_SET_CONTEXT_REG_START			0x00028000
++#define		PACKET3_SET_CONTEXT_REG_END			0x00029000
++#define	PACKET3_SET_CONTEXT_REG_INDIRECT		0x73
++#define	PACKET3_SET_RESOURCE_INDIRECT			0x74
++#define	PACKET3_SET_SH_REG				0x76
++#define		PACKET3_SET_SH_REG_START			0x0000b000
++#define		PACKET3_SET_SH_REG_END				0x0000c000
++#define	PACKET3_SET_SH_REG_OFFSET			0x77
++#define	PACKET3_ME_WRITE				0x7A
++#define	PACKET3_SCRATCH_RAM_WRITE			0x7D
++#define	PACKET3_SCRATCH_RAM_READ			0x7E
++#define	PACKET3_CE_WRITE				0x7F
++#define	PACKET3_LOAD_CONST_RAM				0x80
++#define	PACKET3_WRITE_CONST_RAM				0x81
++#define	PACKET3_WRITE_CONST_RAM_OFFSET			0x82
++#define	PACKET3_DUMP_CONST_RAM				0x83
++#define	PACKET3_INCREMENT_CE_COUNTER			0x84
++#define	PACKET3_INCREMENT_DE_COUNTER			0x85
++#define	PACKET3_WAIT_ON_CE_COUNTER			0x86
++#define	PACKET3_WAIT_ON_DE_COUNTER			0x87
++#define	PACKET3_WAIT_ON_DE_COUNTER_DIFF			0x88
++#define	PACKET3_SET_CE_DE_COUNTERS			0x89
++#define	PACKET3_WAIT_ON_AVAIL_BUFFER			0x8A
++
++#endif
+diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
+index cb1ee4e..2a25888 100644
+--- a/drivers/gpu/drm/savage/savage_bci.c
++++ b/drivers/gpu/drm/savage/savage_bci.c
+@@ -547,6 +547,8 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
+ 
+ 	dev_priv->chipset = (enum savage_family)chipset;
+ 
++	pci_set_master(dev->pdev);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
+index 5468d1c..89afe0b 100644
+--- a/drivers/gpu/drm/savage/savage_drv.c
++++ b/drivers/gpu/drm/savage/savage_drv.c
+@@ -35,6 +35,17 @@ static struct pci_device_id pciidlist[] = {
+ 	savage_PCI_IDS
+ };
+ 
++static const struct file_operations savage_driver_fops = {
++	.owner = THIS_MODULE,
++	.open = drm_open,
++	.release = drm_release,
++	.unlocked_ioctl = drm_ioctl,
++	.mmap = drm_mmap,
++	.poll = drm_poll,
++	.fasync = drm_fasync,
++	.llseek = noop_llseek,
++};
++
+ static struct drm_driver driver = {
+ 	.driver_features =
+ 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
+@@ -46,17 +57,7 @@ static struct drm_driver driver = {
+ 	.reclaim_buffers = savage_reclaim_buffers,
+ 	.ioctls = savage_ioctls,
+ 	.dma_ioctl = savage_bci_buffers,
+-	.fops = {
+-		 .owner = THIS_MODULE,
+-		 .open = drm_open,
+-		 .release = drm_release,
+-		 .unlocked_ioctl = drm_ioctl,
+-		 .mmap = drm_mmap,
+-		 .poll = drm_poll,
+-		 .fasync = drm_fasync,
+-		 .llseek = noop_llseek,
+-	},
+-
++	.fops = &savage_driver_fops,
+ 	.name = DRIVER_NAME,
+ 	.desc = DRIVER_DESC,
+ 	.date = DRIVER_DATE,
+diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
+index 8a3e315..b6d8608 100644
+--- a/drivers/gpu/drm/savage/savage_state.c
++++ b/drivers/gpu/drm/savage/savage_state.c
+@@ -988,7 +988,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
+ 	 * for locking on FreeBSD.
+ 	 */
+ 	if (cmdbuf->size) {
+-		kcmd_addr = kmalloc(cmdbuf->size * 8, GFP_KERNEL);
++		kcmd_addr = kmalloc_array(cmdbuf->size, 8, GFP_KERNEL);
+ 		if (kcmd_addr == NULL)
+ 			return -ENOMEM;
+ 
+@@ -1015,8 +1015,8 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
+ 		cmdbuf->vb_addr = kvb_addr;
+ 	}
+ 	if (cmdbuf->nbox) {
+-		kbox_addr = kmalloc(cmdbuf->nbox * sizeof(struct drm_clip_rect),
+-				    GFP_KERNEL);
++		kbox_addr = kmalloc_array(cmdbuf->nbox, sizeof(struct drm_clip_rect),
++					  GFP_KERNEL);
+ 		if (kbox_addr == NULL) {
+ 			ret = -ENOMEM;
+ 			goto done;
+@@ -1057,7 +1057,8 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
+ 				DRM_ERROR("indexed drawing command extends "
+ 					  "beyond end of command buffer\n");
+ 				DMA_FLUSH();
+-				return -EINVAL;
++				ret = -EINVAL;
++				goto done;
+ 			}
+ 			/* fall through */
+ 		case SAVAGE_CMD_DMA_PRIM:
+@@ -1076,7 +1077,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
+ 				      cmdbuf->vb_stride,
+ 				      cmdbuf->nbox, cmdbuf->box_addr);
+ 				if (ret != 0)
+-					return ret;
++					goto done;
+ 				first_draw_cmd = NULL;
+ 			}
+ 		}
+diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
+index a9c5716..dd14cd1 100644
+--- a/drivers/gpu/drm/sis/sis_drv.c
++++ b/drivers/gpu/drm/sis/sis_drv.c
+@@ -40,51 +40,78 @@ static struct pci_device_id pciidlist[] = {
+ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
+ {
+ 	drm_sis_private_t *dev_priv;
+-	int ret;
++
++	pci_set_master(dev->pdev);
+ 
+ 	dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL);
+ 	if (dev_priv == NULL)
+ 		return -ENOMEM;
+ 
++	idr_init(&dev_priv->object_idr);
+ 	dev->dev_private = (void *)dev_priv;
+ 	dev_priv->chipset = chipset;
+-	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
+-	if (ret)
+-		kfree(dev_priv);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int sis_driver_unload(struct drm_device *dev)
+ {
+ 	drm_sis_private_t *dev_priv = dev->dev_private;
+ 
+-	drm_sman_takedown(&dev_priv->sman);
++	idr_remove_all(&dev_priv->object_idr);
++	idr_destroy(&dev_priv->object_idr);
++
+ 	kfree(dev_priv);
+ 
+ 	return 0;
+ }
+ 
++static const struct file_operations sis_driver_fops = {
++	.owner = THIS_MODULE,
++	.open = drm_open,
++	.release = drm_release,
++	.unlocked_ioctl = drm_ioctl,
++	.mmap = drm_mmap,
++	.poll = drm_poll,
++	.fasync = drm_fasync,
++	.llseek = noop_llseek,
++};
++
++static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
++{
++	struct sis_file_private *file_priv;
++
++	DRM_DEBUG_DRIVER("\n");
++	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
++	if (!file_priv)
++		return -ENOMEM;
++
++	file->driver_priv = file_priv;
++
++	INIT_LIST_HEAD(&file_priv->obj_list);
++
++	return 0;
++}
++
++void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
++{
++	struct sis_file_private *file_priv = file->driver_priv;
++
++	kfree(file_priv);
++}
++
+ static struct drm_driver driver = {
+ 	.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
+ 	.load = sis_driver_load,
+ 	.unload = sis_driver_unload,
++	.open = sis_driver_open,
++	.postclose = sis_driver_postclose,
+ 	.dma_quiescent = sis_idle,
+ 	.reclaim_buffers = NULL,
+ 	.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
+ 	.lastclose = sis_lastclose,
+ 	.ioctls = sis_ioctls,
+-	.fops = {
+-		 .owner = THIS_MODULE,
+-		 .open = drm_open,
+-		 .release = drm_release,
+-		 .unlocked_ioctl = drm_ioctl,
+-		 .mmap = drm_mmap,
+-		 .poll = drm_poll,
+-		 .fasync = drm_fasync,
+-		 .llseek = noop_llseek,
+-	},
+-
++	.fops = &sis_driver_fops,
+ 	.name = DRIVER_NAME,
+ 	.desc = DRIVER_DESC,
+ 	.date = DRIVER_DATE,
+diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
+index 194303c..573758b 100644
+--- a/drivers/gpu/drm/sis/sis_drv.h
++++ b/drivers/gpu/drm/sis/sis_drv.h
+@@ -44,7 +44,7 @@ enum sis_family {
+ 	SIS_CHIP_315 = 1,
+ };
+ 
+-#include "drm_sman.h"
++#include "drm_mm.h"
+ 
+ 
+ #define SIS_BASE (dev_priv->mmio)
+@@ -54,12 +54,15 @@ enum sis_family {
+ typedef struct drm_sis_private {
+ 	drm_local_map_t *mmio;
+ 	unsigned int idle_fault;
+-	struct drm_sman sman;
+ 	unsigned int chipset;
+ 	int vram_initialized;
+ 	int agp_initialized;
+ 	unsigned long vram_offset;
+ 	unsigned long agp_offset;
++	struct drm_mm vram_mm;
++	struct drm_mm agp_mm;
++	/** Mapping of userspace keys to mm objects */
++	struct idr object_idr;
+ } drm_sis_private_t;
+ 
+ extern int sis_idle(struct drm_device *dev);
+diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
+index 7fe2b63..dd4a316 100644
+--- a/drivers/gpu/drm/sis/sis_mm.c
++++ b/drivers/gpu/drm/sis/sis_mm.c
+@@ -41,40 +41,18 @@
+ #define AGP_TYPE 1
+ 
+ 
++struct sis_memblock {
++	struct drm_mm_node mm_node;
++	struct sis_memreq req;
++	struct list_head owner_list;
++};
++
+ #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+ /* fb management via fb device */
+ 
+ #define SIS_MM_ALIGN_SHIFT 0
+ #define SIS_MM_ALIGN_MASK 0
+ 
+-static void *sis_sman_mm_allocate(void *private, unsigned long size,
+-				  unsigned alignment)
+-{
+-	struct sis_memreq req;
+-
+-	req.size = size;
+-	sis_malloc(&req);
+-	if (req.size == 0)
+-		return NULL;
+-	else
+-		return (void *)(unsigned long)~req.offset;
+-}
+-
+-static void sis_sman_mm_free(void *private, void *ref)
+-{
+-	sis_free(~((unsigned long)ref));
+-}
+-
+-static void sis_sman_mm_destroy(void *private)
+-{
+-	;
+-}
+-
+-static unsigned long sis_sman_mm_offset(void *private, void *ref)
+-{
+-	return ~((unsigned long)ref);
+-}
+-
+ #else /* CONFIG_FB_SIS[_MODULE] */
+ 
+ #define SIS_MM_ALIGN_SHIFT 4
+@@ -86,30 +64,11 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
+ {
+ 	drm_sis_private_t *dev_priv = dev->dev_private;
+ 	drm_sis_fb_t *fb = data;
+-	int ret;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+-	{
+-		struct drm_sman_mm sman_mm;
+-		sman_mm.private = (void *)0xFFFFFFFF;
+-		sman_mm.allocate = sis_sman_mm_allocate;
+-		sman_mm.free = sis_sman_mm_free;
+-		sman_mm.destroy = sis_sman_mm_destroy;
+-		sman_mm.offset = sis_sman_mm_offset;
+-		ret =
+-		    drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
+-	}
+-#else
+-	ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
+-				 fb->size >> SIS_MM_ALIGN_SHIFT);
+-#endif
+-
+-	if (ret) {
+-		DRM_ERROR("VRAM memory manager initialisation error\n");
+-		mutex_unlock(&dev->struct_mutex);
+-		return ret;
+-	}
++	/* Unconditionally init the drm_mm, even though we don't use it when the
++	 * fb sis driver is available - make cleanup easier. */
++	drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> SIS_MM_ALIGN_SHIFT);
+ 
+ 	dev_priv->vram_initialized = 1;
+ 	dev_priv->vram_offset = fb->offset;
+@@ -120,13 +79,15 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
+ 	return 0;
+ }
+ 
+-static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
++static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
+ 			 void *data, int pool)
+ {
+ 	drm_sis_private_t *dev_priv = dev->dev_private;
+ 	drm_sis_mem_t *mem = data;
+-	int retval = 0;
+-	struct drm_memblock_item *item;
++	int retval = 0, user_key;
++	struct sis_memblock *item;
++	struct sis_file_private *file_priv = file->driver_priv;
++	unsigned long offset;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+ 
+@@ -138,25 +99,68 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
+ 		return -EINVAL;
+ 	}
+ 
+-	mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
+-	item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
+-			      (unsigned long)file_priv);
++	item = kzalloc(sizeof(*item), GFP_KERNEL);
++	if (!item) {
++		retval = -ENOMEM;
++		goto fail_alloc;
++	}
+ 
+-	mutex_unlock(&dev->struct_mutex);
+-	if (item) {
+-		mem->offset = ((pool == 0) ?
+-			      dev_priv->vram_offset : dev_priv->agp_offset) +
+-		    (item->mm->
+-		     offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
+-		mem->free = item->user_hash.key;
+-		mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
++	mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
++	if (pool == AGP_TYPE) {
++		retval = drm_mm_insert_node(&dev_priv->agp_mm,
++					    &item->mm_node,
++					    mem->size, 0);
++		offset = item->mm_node.start;
+ 	} else {
+-		mem->offset = 0;
+-		mem->size = 0;
+-		mem->free = 0;
++#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
++		item->req.size = mem->size;
++		sis_malloc(&item->req);
++		if (item->req.size == 0)
++			retval = -ENOMEM;
++		offset = item->req.offset;
++#else
++		retval = drm_mm_insert_node(&dev_priv->vram_mm,
++					    &item->mm_node,
++					    mem->size, 0);
++		offset = item->mm_node.start;
++#endif
++	}
++	if (retval)
++		goto fail_alloc;
++
++again:
++	if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
+ 		retval = -ENOMEM;
++		goto fail_idr;
+ 	}
+ 
++	retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
++	if (retval == -EAGAIN)
++		goto again;
++	if (retval)
++		goto fail_idr;
++
++	list_add(&item->owner_list, &file_priv->obj_list);
++	mutex_unlock(&dev->struct_mutex);
++
++	mem->offset = ((pool == 0) ?
++		      dev_priv->vram_offset : dev_priv->agp_offset) +
++	    (offset << SIS_MM_ALIGN_SHIFT);
++	mem->free = user_key;
++	mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
++
++	return 0;
++
++fail_idr:
++	drm_mm_remove_node(&item->mm_node);
++fail_alloc:
++	kfree(item);
++	mutex_unlock(&dev->struct_mutex);
++
++	mem->offset = 0;
++	mem->size = 0;
++	mem->free = 0;
++
+ 	DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
+ 		  mem->offset);
+ 
+@@ -167,14 +171,28 @@ static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *fil
+ {
+ 	drm_sis_private_t *dev_priv = dev->dev_private;
+ 	drm_sis_mem_t *mem = data;
+-	int ret;
++	struct sis_memblock *obj;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+-	ret = drm_sman_free_key(&dev_priv->sman, mem->free);
++	obj = idr_find(&dev_priv->object_idr, mem->free);
++	if (obj == NULL) {
++		mutex_unlock(&dev->struct_mutex);
++		return -EINVAL;
++	}
++
++	idr_remove(&dev_priv->object_idr, mem->free);
++	list_del(&obj->owner_list);
++	if (drm_mm_node_allocated(&obj->mm_node))
++		drm_mm_remove_node(&obj->mm_node);
++#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
++	else
++		sis_free(obj->req.offset);
++#endif
++	kfree(obj);
+ 	mutex_unlock(&dev->struct_mutex);
+ 	DRM_DEBUG("free = 0x%lx\n", mem->free);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int sis_fb_alloc(struct drm_device *dev, void *data,
+@@ -188,18 +206,10 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
+ {
+ 	drm_sis_private_t *dev_priv = dev->dev_private;
+ 	drm_sis_agp_t *agp = data;
+-	int ret;
+ 	dev_priv = dev->dev_private;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+-	ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
+-				 agp->size >> SIS_MM_ALIGN_SHIFT);
+-
+-	if (ret) {
+-		DRM_ERROR("AGP memory manager initialisation error\n");
+-		mutex_unlock(&dev->struct_mutex);
+-		return ret;
+-	}
++	drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> SIS_MM_ALIGN_SHIFT);
+ 
+ 	dev_priv->agp_initialized = 1;
+ 	dev_priv->agp_offset = agp->offset;
+@@ -293,20 +303,26 @@ void sis_lastclose(struct drm_device *dev)
+ 		return;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+-	drm_sman_cleanup(&dev_priv->sman);
+-	dev_priv->vram_initialized = 0;
+-	dev_priv->agp_initialized = 0;
++	if (dev_priv->vram_initialized) {
++		drm_mm_takedown(&dev_priv->vram_mm);
++		dev_priv->vram_initialized = 0;
++	}
++	if (dev_priv->agp_initialized) {
++		drm_mm_takedown(&dev_priv->agp_mm);
++		dev_priv->agp_initialized = 0;
++	}
+ 	dev_priv->mmio = NULL;
+ 	mutex_unlock(&dev->struct_mutex);
+ }
+ 
+ void sis_reclaim_buffers_locked(struct drm_device *dev,
+-				struct drm_file *file_priv)
++				struct drm_file *file)
+ {
+-	drm_sis_private_t *dev_priv = dev->dev_private;
++	struct sis_file_private *file_priv = file->driver_priv;
++	struct sis_memblock *entry, *next;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+-	if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
++	if (list_empty(&file_priv->obj_list)) {
+ 		mutex_unlock(&dev->struct_mutex);
+ 		return;
+ 	}
+@@ -314,7 +330,18 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
+ 	if (dev->driver->dma_quiescent)
+ 		dev->driver->dma_quiescent(dev);
+ 
+-	drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
++
++	list_for_each_entry_safe(entry, next, &file_priv->obj_list,
++				 owner_list) {
++		list_del(&entry->owner_list);
++		if (drm_mm_node_allocated(&entry->mm_node))
++			drm_mm_remove_node(&entry->mm_node);
++#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
++		else
++			sis_free(entry->req.offset);
++#endif
++		kfree(entry);
++	}
+ 	mutex_unlock(&dev->struct_mutex);
+ 	return;
+ }
+diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
+index cda2991..1613c78 100644
+--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
++++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
+@@ -41,20 +41,21 @@ static struct pci_device_id pciidlist[] = {
+ 	tdfx_PCI_IDS
+ };
+ 
++static const struct file_operations tdfx_driver_fops = {
++	.owner = THIS_MODULE,
++	.open = drm_open,
++	.release = drm_release,
++	.unlocked_ioctl = drm_ioctl,
++	.mmap = drm_mmap,
++	.poll = drm_poll,
++	.fasync = drm_fasync,
++	.llseek = noop_llseek,
++};
++
+ static struct drm_driver driver = {
+ 	.driver_features = DRIVER_USE_MTRR,
+ 	.reclaim_buffers = drm_core_reclaim_buffers,
+-	.fops = {
+-		 .owner = THIS_MODULE,
+-		 .open = drm_open,
+-		 .release = drm_release,
+-		 .unlocked_ioctl = drm_ioctl,
+-		 .mmap = drm_mmap,
+-		 .poll = drm_poll,
+-		 .fasync = drm_fasync,
+-		 .llseek = noop_llseek,
+-	},
+-
++	.fops = &tdfx_driver_fops,
+ 	.name = DRIVER_NAME,
+ 	.desc = DRIVER_DESC,
+ 	.date = DRIVER_DATE,
+diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
+index f3cf6f0..b2b33dd 100644
+--- a/drivers/gpu/drm/ttm/Makefile
++++ b/drivers/gpu/drm/ttm/Makefile
+@@ -7,4 +7,8 @@ ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
+ 	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
+ 	ttm_bo_manager.o
+ 
++ifeq ($(CONFIG_SWIOTLB),y)
++ttm-y += ttm_page_alloc_dma.o
++endif
++
+ obj-$(CONFIG_DRM_TTM) += ttm.o
+diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
+index 1c4a72f..4a87282 100644
+--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
++++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
+@@ -29,8 +29,11 @@
+  *          Keith Packard.
+  */
+ 
++#define pr_fmt(fmt) "[TTM] " fmt
++
+ #include "ttm/ttm_module.h"
+ #include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_page_alloc.h"
+ #ifdef TTM_HAS_AGP
+ #include "ttm/ttm_placement.h"
+ #include <linux/agp_backend.h>
+@@ -40,100 +43,77 @@
+ #include <asm/agp.h>
+ 
+ struct ttm_agp_backend {
+-	struct ttm_backend backend;
++	struct ttm_tt ttm;
+ 	struct agp_memory *mem;
+ 	struct agp_bridge_data *bridge;
+ };
+ 
+-static int ttm_agp_populate(struct ttm_backend *backend,
+-			    unsigned long num_pages, struct page **pages,
+-			    struct page *dummy_read_page,
+-			    dma_addr_t *dma_addrs)
++static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+ {
+-	struct ttm_agp_backend *agp_be =
+-	    container_of(backend, struct ttm_agp_backend, backend);
+-	struct page **cur_page, **last_page = pages + num_pages;
++	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
++	struct drm_mm_node *node = bo_mem->mm_node;
+ 	struct agp_memory *mem;
++	int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
++	unsigned i;
+ 
+-	mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
++	mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
+ 	if (unlikely(mem == NULL))
+ 		return -ENOMEM;
+ 
+ 	mem->page_count = 0;
+-	for (cur_page = pages; cur_page < last_page; ++cur_page) {
+-		struct page *page = *cur_page;
++	for (i = 0; i < ttm->num_pages; i++) {
++		struct page *page = ttm->pages[i];
++
+ 		if (!page)
+-			page = dummy_read_page;
++			page = ttm->dummy_read_page;
+ 
+ 		mem->pages[mem->page_count++] = page;
+ 	}
+ 	agp_be->mem = mem;
+-	return 0;
+-}
+-
+-static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
+-{
+-	struct ttm_agp_backend *agp_be =
+-	    container_of(backend, struct ttm_agp_backend, backend);
+-	struct drm_mm_node *node = bo_mem->mm_node;
+-	struct agp_memory *mem = agp_be->mem;
+-	int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+-	int ret;
+ 
+ 	mem->is_flushed = 1;
+ 	mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
+ 
+ 	ret = agp_bind_memory(mem, node->start);
+ 	if (ret)
+-		printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
++		pr_err("AGP Bind memory failed\n");
+ 
+ 	return ret;
+ }
+ 
+-static int ttm_agp_unbind(struct ttm_backend *backend)
++static int ttm_agp_unbind(struct ttm_tt *ttm)
+ {
+-	struct ttm_agp_backend *agp_be =
+-	    container_of(backend, struct ttm_agp_backend, backend);
+-
+-	if (agp_be->mem->is_bound)
+-		return agp_unbind_memory(agp_be->mem);
+-	else
+-		return 0;
+-}
++	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+ 
+-static void ttm_agp_clear(struct ttm_backend *backend)
+-{
+-	struct ttm_agp_backend *agp_be =
+-	    container_of(backend, struct ttm_agp_backend, backend);
+-	struct agp_memory *mem = agp_be->mem;
+-
+-	if (mem) {
+-		ttm_agp_unbind(backend);
+-		agp_free_memory(mem);
++	if (agp_be->mem) {
++		if (agp_be->mem->is_bound)
++			return agp_unbind_memory(agp_be->mem);
++		agp_free_memory(agp_be->mem);
++		agp_be->mem = NULL;
+ 	}
+-	agp_be->mem = NULL;
++	return 0;
+ }
+ 
+-static void ttm_agp_destroy(struct ttm_backend *backend)
++static void ttm_agp_destroy(struct ttm_tt *ttm)
+ {
+-	struct ttm_agp_backend *agp_be =
+-	    container_of(backend, struct ttm_agp_backend, backend);
++	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+ 
+ 	if (agp_be->mem)
+-		ttm_agp_clear(backend);
++		ttm_agp_unbind(ttm);
++	ttm_tt_fini(ttm);
+ 	kfree(agp_be);
+ }
+ 
+ static struct ttm_backend_func ttm_agp_func = {
+-	.populate = ttm_agp_populate,
+-	.clear = ttm_agp_clear,
+ 	.bind = ttm_agp_bind,
+ 	.unbind = ttm_agp_unbind,
+ 	.destroy = ttm_agp_destroy,
+ };
+ 
+-struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
+-					 struct agp_bridge_data *bridge)
++struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
++				 struct agp_bridge_data *bridge,
++				 unsigned long size, uint32_t page_flags,
++				 struct page *dummy_read_page)
+ {
+ 	struct ttm_agp_backend *agp_be;
+ 
+@@ -143,10 +123,29 @@ struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
+ 
+ 	agp_be->mem = NULL;
+ 	agp_be->bridge = bridge;
+-	agp_be->backend.func = &ttm_agp_func;
+-	agp_be->backend.bdev = bdev;
+-	return &agp_be->backend;
++	agp_be->ttm.func = &ttm_agp_func;
++
++	if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
++		return NULL;
++	}
++
++	return &agp_be->ttm;
++}
++EXPORT_SYMBOL(ttm_agp_tt_create);
++
++int ttm_agp_tt_populate(struct ttm_tt *ttm)
++{
++	if (ttm->state != tt_unpopulated)
++		return 0;
++
++	return ttm_pool_populate(ttm);
++}
++EXPORT_SYMBOL(ttm_agp_tt_populate);
++
++void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
++{
++	ttm_pool_unpopulate(ttm);
+ }
+-EXPORT_SYMBOL(ttm_agp_backend_init);
++EXPORT_SYMBOL(ttm_agp_tt_unpopulate);
+ 
+ #endif
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 0d27bff..8b73ae8 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -28,6 +28,8 @@
+  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+  */
+ 
++#define pr_fmt(fmt) "[TTM] " fmt
++
+ #include "ttm/ttm_module.h"
+ #include "ttm/ttm_bo_driver.h"
+ #include "ttm/ttm_placement.h"
+@@ -68,15 +70,13 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
+ {
+ 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ 
+-	printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
+-	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
+-	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
+-	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
+-	printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
+-	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
+-		man->available_caching);
+-	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
+-		man->default_caching);
++	pr_err("    has_type: %d\n", man->has_type);
++	pr_err("    use_type: %d\n", man->use_type);
++	pr_err("    flags: 0x%08X\n", man->flags);
++	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
++	pr_err("    size: %llu\n", man->size);
++	pr_err("    available_caching: 0x%08X\n", man->available_caching);
++	pr_err("    default_caching: 0x%08X\n", man->default_caching);
+ 	if (mem_type != TTM_PL_SYSTEM)
+ 		(*man->func->debug)(man, TTM_PFX);
+ }
+@@ -86,16 +86,16 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
+ {
+ 	int i, ret, mem_type;
+ 
+-	printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
+-		bo, bo->mem.num_pages, bo->mem.size >> 10,
+-		bo->mem.size >> 20);
++	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
++	       bo, bo->mem.num_pages, bo->mem.size >> 10,
++	       bo->mem.size >> 20);
+ 	for (i = 0; i < placement->num_placement; i++) {
+ 		ret = ttm_mem_type_from_flags(placement->placement[i],
+ 						&mem_type);
+ 		if (ret)
+ 			return;
+-		printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
+-			i, placement->placement[i], mem_type);
++		pr_err("  placement[%d]=0x%08X (%d)\n",
++		       i, placement->placement[i], mem_type);
+ 		ttm_mem_type_debug(bo->bdev, mem_type);
+ 	}
+ }
+@@ -137,6 +137,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
+ 	struct ttm_buffer_object *bo =
+ 	    container_of(list_kref, struct ttm_buffer_object, list_kref);
+ 	struct ttm_bo_device *bdev = bo->bdev;
++	size_t acc_size = bo->acc_size;
+ 
+ 	BUG_ON(atomic_read(&bo->list_kref.refcount));
+ 	BUG_ON(atomic_read(&bo->kref.refcount));
+@@ -152,9 +153,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
+ 	if (bo->destroy)
+ 		bo->destroy(bo);
+ 	else {
+-		ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
+ 		kfree(bo);
+ 	}
++	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
+ }
+ 
+ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+@@ -337,29 +338,13 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
+ 		if (zero_alloc)
+ 			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ 	case ttm_bo_type_kernel:
+-		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+-					page_flags, glob->dummy_read_page);
++		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
++						      page_flags, glob->dummy_read_page);
+ 		if (unlikely(bo->ttm == NULL))
+ 			ret = -ENOMEM;
+ 		break;
+-	case ttm_bo_type_user:
+-		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+-					page_flags | TTM_PAGE_FLAG_USER,
+-					glob->dummy_read_page);
+-		if (unlikely(bo->ttm == NULL)) {
+-			ret = -ENOMEM;
+-			break;
+-		}
+-
+-		ret = ttm_tt_set_user(bo->ttm, current,
+-				      bo->buffer_start, bo->num_pages);
+-		if (unlikely(ret != 0)) {
+-			ttm_tt_destroy(bo->ttm);
+-			bo->ttm = NULL;
+-		}
+-		break;
+ 	default:
+-		printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
++		pr_err("Illegal buffer object type\n");
+ 		ret = -EINVAL;
+ 		break;
+ 	}
+@@ -431,14 +416,23 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
+ 	else
+ 		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
+ 
+-	if (ret)
++	if (ret) {
++		if (bdev->driver->move_notify) {
++			struct ttm_mem_reg tmp_mem = *mem;
++			*mem = bo->mem;
++			bo->mem = tmp_mem;
++			bdev->driver->move_notify(bo, mem);
++			bo->mem = *mem;
++		}
++
+ 		goto out_err;
++	}
+ 
+ moved:
+ 	if (bo->evicted) {
+ 		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+ 		if (ret)
+-			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
++			pr_err("Can not flush read caches\n");
+ 		bo->evicted = false;
+ 	}
+ 
+@@ -472,6 +466,9 @@ out_err:
+ 
+ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+ {
++	if (bo->bdev->driver->move_notify)
++		bo->bdev->driver->move_notify(bo, NULL);
++
+ 	if (bo->ttm) {
+ 		ttm_tt_unbind(bo->ttm);
+ 		ttm_tt_destroy(bo->ttm);
+@@ -737,9 +734,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+ 
+ 	if (unlikely(ret != 0)) {
+ 		if (ret != -ERESTARTSYS) {
+-			printk(KERN_ERR TTM_PFX
+-			       "Failed to expire sync object before "
+-			       "buffer eviction.\n");
++			pr_err("Failed to expire sync object before buffer eviction\n");
+ 		}
+ 		goto out;
+ 	}
+@@ -760,9 +755,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+ 				no_wait_reserve, no_wait_gpu);
+ 	if (ret) {
+ 		if (ret != -ERESTARTSYS) {
+-			printk(KERN_ERR TTM_PFX
+-			       "Failed to find memory space for "
+-			       "buffer 0x%p eviction.\n", bo);
++			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
++			       bo);
+ 			ttm_bo_mem_space_debug(bo, &placement);
+ 		}
+ 		goto out;
+@@ -772,7 +766,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+ 				     no_wait_reserve, no_wait_gpu);
+ 	if (ret) {
+ 		if (ret != -ERESTARTSYS)
+-			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
++			pr_err("Buffer eviction failed\n");
+ 		ttm_bo_mem_put(bo, &evict_mem);
+ 		goto out;
+ 	}
+@@ -913,16 +907,12 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
+ }
+ 
+ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
+-				 bool disallow_fixed,
+ 				 uint32_t mem_type,
+ 				 uint32_t proposed_placement,
+ 				 uint32_t *masked_placement)
+ {
+ 	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
+ 
+-	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
+-		return false;
+-
+ 	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
+ 		return false;
+ 
+@@ -967,7 +957,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ 		man = &bdev->man[mem_type];
+ 
+ 		type_ok = ttm_bo_mt_compatible(man,
+-						bo->type == ttm_bo_type_user,
+ 						mem_type,
+ 						placement->placement[i],
+ 						&cur_flags);
+@@ -1015,7 +1004,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ 		if (!man->has_type)
+ 			continue;
+ 		if (!ttm_bo_mt_compatible(man,
+-						bo->type == ttm_bo_type_user,
+ 						mem_type,
+ 						placement->busy_placement[i],
+ 						&cur_flags))
+@@ -1185,15 +1173,27 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
+ {
+ 	int ret = 0;
+ 	unsigned long num_pages;
++	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
++
++	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
++	if (ret) {
++		pr_err("Out of kernel memory\n");
++		if (destroy)
++			(*destroy)(bo);
++		else
++			kfree(bo);
++		return -ENOMEM;
++	}
+ 
+ 	size += buffer_start & ~PAGE_MASK;
+ 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ 	if (num_pages == 0) {
+-		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
++		pr_err("Illegal buffer object size\n");
+ 		if (destroy)
+ 			(*destroy)(bo);
+ 		else
+ 			kfree(bo);
++		ttm_mem_global_free(mem_glob, acc_size);
+ 		return -EINVAL;
+ 	}
+ 	bo->destroy = destroy;
+@@ -1255,14 +1255,34 @@ out_err:
+ }
+ EXPORT_SYMBOL(ttm_bo_init);
+ 
+-static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
+-				 unsigned long num_pages)
++size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
++		       unsigned long bo_size,
++		       unsigned struct_size)
+ {
+-	size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
+-	    PAGE_MASK;
++	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
++	size_t size = 0;
+ 
+-	return glob->ttm_bo_size + 2 * page_array_size;
++	size += ttm_round_pot(struct_size);
++	size += PAGE_ALIGN(npages * sizeof(void *));
++	size += ttm_round_pot(sizeof(struct ttm_tt));
++	return size;
+ }
++EXPORT_SYMBOL(ttm_bo_acc_size);
++
++size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
++			   unsigned long bo_size,
++			   unsigned struct_size)
++{
++	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
++	size_t size = 0;
++
++	size += ttm_round_pot(struct_size);
++	size += PAGE_ALIGN(npages * sizeof(void *));
++	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
++	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
++	return size;
++}
++EXPORT_SYMBOL(ttm_bo_dma_acc_size);
+ 
+ int ttm_bo_create(struct ttm_bo_device *bdev,
+ 			unsigned long size,
+@@ -1275,22 +1295,14 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
+ 			struct ttm_buffer_object **p_bo)
+ {
+ 	struct ttm_buffer_object *bo;
+-	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
++	size_t acc_size;
+ 	int ret;
+ 
+-	size_t acc_size =
+-	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+-	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+-	if (unlikely(ret != 0))
+-		return ret;
+-
+ 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+-
+-	if (unlikely(bo == NULL)) {
+-		ttm_mem_global_free(mem_glob, acc_size);
++	if (unlikely(bo == NULL))
+ 		return -ENOMEM;
+-	}
+ 
++	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
+ 	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
+ 				buffer_start, interruptible,
+ 				persistent_swap_storage, acc_size, NULL);
+@@ -1320,8 +1332,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
+ 			if (allow_errors) {
+ 				return ret;
+ 			} else {
+-				printk(KERN_ERR TTM_PFX
+-					"Cleanup eviction failed\n");
++				pr_err("Cleanup eviction failed\n");
+ 			}
+ 		}
+ 		spin_lock(&glob->lru_lock);
+@@ -1336,14 +1347,14 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+ 	int ret = -EINVAL;
+ 
+ 	if (mem_type >= TTM_NUM_MEM_TYPES) {
+-		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
++		pr_err("Illegal memory type %d\n", mem_type);
+ 		return ret;
+ 	}
+ 	man = &bdev->man[mem_type];
+ 
+ 	if (!man->has_type) {
+-		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
+-		       "memory manager type %u\n", mem_type);
++		pr_err("Trying to take down uninitialized memory manager type %u\n",
++		       mem_type);
+ 		return ret;
+ 	}
+ 
+@@ -1366,16 +1377,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+ 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ 
+ 	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
+-		printk(KERN_ERR TTM_PFX
+-		       "Illegal memory manager memory type %u.\n",
+-		       mem_type);
++		pr_err("Illegal memory manager memory type %u\n", mem_type);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (!man->has_type) {
+-		printk(KERN_ERR TTM_PFX
+-		       "Memory type %u has not been initialized.\n",
+-		       mem_type);
++		pr_err("Memory type %u has not been initialized\n", mem_type);
+ 		return 0;
+ 	}
+ 
+@@ -1460,18 +1467,10 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
+ 	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
+ 	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
+ 	if (unlikely(ret != 0)) {
+-		printk(KERN_ERR TTM_PFX
+-		       "Could not register buffer object swapout.\n");
++		pr_err("Could not register buffer object swapout\n");
+ 		goto out_no_shrink;
+ 	}
+ 
+-	glob->ttm_bo_extra_size =
+-		ttm_round_pot(sizeof(struct ttm_tt)) +
+-		ttm_round_pot(sizeof(struct ttm_backend));
+-
+-	glob->ttm_bo_size = glob->ttm_bo_extra_size +
+-		ttm_round_pot(sizeof(struct ttm_buffer_object));
+-
+ 	atomic_set(&glob->bo_count, 0);
+ 
+ 	ret = kobject_init_and_add(
+@@ -1501,9 +1500,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
+ 			man->use_type = false;
+ 			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
+ 				ret = -EBUSY;
+-				printk(KERN_ERR TTM_PFX
+-				       "DRM memory manager type %d "
+-				       "is not clean.\n", i);
++				pr_err("DRM memory manager type %d is not clean\n",
++				       i);
+ 			}
+ 			man->has_type = false;
+ 		}
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
+index 082fcae..f8187ea 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
+@@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
+ 				unsigned long page,
+ 				pgprot_t prot)
+ {
+-	struct page *d = ttm_tt_get_page(ttm, page);
++	struct page *d = ttm->pages[page];
+ 	void *dst;
+ 
+ 	if (!d)
+@@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
+ 				unsigned long page,
+ 				pgprot_t prot)
+ {
+-	struct page *s = ttm_tt_get_page(ttm, page);
++	struct page *s = ttm->pages[page];
+ 	void *src;
+ 
+ 	if (!s)
+@@ -342,6 +342,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ 	if (old_iomap == NULL && ttm == NULL)
+ 		goto out2;
+ 
++	if (ttm->state == tt_unpopulated) {
++		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
++		if (ret)
++			goto out1;
++	}
++
+ 	add = 0;
+ 	dir = 1;
+ 
+@@ -439,6 +445,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
+ 	kref_init(&fbo->list_kref);
+ 	kref_init(&fbo->kref);
+ 	fbo->destroy = &ttm_transfered_destroy;
++	fbo->acc_size = 0;
+ 
+ 	*new_obj = fbo;
+ 	return 0;
+@@ -502,10 +509,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
+ {
+ 	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
+ 	struct ttm_tt *ttm = bo->ttm;
+-	struct page *d;
+-	int i;
++	int ret;
+ 
+ 	BUG_ON(!ttm);
++
++	if (ttm->state == tt_unpopulated) {
++		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
++		if (ret)
++			return ret;
++	}
++
+ 	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+ 		/*
+ 		 * We're mapping a single page, and the desired
+@@ -513,18 +526,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
+ 		 */
+ 
+ 		map->bo_kmap_type = ttm_bo_map_kmap;
+-		map->page = ttm_tt_get_page(ttm, start_page);
++		map->page = ttm->pages[start_page];
+ 		map->virtual = kmap(map->page);
+ 	} else {
+-	    /*
+-	     * Populate the part we're mapping;
+-	     */
+-		for (i = start_page; i < start_page + num_pages; ++i) {
+-			d = ttm_tt_get_page(ttm, i);
+-			if (!d)
+-				return -ENOMEM;
+-		}
+-
+ 		/*
+ 		 * We need to use vmap to get the desired page protection
+ 		 * or to make the buffer object look contiguous.
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+index 221b924..a877813 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+@@ -28,6 +28,8 @@
+  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+  */
+ 
++#define pr_fmt(fmt) "[TTM] " fmt
++
+ #include <ttm/ttm_module.h>
+ #include <ttm/ttm_bo_driver.h>
+ #include <ttm/ttm_placement.h>
+@@ -174,18 +176,23 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ 		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
+ 		    vm_get_page_prot(vma->vm_flags) :
+ 		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
++
++		/* Allocate all page at once, most common usage */
++		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
++			retval = VM_FAULT_OOM;
++			goto out_io_unlock;
++		}
+ 	}
+ 
+ 	/*
+ 	 * Speculatively prefault a number of pages. Only error on
+ 	 * first page.
+ 	 */
+-
+ 	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
+ 		if (bo->mem.bus.is_iomem)
+ 			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
+ 		else {
+-			page = ttm_tt_get_page(ttm, page_offset);
++			page = ttm->pages[page_offset];
+ 			if (unlikely(!page && i == 0)) {
+ 				retval = VM_FAULT_OOM;
+ 				goto out_io_unlock;
+@@ -257,8 +264,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+ 	read_unlock(&bdev->vm_lock);
+ 
+ 	if (unlikely(bo == NULL)) {
+-		printk(KERN_ERR TTM_PFX
+-		       "Could not find buffer object to map.\n");
++		pr_err("Could not find buffer object to map\n");
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
+index e70ddd8..23d2ecb 100644
+--- a/drivers/gpu/drm/ttm/ttm_memory.c
++++ b/drivers/gpu/drm/ttm/ttm_memory.c
+@@ -25,6 +25,8 @@
+  *
+  **************************************************************************/
+ 
++#define pr_fmt(fmt) "[TTM] " fmt
++
+ #include "ttm/ttm_memory.h"
+ #include "ttm/ttm_module.h"
+ #include "ttm/ttm_page_alloc.h"
+@@ -74,9 +76,8 @@ static void ttm_mem_zone_kobj_release(struct kobject *kobj)
+ 	struct ttm_mem_zone *zone =
+ 		container_of(kobj, struct ttm_mem_zone, kobj);
+ 
+-	printk(KERN_INFO TTM_PFX
+-	       "Zone %7s: Used memory at exit: %llu kiB.\n",
+-	       zone->name, (unsigned long long) zone->used_mem >> 10);
++	pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
++		zone->name, (unsigned long long)zone->used_mem >> 10);
+ 	kfree(zone);
+ }
+ 
+@@ -390,11 +391,11 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
+ #endif
+ 	for (i = 0; i < glob->num_zones; ++i) {
+ 		zone = glob->zones[i];
+-		printk(KERN_INFO TTM_PFX
+-		       "Zone %7s: Available graphics memory: %llu kiB.\n",
+-		       zone->name, (unsigned long long) zone->max_mem >> 10);
++		pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
++			zone->name, (unsigned long long)zone->max_mem >> 10);
+ 	}
+ 	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
++	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+ 	return 0;
+ out_no_zone:
+ 	ttm_mem_global_release(glob);
+@@ -409,6 +410,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
+ 
+ 	/* let the page allocator first stop the shrink work. */
+ 	ttm_page_alloc_fini();
++	ttm_dma_page_alloc_fini();
+ 
+ 	flush_workqueue(glob->swap_queue);
+ 	destroy_workqueue(glob->swap_queue);
+diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
+index 93577f2..68daca4 100644
+--- a/drivers/gpu/drm/ttm/ttm_object.c
++++ b/drivers/gpu/drm/ttm/ttm_object.c
+@@ -49,6 +49,8 @@
+  * for fast lookup of ref objects given a base object.
+  */
+ 
++#define pr_fmt(fmt) "[TTM] " fmt
++
+ #include "ttm/ttm_object.h"
+ #include "ttm/ttm_module.h"
+ #include <linux/list.h>
+@@ -232,8 +234,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
+ 		return NULL;
+ 
+ 	if (tfile != base->tfile && !base->shareable) {
+-		printk(KERN_ERR TTM_PFX
+-		       "Attempted access of non-shareable object.\n");
++		pr_err("Attempted access of non-shareable object\n");
+ 		ttm_base_object_unref(&base);
+ 		return NULL;
+ 	}
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index 9e4313e..578207e 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -30,6 +30,9 @@
+  * - Use page->lru to keep a free list
+  * - doesn't track currently in use pages
+  */
++
++#define pr_fmt(fmt) "[TTM] " fmt
++
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+ #include <linux/highmem.h>
+@@ -167,18 +170,13 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
+ 		m->options.small = val;
+ 	else if (attr == &ttm_page_pool_alloc_size) {
+ 		if (val > NUM_PAGES_TO_ALLOC*8) {
+-			printk(KERN_ERR TTM_PFX
+-			       "Setting allocation size to %lu "
+-			       "is not allowed. Recommended size is "
+-			       "%lu\n",
++			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
+ 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ 			return size;
+ 		} else if (val > NUM_PAGES_TO_ALLOC) {
+-			printk(KERN_WARNING TTM_PFX
+-			       "Setting allocation size to "
+-			       "larger than %lu is not recommended.\n",
+-			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
++			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
++				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ 		}
+ 		m->options.alloc_size = val;
+ 	}
+@@ -279,8 +277,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
+ {
+ 	unsigned i;
+ 	if (set_pages_array_wb(pages, npages))
+-		printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
+-				npages);
++		pr_err("Failed to set %d pages to wb!\n", npages);
+ 	for (i = 0; i < npages; ++i)
+ 		__free_page(pages[i]);
+ }
+@@ -315,8 +312,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+ 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ 			GFP_KERNEL);
+ 	if (!pages_to_free) {
+-		printk(KERN_ERR TTM_PFX
+-		       "Failed to allocate memory for pool free operation.\n");
++		pr_err("Failed to allocate memory for pool free operation\n");
+ 		return 0;
+ 	}
+ 
+@@ -438,16 +434,12 @@ static int ttm_set_pages_caching(struct page **pages,
+ 	case tt_uncached:
+ 		r = set_pages_array_uc(pages, cpages);
+ 		if (r)
+-			printk(KERN_ERR TTM_PFX
+-			       "Failed to set %d pages to uc!\n",
+-			       cpages);
++			pr_err("Failed to set %d pages to uc!\n", cpages);
+ 		break;
+ 	case tt_wc:
+ 		r = set_pages_array_wc(pages, cpages);
+ 		if (r)
+-			printk(KERN_ERR TTM_PFX
+-			       "Failed to set %d pages to wc!\n",
+-			       cpages);
++			pr_err("Failed to set %d pages to wc!\n", cpages);
+ 		break;
+ 	default:
+ 		break;
+@@ -492,8 +484,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
+ 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+ 
+ 	if (!caching_array) {
+-		printk(KERN_ERR TTM_PFX
+-		       "Unable to allocate table for new pages.");
++		pr_err("Unable to allocate table for new pages\n");
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -501,7 +492,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
+ 		p = alloc_page(gfp_flags);
+ 
+ 		if (!p) {
+-			printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
++			pr_err("Unable to get page %u\n", i);
+ 
+ 			/* store already allocated pages in the pool after
+ 			 * setting the caching state */
+@@ -599,8 +590,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+ 			++pool->nrefills;
+ 			pool->npages += alloc_size;
+ 		} else {
+-			printk(KERN_ERR TTM_PFX
+-			       "Failed to fill pool (%p).", pool);
++			pr_err("Failed to fill pool (%p)\n", pool);
+ 			/* If we have any pages left put them to the pool. */
+ 			list_for_each_entry(p, &pool->list, lru) {
+ 				++cpages;
+@@ -619,8 +609,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+  * @return count of pages still required to fulfill the request.
+  */
+ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+-		struct list_head *pages, int ttm_flags,
+-		enum ttm_caching_state cstate, unsigned count)
++					struct list_head *pages,
++					int ttm_flags,
++					enum ttm_caching_state cstate,
++					unsigned count)
+ {
+ 	unsigned long irq_flags;
+ 	struct list_head *p;
+@@ -660,17 +652,63 @@ out:
+ 	return count;
+ }
+ 
++/* Put all pages in pages list to correct pool to wait for reuse */
++static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
++			  enum ttm_caching_state cstate)
++{
++	unsigned long irq_flags;
++	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
++	unsigned i;
++
++	if (pool == NULL) {
++		/* No pool for this memory type so free the pages */
++		for (i = 0; i < npages; i++) {
++			if (pages[i]) {
++				if (page_count(pages[i]) != 1)
++					pr_err("Erroneous page count. Leaking pages.\n");
++				__free_page(pages[i]);
++				pages[i] = NULL;
++			}
++		}
++		return;
++	}
++
++	spin_lock_irqsave(&pool->lock, irq_flags);
++	for (i = 0; i < npages; i++) {
++		if (pages[i]) {
++			if (page_count(pages[i]) != 1)
++				pr_err("Erroneous page count. Leaking pages.\n");
++			list_add_tail(&pages[i]->lru, &pool->list);
++			pages[i] = NULL;
++			pool->npages++;
++		}
++	}
++	/* Check that we don't go over the pool limit */
++	npages = 0;
++	if (pool->npages > _manager->options.max_size) {
++		npages = pool->npages - _manager->options.max_size;
++		/* free at least NUM_PAGES_TO_ALLOC number of pages
++		 * to reduce calls to set_memory_wb */
++		if (npages < NUM_PAGES_TO_ALLOC)
++			npages = NUM_PAGES_TO_ALLOC;
++	}
++	spin_unlock_irqrestore(&pool->lock, irq_flags);
++	if (npages)
++		ttm_page_pool_free(pool, npages);
++}
++
+ /*
+  * On success pages list will hold count number of correctly
+  * cached pages.
+  */
+-int ttm_get_pages(struct list_head *pages, int flags,
+-		  enum ttm_caching_state cstate, unsigned count,
+-		  dma_addr_t *dma_address)
++static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
++			 enum ttm_caching_state cstate)
+ {
+ 	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
++	struct list_head plist;
+ 	struct page *p = NULL;
+ 	gfp_t gfp_flags = GFP_USER;
++	unsigned count;
+ 	int r;
+ 
+ 	/* set zero flag for page allocation if required */
+@@ -684,30 +722,33 @@ int ttm_get_pages(struct list_head *pages, int flags,
+ 		else
+ 			gfp_flags |= GFP_HIGHUSER;
+ 
+-		for (r = 0; r < count; ++r) {
++		for (r = 0; r < npages; ++r) {
+ 			p = alloc_page(gfp_flags);
+ 			if (!p) {
+ 
+-				printk(KERN_ERR TTM_PFX
+-				       "Unable to allocate page.");
++				pr_err("Unable to allocate page\n");
+ 				return -ENOMEM;
+ 			}
+ 
+-			list_add(&p->lru, pages);
++			pages[r] = p;
+ 		}
+ 		return 0;
+ 	}
+ 
+-
+ 	/* combine zero flag to pool flags */
+ 	gfp_flags |= pool->gfp_flags;
+ 
+ 	/* First we take pages from the pool */
+-	count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
++	INIT_LIST_HEAD(&plist);
++	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
++	count = 0;
++	list_for_each_entry(p, &plist, lru) {
++		pages[count++] = p;
++	}
+ 
+ 	/* clear the pages coming from the pool if requested */
+ 	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+-		list_for_each_entry(p, pages, lru) {
++		list_for_each_entry(p, &plist, lru) {
+ 			if (PageHighMem(p))
+ 				clear_highpage(p);
+ 			else
+@@ -716,67 +757,27 @@ int ttm_get_pages(struct list_head *pages, int flags,
+ 	}
+ 
+ 	/* If pool didn't have enough pages allocate new one. */
+-	if (count > 0) {
++	if (npages > 0) {
+ 		/* ttm_alloc_new_pages doesn't reference pool so we can run
+ 		 * multiple requests in parallel.
+ 		 **/
+-		r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
++		INIT_LIST_HEAD(&plist);
++		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
++		list_for_each_entry(p, &plist, lru) {
++			pages[count++] = p;
++		}
+ 		if (r) {
+ 			/* If there is any pages in the list put them back to
+ 			 * the pool. */
+-			printk(KERN_ERR TTM_PFX
+-			       "Failed to allocate extra pages "
+-			       "for large request.");
+-			ttm_put_pages(pages, 0, flags, cstate, NULL);
++			pr_err("Failed to allocate extra pages for large request\n");
++			ttm_put_pages(pages, count, flags, cstate);
+ 			return r;
+ 		}
+ 	}
+ 
+-
+ 	return 0;
+ }
+ 
+-/* Put all pages in pages list to correct pool to wait for reuse */
+-void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+-		   enum ttm_caching_state cstate, dma_addr_t *dma_address)
+-{
+-	unsigned long irq_flags;
+-	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+-	struct page *p, *tmp;
+-
+-	if (pool == NULL) {
+-		/* No pool for this memory type so free the pages */
+-
+-		list_for_each_entry_safe(p, tmp, pages, lru) {
+-			__free_page(p);
+-		}
+-		/* Make the pages list empty */
+-		INIT_LIST_HEAD(pages);
+-		return;
+-	}
+-	if (page_count == 0) {
+-		list_for_each_entry_safe(p, tmp, pages, lru) {
+-			++page_count;
+-		}
+-	}
+-
+-	spin_lock_irqsave(&pool->lock, irq_flags);
+-	list_splice_init(pages, &pool->list);
+-	pool->npages += page_count;
+-	/* Check that we don't go over the pool limit */
+-	page_count = 0;
+-	if (pool->npages > _manager->options.max_size) {
+-		page_count = pool->npages - _manager->options.max_size;
+-		/* free at least NUM_PAGES_TO_ALLOC number of pages
+-		 * to reduce calls to set_memory_wb */
+-		if (page_count < NUM_PAGES_TO_ALLOC)
+-			page_count = NUM_PAGES_TO_ALLOC;
+-	}
+-	spin_unlock_irqrestore(&pool->lock, irq_flags);
+-	if (page_count)
+-		ttm_page_pool_free(pool, page_count);
+-}
+-
+ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+ 		char *name)
+ {
+@@ -794,7 +795,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+ 
+ 	WARN_ON(_manager);
+ 
+-	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
++	pr_info("Initializing pool allocator\n");
+ 
+ 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+ 
+@@ -829,7 +830,7 @@ void ttm_page_alloc_fini(void)
+ {
+ 	int i;
+ 
+-	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
++	pr_info("Finalizing pool allocator\n");
+ 	ttm_pool_mm_shrink_fini(_manager);
+ 
+ 	for (i = 0; i < NUM_POOLS; ++i)
+@@ -839,6 +840,62 @@ void ttm_page_alloc_fini(void)
+ 	_manager = NULL;
+ }
+ 
++int ttm_pool_populate(struct ttm_tt *ttm)
++{
++	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
++	unsigned i;
++	int ret;
++
++	if (ttm->state != tt_unpopulated)
++		return 0;
++
++	for (i = 0; i < ttm->num_pages; ++i) {
++		ret = ttm_get_pages(&ttm->pages[i], 1,
++				    ttm->page_flags,
++				    ttm->caching_state);
++		if (ret != 0) {
++			ttm_pool_unpopulate(ttm);
++			return -ENOMEM;
++		}
++
++		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
++						false, false);
++		if (unlikely(ret != 0)) {
++			ttm_pool_unpopulate(ttm);
++			return -ENOMEM;
++		}
++	}
++
++	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
++		ret = ttm_tt_swapin(ttm);
++		if (unlikely(ret != 0)) {
++			ttm_pool_unpopulate(ttm);
++			return ret;
++		}
++	}
++
++	ttm->state = tt_unbound;
++	return 0;
++}
++EXPORT_SYMBOL(ttm_pool_populate);
++
++void ttm_pool_unpopulate(struct ttm_tt *ttm)
++{
++	unsigned i;
++
++	for (i = 0; i < ttm->num_pages; ++i) {
++		if (ttm->pages[i]) {
++			ttm_mem_global_free_page(ttm->glob->mem_glob,
++						 ttm->pages[i]);
++			ttm_put_pages(&ttm->pages[i], 1,
++				      ttm->page_flags,
++				      ttm->caching_state);
++		}
++	}
++	ttm->state = tt_unpopulated;
++}
++EXPORT_SYMBOL(ttm_pool_unpopulate);
++
+ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+ {
+ 	struct ttm_page_pool *p;
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+new file mode 100644
+index 0000000..4f9e548
+--- /dev/null
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+@@ -0,0 +1,1134 @@
++/*
++ * Copyright 2011 (c) Oracle Corp.
++
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Konrad Rzeszutek Wilk <konrad.wilk at oracle.com>
++ */
++
++/*
++ * A simple DMA pool losely based on dmapool.c. It has certain advantages
++ * over the DMA pools:
++ * - Pool collects resently freed pages for reuse (and hooks up to
++ *   the shrinker).
++ * - Tracks currently in use pages
++ * - Tracks whether the page is UC, WB or cached (and reverts to WB
++ *   when freed).
++ */
++
++#define pr_fmt(fmt) "[TTM] " fmt
++
++#include <linux/dma-mapping.h>
++#include <linux/list.h>
++#include <linux/seq_file.h> /* for seq_printf */
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/highmem.h>
++#include <linux/mm_types.h>
++#include <linux/module.h>
++#include <linux/mm.h>
++#include <linux/atomic.h>
++#include <linux/device.h>
++#include <linux/kthread.h>
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_page_alloc.h"
++#ifdef TTM_HAS_AGP
++#include <asm/agp.h>
++#endif
++
++#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
++#define SMALL_ALLOCATION		4
++#define FREE_ALL_PAGES			(~0U)
++/* times are in msecs */
++#define IS_UNDEFINED			(0)
++#define IS_WC				(1<<1)
++#define IS_UC				(1<<2)
++#define IS_CACHED			(1<<3)
++#define IS_DMA32			(1<<4)
++
++enum pool_type {
++	POOL_IS_UNDEFINED,
++	POOL_IS_WC = IS_WC,
++	POOL_IS_UC = IS_UC,
++	POOL_IS_CACHED = IS_CACHED,
++	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
++	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
++	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
++};
++/*
++ * The pool structure. There are usually six pools:
++ *  - generic (not restricted to DMA32):
++ *      - write combined, uncached, cached.
++ *  - dma32 (up to 2^32 - so up 4GB):
++ *      - write combined, uncached, cached.
++ * for each 'struct device'. The 'cached' is for pages that are actively used.
++ * The other ones can be shrunk by the shrinker API if neccessary.
++ * @pools: The 'struct device->dma_pools' link.
++ * @type: Type of the pool
++ * @lock: Protects the inuse_list and free_list from concurrnet access. Must be
++ * used with irqsave/irqrestore variants because pool allocator maybe called
++ * from delayed work.
++ * @inuse_list: Pool of pages that are in use. The order is very important and
++ *   it is in the order that the TTM pages that are put back are in.
++ * @free_list: Pool of pages that are free to be used. No order requirements.
++ * @dev: The device that is associated with these pools.
++ * @size: Size used during DMA allocation.
++ * @npages_free: Count of available pages for re-use.
++ * @npages_in_use: Count of pages that are in use.
++ * @nfrees: Stats when pool is shrinking.
++ * @nrefills: Stats when the pool is grown.
++ * @gfp_flags: Flags to pass for alloc_page.
++ * @name: Name of the pool.
++ * @dev_name: Name derieved from dev - similar to how dev_info works.
++ *   Used during shutdown as the dev_info during release is unavailable.
++ */
++struct dma_pool {
++	struct list_head pools; /* The 'struct device->dma_pools link */
++	enum pool_type type;
++	spinlock_t lock;
++	struct list_head inuse_list;
++	struct list_head free_list;
++	struct device *dev;
++	unsigned size;
++	unsigned npages_free;
++	unsigned npages_in_use;
++	unsigned long nfrees; /* Stats when shrunk. */
++	unsigned long nrefills; /* Stats when grown. */
++	gfp_t gfp_flags;
++	char name[13]; /* "cached dma32" */
++	char dev_name[64]; /* Constructed from dev */
++};
++
++/*
++ * The accounting page keeping track of the allocated page along with
++ * the DMA address.
++ * @page_list: The link to the 'page_list' in 'struct dma_pool'.
++ * @vaddr: The virtual address of the page
++ * @dma: The bus address of the page. If the page is not allocated
++ *   via the DMA API, it will be -1.
++ */
++struct dma_page {
++	struct list_head page_list;
++	void *vaddr;
++	struct page *p;
++	dma_addr_t dma;
++};
++
++/*
++ * Limits for the pool. They are handled without locks because only place where
++ * they may change is in sysfs store. They won't have immediate effect anyway
++ * so forcing serialization to access them is pointless.
++ */
++
++struct ttm_pool_opts {
++	unsigned	alloc_size;
++	unsigned	max_size;
++	unsigned	small;
++};
++
++/*
++ * Contains the list of all of the 'struct device' and their corresponding
++ * DMA pools. Guarded by _mutex->lock.
++ * @pools: The link to 'struct ttm_pool_manager->pools'
++ * @dev: The 'struct device' associated with the 'pool'
++ * @pool: The 'struct dma_pool' associated with the 'dev'
++ */
++struct device_pools {
++	struct list_head pools;
++	struct device *dev;
++	struct dma_pool *pool;
++};
++
++/*
++ * struct ttm_pool_manager - Holds memory pools for fast allocation
++ *
++ * @lock: Lock used when adding/removing from pools
++ * @pools: List of 'struct device' and 'struct dma_pool' tuples.
++ * @options: Limits for the pool.
++ * @npools: Total amount of pools in existence.
++ * @shrinker: The structure used by [un|]register_shrinker
++ */
++struct ttm_pool_manager {
++	struct mutex		lock;
++	struct list_head	pools;
++	struct ttm_pool_opts	options;
++	unsigned		npools;
++	struct shrinker		mm_shrink;
++	struct kobject		kobj;
++};
++
++static struct ttm_pool_manager *_manager;
++
++static struct attribute ttm_page_pool_max = {
++	.name = "pool_max_size",
++	.mode = S_IRUGO | S_IWUSR
++};
++static struct attribute ttm_page_pool_small = {
++	.name = "pool_small_allocation",
++	.mode = S_IRUGO | S_IWUSR
++};
++static struct attribute ttm_page_pool_alloc_size = {
++	.name = "pool_allocation_size",
++	.mode = S_IRUGO | S_IWUSR
++};
++
++static struct attribute *ttm_pool_attrs[] = {
++	&ttm_page_pool_max,
++	&ttm_page_pool_small,
++	&ttm_page_pool_alloc_size,
++	NULL
++};
++
++static void ttm_pool_kobj_release(struct kobject *kobj)
++{
++	struct ttm_pool_manager *m =
++		container_of(kobj, struct ttm_pool_manager, kobj);
++	kfree(m);
++}
++
++static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
++			      const char *buffer, size_t size)
++{
++	struct ttm_pool_manager *m =
++		container_of(kobj, struct ttm_pool_manager, kobj);
++	int chars;
++	unsigned val;
++	chars = sscanf(buffer, "%u", &val);
++	if (chars == 0)
++		return size;
++
++	/* Convert kb to number of pages */
++	val = val / (PAGE_SIZE >> 10);
++
++	if (attr == &ttm_page_pool_max)
++		m->options.max_size = val;
++	else if (attr == &ttm_page_pool_small)
++		m->options.small = val;
++	else if (attr == &ttm_page_pool_alloc_size) {
++		if (val > NUM_PAGES_TO_ALLOC*8) {
++			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
++			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
++			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
++			return size;
++		} else if (val > NUM_PAGES_TO_ALLOC) {
++			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
++				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
++		}
++		m->options.alloc_size = val;
++	}
++
++	return size;
++}
++
++static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
++			     char *buffer)
++{
++	struct ttm_pool_manager *m =
++		container_of(kobj, struct ttm_pool_manager, kobj);
++	unsigned val = 0;
++
++	if (attr == &ttm_page_pool_max)
++		val = m->options.max_size;
++	else if (attr == &ttm_page_pool_small)
++		val = m->options.small;
++	else if (attr == &ttm_page_pool_alloc_size)
++		val = m->options.alloc_size;
++
++	val = val * (PAGE_SIZE >> 10);
++
++	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
++}
++
++static const struct sysfs_ops ttm_pool_sysfs_ops = {
++	.show = &ttm_pool_show,
++	.store = &ttm_pool_store,
++};
++
++static struct kobj_type ttm_pool_kobj_type = {
++	.release = &ttm_pool_kobj_release,
++	.sysfs_ops = &ttm_pool_sysfs_ops,
++	.default_attrs = ttm_pool_attrs,
++};
++
++#ifndef CONFIG_X86
++static int set_pages_array_wb(struct page **pages, int addrinarray)
++{
++#ifdef TTM_HAS_AGP
++	int i;
++
++	for (i = 0; i < addrinarray; i++)
++		unmap_page_from_agp(pages[i]);
++#endif
++	return 0;
++}
++
++static int set_pages_array_wc(struct page **pages, int addrinarray)
++{
++#ifdef TTM_HAS_AGP
++	int i;
++
++	for (i = 0; i < addrinarray; i++)
++		map_page_into_agp(pages[i]);
++#endif
++	return 0;
++}
++
++static int set_pages_array_uc(struct page **pages, int addrinarray)
++{
++#ifdef TTM_HAS_AGP
++	int i;
++
++	for (i = 0; i < addrinarray; i++)
++		map_page_into_agp(pages[i]);
++#endif
++	return 0;
++}
++#endif /* for !CONFIG_X86 */
++
++static int ttm_set_pages_caching(struct dma_pool *pool,
++				 struct page **pages, unsigned cpages)
++{
++	int r = 0;
++	/* Set page caching */
++	if (pool->type & IS_UC) {
++		r = set_pages_array_uc(pages, cpages);
++		if (r)
++			pr_err("%s: Failed to set %d pages to uc!\n",
++			       pool->dev_name, cpages);
++	}
++	if (pool->type & IS_WC) {
++		r = set_pages_array_wc(pages, cpages);
++		if (r)
++			pr_err("%s: Failed to set %d pages to wc!\n",
++			       pool->dev_name, cpages);
++	}
++	return r;
++}
++
++static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
++{
++	dma_addr_t dma = d_page->dma;
++	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
++
++	kfree(d_page);
++	d_page = NULL;
++}
++static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
++{
++	struct dma_page *d_page;
++
++	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
++	if (!d_page)
++		return NULL;
++
++	d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
++					   &d_page->dma,
++					   pool->gfp_flags);
++	if (d_page->vaddr)
++		d_page->p = virt_to_page(d_page->vaddr);
++	else {
++		kfree(d_page);
++		d_page = NULL;
++	}
++	return d_page;
++}
++static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
++{
++	enum pool_type type = IS_UNDEFINED;
++
++	if (flags & TTM_PAGE_FLAG_DMA32)
++		type |= IS_DMA32;
++	if (cstate == tt_cached)
++		type |= IS_CACHED;
++	else if (cstate == tt_uncached)
++		type |= IS_UC;
++	else
++		type |= IS_WC;
++
++	return type;
++}
++
++static void ttm_pool_update_free_locked(struct dma_pool *pool,
++					unsigned freed_pages)
++{
++	pool->npages_free -= freed_pages;
++	pool->nfrees += freed_pages;
++
++}
++
++/* set memory back to wb and free the pages. */
++static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
++			      struct page *pages[], unsigned npages)
++{
++	struct dma_page *d_page, *tmp;
++
++	/* Don't set WB on WB page pool. */
++	if (npages && !(pool->type & IS_CACHED) &&
++	    set_pages_array_wb(pages, npages))
++		pr_err("%s: Failed to set %d pages to wb!\n",
++		       pool->dev_name, npages);
++
++	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
++		list_del(&d_page->page_list);
++		__ttm_dma_free_page(pool, d_page);
++	}
++}
++
++static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
++{
++	/* Don't set WB on WB page pool. */
++	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
++		pr_err("%s: Failed to set %d pages to wb!\n",
++		       pool->dev_name, 1);
++
++	list_del(&d_page->page_list);
++	__ttm_dma_free_page(pool, d_page);
++}
++
++/*
++ * Free pages from pool.
++ *
++ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
++ * number of pages in one go.
++ *
++ * @pool: to free the pages from
++ * @nr_free: If set to true will free all pages in pool
++ **/
++static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
++{
++	unsigned long irq_flags;
++	struct dma_page *dma_p, *tmp;
++	struct page **pages_to_free;
++	struct list_head d_pages;
++	unsigned freed_pages = 0,
++		 npages_to_free = nr_free;
++
++	if (NUM_PAGES_TO_ALLOC < nr_free)
++		npages_to_free = NUM_PAGES_TO_ALLOC;
++#if 0
++	if (nr_free > 1) {
++		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
++			 pool->dev_name, pool->name, current->pid,
++			 npages_to_free, nr_free);
++	}
++#endif
++	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
++			GFP_KERNEL);
++
++	if (!pages_to_free) {
++		pr_err("%s: Failed to allocate memory for pool free operation\n",
++		       pool->dev_name);
++		return 0;
++	}
++	INIT_LIST_HEAD(&d_pages);
++restart:
++	spin_lock_irqsave(&pool->lock, irq_flags);
++
++	/* We picking the oldest ones off the list */
++	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
++					 page_list) {
++		if (freed_pages >= npages_to_free)
++			break;
++
++		/* Move the dma_page from one list to another. */
++		list_move(&dma_p->page_list, &d_pages);
++
++		pages_to_free[freed_pages++] = dma_p->p;
++		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
++		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
++
++			ttm_pool_update_free_locked(pool, freed_pages);
++			/**
++			 * Because changing page caching is costly
++			 * we unlock the pool to prevent stalling.
++			 */
++			spin_unlock_irqrestore(&pool->lock, irq_flags);
++
++			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
++					  freed_pages);
++
++			INIT_LIST_HEAD(&d_pages);
++
++			if (likely(nr_free != FREE_ALL_PAGES))
++				nr_free -= freed_pages;
++
++			if (NUM_PAGES_TO_ALLOC >= nr_free)
++				npages_to_free = nr_free;
++			else
++				npages_to_free = NUM_PAGES_TO_ALLOC;
++
++			freed_pages = 0;
++
++			/* free all so restart the processing */
++			if (nr_free)
++				goto restart;
++
++			/* Not allowed to fall through or break because
++			 * following context is inside spinlock while we are
++			 * outside here.
++			 */
++			goto out;
++
++		}
++	}
++
++	/* remove range of pages from the pool */
++	if (freed_pages) {
++		ttm_pool_update_free_locked(pool, freed_pages);
++		nr_free -= freed_pages;
++	}
++
++	spin_unlock_irqrestore(&pool->lock, irq_flags);
++
++	if (freed_pages)
++		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
++out:
++	kfree(pages_to_free);
++	return nr_free;
++}
++
++static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
++{
++	struct device_pools *p;
++	struct dma_pool *pool;
++
++	if (!dev)
++		return;
++
++	mutex_lock(&_manager->lock);
++	list_for_each_entry_reverse(p, &_manager->pools, pools) {
++		if (p->dev != dev)
++			continue;
++		pool = p->pool;
++		if (pool->type != type)
++			continue;
++
++		list_del(&p->pools);
++		kfree(p);
++		_manager->npools--;
++		break;
++	}
++	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
++		if (pool->type != type)
++			continue;
++		/* Takes a spinlock.. */
++		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
++		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
++		/* This code path is called after _all_ references to the
++		 * struct device has been dropped - so nobody should be
++		 * touching it. In case somebody is trying to _add_ we are
++		 * guarded by the mutex. */
++		list_del(&pool->pools);
++		kfree(pool);
++		break;
++	}
++	mutex_unlock(&_manager->lock);
++}
++
++/*
++ * On free-ing of the 'struct device' this deconstructor is run.
++ * Albeit the pool might have already been freed earlier.
++ */
++static void ttm_dma_pool_release(struct device *dev, void *res)
++{
++	struct dma_pool *pool = *(struct dma_pool **)res;
++
++	if (pool)
++		ttm_dma_free_pool(dev, pool->type);
++}
++
++static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
++{
++	return *(struct dma_pool **)res == match_data;
++}
++
++static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
++					  enum pool_type type)
++{
++	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
++	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
++	struct device_pools *sec_pool = NULL;
++	struct dma_pool *pool = NULL, **ptr;
++	unsigned i;
++	int ret = -ENODEV;
++	char *p;
++
++	if (!dev)
++		return NULL;
++
++	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
++	if (!ptr)
++		return NULL;
++
++	ret = -ENOMEM;
++
++	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
++			    dev_to_node(dev));
++	if (!pool)
++		goto err_mem;
++
++	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
++				dev_to_node(dev));
++	if (!sec_pool)
++		goto err_mem;
++
++	INIT_LIST_HEAD(&sec_pool->pools);
++	sec_pool->dev = dev;
++	sec_pool->pool =  pool;
++
++	INIT_LIST_HEAD(&pool->free_list);
++	INIT_LIST_HEAD(&pool->inuse_list);
++	INIT_LIST_HEAD(&pool->pools);
++	spin_lock_init(&pool->lock);
++	pool->dev = dev;
++	pool->npages_free = pool->npages_in_use = 0;
++	pool->nfrees = 0;
++	pool->gfp_flags = flags;
++	pool->size = PAGE_SIZE;
++	pool->type = type;
++	pool->nrefills = 0;
++	p = pool->name;
++	for (i = 0; i < 5; i++) {
++		if (type & t[i]) {
++			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
++				      "%s", n[i]);
++		}
++	}
++	*p = 0;
++	/* We copy the name for pr_ calls b/c when dma_pool_destroy is called
++	 * - the kobj->name has already been deallocated.*/
++	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
++		 dev_driver_string(dev), dev_name(dev));
++	mutex_lock(&_manager->lock);
++	/* You can get the dma_pool from either the global: */
++	list_add(&sec_pool->pools, &_manager->pools);
++	_manager->npools++;
++	/* or from 'struct device': */
++	list_add(&pool->pools, &dev->dma_pools);
++	mutex_unlock(&_manager->lock);
++
++	*ptr = pool;
++	devres_add(dev, ptr);
++
++	return pool;
++err_mem:
++	devres_free(ptr);
++	kfree(sec_pool);
++	kfree(pool);
++	return ERR_PTR(ret);
++}
++
++static struct dma_pool *ttm_dma_find_pool(struct device *dev,
++					  enum pool_type type)
++{
++	struct dma_pool *pool, *tmp, *found = NULL;
++
++	if (type == IS_UNDEFINED)
++		return found;
++
++	/* NB: We iterate on the 'struct dev' which has no spinlock, but
++	 * it does have a kref which we have taken. The kref is taken during
++	 * graphic driver loading - in the drm_pci_init it calls either
++	 * pci_dev_get or pci_register_driver which both end up taking a kref
++	 * on 'struct device'.
++	 *
++	 * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
++	 * and calls the dev_res deconstructors: ttm_dma_pool_release. The nice
++	 * thing is at that point of time there are no pages associated with the
++	 * driver so this function will not be called.
++	 */
++	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
++		if (pool->type != type)
++			continue;
++		found = pool;
++		break;
++	}
++	return found;
++}
++
++/*
++ * Free pages the pages that failed to change the caching state. If there
++ * are pages that have changed their caching state already put them to the
++ * pool.
++ */
++static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
++						 struct list_head *d_pages,
++						 struct page **failed_pages,
++						 unsigned cpages)
++{
++	struct dma_page *d_page, *tmp;
++	struct page *p;
++	unsigned i = 0;
++
++	p = failed_pages[0];
++	if (!p)
++		return;
++	/* Find the failed page. */
++	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
++		if (d_page->p != p)
++			continue;
++		/* .. and then progress over the full list. */
++		list_del(&d_page->page_list);
++		__ttm_dma_free_page(pool, d_page);
++		if (++i < cpages)
++			p = failed_pages[i];
++		else
++			break;
++	}
++
++}
++
++/*
++ * Allocate 'count' pages, and put 'need' number of them on the
++ * 'pages' and as well on the 'dma_address' starting at 'dma_offset' offset.
++ * The full list of pages should also be on 'd_pages'.
++ * We return zero for success, and negative numbers as errors.
++ */
++static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
++					struct list_head *d_pages,
++					unsigned count)
++{
++	struct page **caching_array;
++	struct dma_page *dma_p;
++	struct page *p;
++	int r = 0;
++	unsigned i, cpages;
++	unsigned max_cpages = min(count,
++			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
++
++	/* allocate array for page caching change */
++	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
++
++	if (!caching_array) {
++		pr_err("%s: Unable to allocate table for new pages\n",
++		       pool->dev_name);
++		return -ENOMEM;
++	}
++
++	if (count > 1) {
++		pr_debug("%s: (%s:%d) Getting %d pages\n",
++			 pool->dev_name, pool->name, current->pid, count);
++	}
++
++	for (i = 0, cpages = 0; i < count; ++i) {
++		dma_p = __ttm_dma_alloc_page(pool);
++		if (!dma_p) {
++			pr_err("%s: Unable to get page %u\n",
++			       pool->dev_name, i);
++
++			/* store already allocated pages in the pool after
++			 * setting the caching state */
++			if (cpages) {
++				r = ttm_set_pages_caching(pool, caching_array,
++							  cpages);
++				if (r)
++					ttm_dma_handle_caching_state_failure(
++						pool, d_pages, caching_array,
++						cpages);
++			}
++			r = -ENOMEM;
++			goto out;
++		}
++		p = dma_p->p;
++#ifdef CONFIG_HIGHMEM
++		/* gfp flags of highmem page should never be dma32 so we
++		 * we should be fine in such case
++		 */
++		if (!PageHighMem(p))
++#endif
++		{
++			caching_array[cpages++] = p;
++			if (cpages == max_cpages) {
++				/* Note: Cannot hold the spinlock */
++				r = ttm_set_pages_caching(pool, caching_array,
++						 cpages);
++				if (r) {
++					ttm_dma_handle_caching_state_failure(
++						pool, d_pages, caching_array,
++						cpages);
++					goto out;
++				}
++				cpages = 0;
++			}
++		}
++		list_add(&dma_p->page_list, d_pages);
++	}
++
++	if (cpages) {
++		r = ttm_set_pages_caching(pool, caching_array, cpages);
++		if (r)
++			ttm_dma_handle_caching_state_failure(pool, d_pages,
++					caching_array, cpages);
++	}
++out:
++	kfree(caching_array);
++	return r;
++}
++
++/*
++ * @return count of pages still required to fulfill the request.
++ */
++static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
++					 unsigned long *irq_flags)
++{
++	unsigned count = _manager->options.small;
++	int r = pool->npages_free;
++
++	if (count > pool->npages_free) {
++		struct list_head d_pages;
++
++		INIT_LIST_HEAD(&d_pages);
++
++		spin_unlock_irqrestore(&pool->lock, *irq_flags);
++
++		/* Returns how many more are neccessary to fulfill the
++		 * request. */
++		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
++
++		spin_lock_irqsave(&pool->lock, *irq_flags);
++		if (!r) {
++			/* Add the fresh to the end.. */
++			list_splice(&d_pages, &pool->free_list);
++			++pool->nrefills;
++			pool->npages_free += count;
++			r = count;
++		} else {
++			struct dma_page *d_page;
++			unsigned cpages = 0;
++
++			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
++			       pool->dev_name, pool->name, r);
++
++			list_for_each_entry(d_page, &d_pages, page_list) {
++				cpages++;
++			}
++			list_splice_tail(&d_pages, &pool->free_list);
++			pool->npages_free += cpages;
++			r = cpages;
++		}
++	}
++	return r;
++}
++
++/*
++ * @return count of pages still required to fulfill the request.
++ * The populate list is actually a stack (not that is matters as TTM
++ * allocates one page at a time.
++ */
++static int ttm_dma_pool_get_pages(struct dma_pool *pool,
++				  struct ttm_dma_tt *ttm_dma,
++				  unsigned index)
++{
++	struct dma_page *d_page;
++	struct ttm_tt *ttm = &ttm_dma->ttm;
++	unsigned long irq_flags;
++	int count, r = -ENOMEM;
++
++	spin_lock_irqsave(&pool->lock, irq_flags);
++	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
++	if (count) {
++		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
++		ttm->pages[index] = d_page->p;
++		ttm_dma->dma_address[index] = d_page->dma;
++		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
++		r = 0;
++		pool->npages_in_use += 1;
++		pool->npages_free -= 1;
++	}
++	spin_unlock_irqrestore(&pool->lock, irq_flags);
++	return r;
++}
++
++/*
++ * On success pages list will hold count number of correctly
++ * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
++ */
++int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
++{
++	struct ttm_tt *ttm = &ttm_dma->ttm;
++	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
++	struct dma_pool *pool;
++	enum pool_type type;
++	unsigned i;
++	gfp_t gfp_flags;
++	int ret;
++
++	if (ttm->state != tt_unpopulated)
++		return 0;
++
++	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
++	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
++		gfp_flags = GFP_USER | GFP_DMA32;
++	else
++		gfp_flags = GFP_HIGHUSER;
++	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
++		gfp_flags |= __GFP_ZERO;
++
++	pool = ttm_dma_find_pool(dev, type);
++	if (!pool) {
++		pool = ttm_dma_pool_init(dev, gfp_flags, type);
++		if (IS_ERR_OR_NULL(pool)) {
++			return -ENOMEM;
++		}
++	}
++
++	INIT_LIST_HEAD(&ttm_dma->pages_list);
++	for (i = 0; i < ttm->num_pages; ++i) {
++		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
++		if (ret != 0) {
++			ttm_dma_unpopulate(ttm_dma, dev);
++			return -ENOMEM;
++		}
++
++		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
++						false, false);
++		if (unlikely(ret != 0)) {
++			ttm_dma_unpopulate(ttm_dma, dev);
++			return -ENOMEM;
++		}
++	}
++
++	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
++		ret = ttm_tt_swapin(ttm);
++		if (unlikely(ret != 0)) {
++			ttm_dma_unpopulate(ttm_dma, dev);
++			return ret;
++		}
++	}
++
++	ttm->state = tt_unbound;
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ttm_dma_populate);
++
++/* Get good estimation how many pages are free in pools */
++static int ttm_dma_pool_get_num_unused_pages(void)
++{
++	struct device_pools *p;
++	unsigned total = 0;
++
++	mutex_lock(&_manager->lock);
++	list_for_each_entry(p, &_manager->pools, pools)
++		total += p->pool->npages_free;
++	mutex_unlock(&_manager->lock);
++	return total;
++}
++
++/* Put all pages in pages list to correct pool to wait for reuse */
++void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
++{
++	struct ttm_tt *ttm = &ttm_dma->ttm;
++	struct dma_pool *pool;
++	struct dma_page *d_page, *next;
++	enum pool_type type;
++	bool is_cached = false;
++	unsigned count = 0, i, npages = 0;
++	unsigned long irq_flags;
++
++	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
++	pool = ttm_dma_find_pool(dev, type);
++	if (!pool)
++		return;
++
++	is_cached = (ttm_dma_find_pool(pool->dev,
++		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);
++
++	/* make sure pages array match list and count number of pages */
++	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
++		ttm->pages[count] = d_page->p;
++		count++;
++	}
++
++	spin_lock_irqsave(&pool->lock, irq_flags);
++	pool->npages_in_use -= count;
++	if (is_cached) {
++		pool->nfrees += count;
++	} else {
++		pool->npages_free += count;
++		list_splice(&ttm_dma->pages_list, &pool->free_list);
++		npages = count;
++		if (pool->npages_free > _manager->options.max_size) {
++			npages = pool->npages_free - _manager->options.max_size;
++			/* free at least NUM_PAGES_TO_ALLOC number of pages
++			 * to reduce calls to set_memory_wb */
++			if (npages < NUM_PAGES_TO_ALLOC)
++				npages = NUM_PAGES_TO_ALLOC;
++		}
++	}
++	spin_unlock_irqrestore(&pool->lock, irq_flags);
++
++	if (is_cached) {
++		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
++			ttm_mem_global_free_page(ttm->glob->mem_glob,
++						 d_page->p);
++			ttm_dma_page_put(pool, d_page);
++		}
++	} else {
++		for (i = 0; i < count; i++) {
++			ttm_mem_global_free_page(ttm->glob->mem_glob,
++						 ttm->pages[i]);
++		}
++	}
++
++	INIT_LIST_HEAD(&ttm_dma->pages_list);
++	for (i = 0; i < ttm->num_pages; i++) {
++		ttm->pages[i] = NULL;
++		ttm_dma->dma_address[i] = 0;
++	}
++
++	/* shrink pool if necessary (only on !is_cached pools)*/
++	if (npages)
++		ttm_dma_page_pool_free(pool, npages);
++	ttm->state = tt_unpopulated;
++}
++EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
++
++/**
++ * Callback for mm to request pool to reduce number of page held.
++ */
++static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
++				  struct shrink_control *sc)
++{
++	static atomic_t start_pool = ATOMIC_INIT(0);
++	unsigned idx = 0;
++	unsigned pool_offset = atomic_add_return(1, &start_pool);
++	unsigned shrink_pages = sc->nr_to_scan;
++	struct device_pools *p;
++
++	if (list_empty(&_manager->pools))
++		return 0;
++
++	mutex_lock(&_manager->lock);
++	pool_offset = pool_offset % _manager->npools;
++	list_for_each_entry(p, &_manager->pools, pools) {
++		unsigned nr_free;
++
++		if (!p->dev)
++			continue;
++		if (shrink_pages == 0)
++			break;
++		/* Do it in round-robin fashion. */
++		if (++idx < pool_offset)
++			continue;
++		nr_free = shrink_pages;
++		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
++		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
++			 p->pool->dev_name, p->pool->name, current->pid,
++			 nr_free, shrink_pages);
++	}
++	mutex_unlock(&_manager->lock);
++	/* return estimated number of unused pages in pool */
++	return ttm_dma_pool_get_num_unused_pages();
++}
++
++static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
++{
++	manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
++	manager->mm_shrink.seeks = 1;
++	register_shrinker(&manager->mm_shrink);
++}
++
++static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
++{
++	unregister_shrinker(&manager->mm_shrink);
++}
++
++int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
++{
++	int ret = -ENOMEM;
++
++	WARN_ON(_manager);
++
++	pr_info("Initializing DMA pool allocator\n");
++
++	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
++	if (!_manager)
++		goto err_manager;
++
++	mutex_init(&_manager->lock);
++	INIT_LIST_HEAD(&_manager->pools);
++
++	_manager->options.max_size = max_pages;
++	_manager->options.small = SMALL_ALLOCATION;
++	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
++
++	/* This takes care of auto-freeing the _manager */
++	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
++				   &glob->kobj, "dma_pool");
++	if (unlikely(ret != 0)) {
++		kobject_put(&_manager->kobj);
++		goto err;
++	}
++	ttm_dma_pool_mm_shrink_init(_manager);
++	return 0;
++err_manager:
++	kfree(_manager);
++	_manager = NULL;
++err:
++	return ret;
++}
++
++void ttm_dma_page_alloc_fini(void)
++{
++	struct device_pools *p, *t;
++
++	pr_info("Finalizing DMA pool allocator\n");
++	ttm_dma_pool_mm_shrink_fini(_manager);
++
++	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
++		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
++			current->pid);
++		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
++			ttm_dma_pool_match, p->pool));
++		ttm_dma_free_pool(p->dev, p->pool->type);
++	}
++	kobject_put(&_manager->kobj);
++	_manager = NULL;
++}
++
++int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
++{
++	struct device_pools *p;
++	struct dma_pool *pool = NULL;
++	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
++		     "name", "virt", "busaddr"};
++
++	if (!_manager) {
++		seq_printf(m, "No pool allocator running.\n");
++		return 0;
++	}
++	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
++		   h[0], h[1], h[2], h[3], h[4], h[5]);
++	mutex_lock(&_manager->lock);
++	list_for_each_entry(p, &_manager->pools, pools) {
++		struct device *dev = p->dev;
++		if (!dev)
++			continue;
++		pool = p->pool;
++		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
++				pool->name, pool->nrefills,
++				pool->nfrees, pool->npages_in_use,
++				pool->npages_free,
++				pool->dev_name);
++	}
++	mutex_unlock(&_manager->lock);
++	return 0;
++}
++EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
+diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
+index f9cc548..fa09daf 100644
+--- a/drivers/gpu/drm/ttm/ttm_tt.c
++++ b/drivers/gpu/drm/ttm/ttm_tt.c
+@@ -28,6 +28,8 @@
+  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+  */
+ 
++#define pr_fmt(fmt) "[TTM] " fmt
++
+ #include <linux/sched.h>
+ #include <linux/highmem.h>
+ #include <linux/pagemap.h>
+@@ -43,139 +45,20 @@
+ #include "ttm/ttm_placement.h"
+ #include "ttm/ttm_page_alloc.h"
+ 
+-static int ttm_tt_swapin(struct ttm_tt *ttm);
+-
+ /**
+  * Allocates storage for pointers to the pages that back the ttm.
+  */
+ static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
+ {
+-	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
+-	ttm->dma_address = drm_calloc_large(ttm->num_pages,
+-					    sizeof(*ttm->dma_address));
+-}
+-
+-static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
+-{
+-	drm_free_large(ttm->pages);
+-	ttm->pages = NULL;
+-	drm_free_large(ttm->dma_address);
+-	ttm->dma_address = NULL;
+-}
+-
+-static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
+-{
+-	int write;
+-	int dirty;
+-	struct page *page;
+-	int i;
+-	struct ttm_backend *be = ttm->be;
+-
+-	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
+-	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
+-	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
+-
+-	if (be)
+-		be->func->clear(be);
+-
+-	for (i = 0; i < ttm->num_pages; ++i) {
+-		page = ttm->pages[i];
+-		if (page == NULL)
+-			continue;
+-
+-		if (page == ttm->dummy_read_page) {
+-			BUG_ON(write);
+-			continue;
+-		}
+-
+-		if (write && dirty && !PageReserved(page))
+-			set_page_dirty_lock(page);
+-
+-		ttm->pages[i] = NULL;
+-		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
+-		put_page(page);
+-	}
+-	ttm->state = tt_unpopulated;
+-	ttm->first_himem_page = ttm->num_pages;
+-	ttm->last_lomem_page = -1;
++	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
+ }
+ 
+-static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
++static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+ {
+-	struct page *p;
+-	struct list_head h;
+-	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+-	int ret;
+-
+-	while (NULL == (p = ttm->pages[index])) {
+-
+-		INIT_LIST_HEAD(&h);
+-
+-		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
+-				    &ttm->dma_address[index]);
+-
+-		if (ret != 0)
+-			return NULL;
+-
+-		p = list_first_entry(&h, struct page, lru);
+-
+-		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
+-		if (unlikely(ret != 0))
+-			goto out_err;
+-
+-		if (PageHighMem(p))
+-			ttm->pages[--ttm->first_himem_page] = p;
+-		else
+-			ttm->pages[++ttm->last_lomem_page] = p;
+-	}
+-	return p;
+-out_err:
+-	put_page(p);
+-	return NULL;
+-}
+-
+-struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
+-{
+-	int ret;
+-
+-	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+-		ret = ttm_tt_swapin(ttm);
+-		if (unlikely(ret != 0))
+-			return NULL;
+-	}
+-	return __ttm_tt_get_page(ttm, index);
+-}
+-
+-int ttm_tt_populate(struct ttm_tt *ttm)
+-{
+-	struct page *page;
+-	unsigned long i;
+-	struct ttm_backend *be;
+-	int ret;
+-
+-	if (ttm->state != tt_unpopulated)
+-		return 0;
+-
+-	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+-		ret = ttm_tt_swapin(ttm);
+-		if (unlikely(ret != 0))
+-			return ret;
+-	}
+-
+-	be = ttm->be;
+-
+-	for (i = 0; i < ttm->num_pages; ++i) {
+-		page = __ttm_tt_get_page(ttm, i);
+-		if (!page)
+-			return -ENOMEM;
+-	}
+-
+-	be->func->populate(be, ttm->num_pages, ttm->pages,
+-			   ttm->dummy_read_page, ttm->dma_address);
+-	ttm->state = tt_unbound;
+-	return 0;
++	ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
++	ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
++					    sizeof(*ttm->dma_address));
+ }
+-EXPORT_SYMBOL(ttm_tt_populate);
+ 
+ #ifdef CONFIG_X86
+ static inline int ttm_tt_set_page_caching(struct page *p,
+@@ -278,153 +161,100 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
+ }
+ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
+ 
+-static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
+-{
+-	int i;
+-	unsigned count = 0;
+-	struct list_head h;
+-	struct page *cur_page;
+-	struct ttm_backend *be = ttm->be;
+-
+-	INIT_LIST_HEAD(&h);
+-
+-	if (be)
+-		be->func->clear(be);
+-	for (i = 0; i < ttm->num_pages; ++i) {
+-
+-		cur_page = ttm->pages[i];
+-		ttm->pages[i] = NULL;
+-		if (cur_page) {
+-			if (page_count(cur_page) != 1)
+-				printk(KERN_ERR TTM_PFX
+-				       "Erroneous page count. "
+-				       "Leaking pages.\n");
+-			ttm_mem_global_free_page(ttm->glob->mem_glob,
+-						 cur_page);
+-			list_add(&cur_page->lru, &h);
+-			count++;
+-		}
+-	}
+-	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
+-		      ttm->dma_address);
+-	ttm->state = tt_unpopulated;
+-	ttm->first_himem_page = ttm->num_pages;
+-	ttm->last_lomem_page = -1;
+-}
+-
+ void ttm_tt_destroy(struct ttm_tt *ttm)
+ {
+-	struct ttm_backend *be;
+-
+ 	if (unlikely(ttm == NULL))
+ 		return;
+ 
+-	be = ttm->be;
+-	if (likely(be != NULL)) {
+-		be->func->destroy(be);
+-		ttm->be = NULL;
++	if (ttm->state == tt_bound) {
++		ttm_tt_unbind(ttm);
+ 	}
+ 
+ 	if (likely(ttm->pages != NULL)) {
+-		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
+-			ttm_tt_free_user_pages(ttm);
+-		else
+-			ttm_tt_free_alloced_pages(ttm);
+-
+-		ttm_tt_free_page_directory(ttm);
++		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ 	}
+ 
+ 	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
+ 	    ttm->swap_storage)
+ 		fput(ttm->swap_storage);
+ 
+-	kfree(ttm);
++	ttm->swap_storage = NULL;
++	ttm->func->destroy(ttm);
+ }
+ 
+-int ttm_tt_set_user(struct ttm_tt *ttm,
+-		    struct task_struct *tsk,
+-		    unsigned long start, unsigned long num_pages)
++int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
++		unsigned long size, uint32_t page_flags,
++		struct page *dummy_read_page)
+ {
+-	struct mm_struct *mm = tsk->mm;
+-	int ret;
+-	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
+-	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+-
+-	BUG_ON(num_pages != ttm->num_pages);
+-	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
+-
+-	/**
+-	 * Account user pages as lowmem pages for now.
+-	 */
+-
+-	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
+-				   false, false);
+-	if (unlikely(ret != 0))
+-		return ret;
+-
+-	down_read(&mm->mmap_sem);
+-	ret = get_user_pages(tsk, mm, start, num_pages,
+-			     write, 0, ttm->pages, NULL);
+-	up_read(&mm->mmap_sem);
++	ttm->bdev = bdev;
++	ttm->glob = bdev->glob;
++	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++	ttm->caching_state = tt_cached;
++	ttm->page_flags = page_flags;
++	ttm->dummy_read_page = dummy_read_page;
++	ttm->state = tt_unpopulated;
++	ttm->swap_storage = NULL;
+ 
+-	if (ret != num_pages && write) {
+-		ttm_tt_free_user_pages(ttm);
+-		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
++	ttm_tt_alloc_page_directory(ttm);
++	if (!ttm->pages) {
++		ttm_tt_destroy(ttm);
++		pr_err("Failed allocating page table\n");
+ 		return -ENOMEM;
+ 	}
+-
+-	ttm->tsk = tsk;
+-	ttm->start = start;
+-	ttm->state = tt_unbound;
+-
+ 	return 0;
+ }
++EXPORT_SYMBOL(ttm_tt_init);
+ 
+-struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
+-			     uint32_t page_flags, struct page *dummy_read_page)
++void ttm_tt_fini(struct ttm_tt *ttm)
+ {
+-	struct ttm_bo_driver *bo_driver = bdev->driver;
+-	struct ttm_tt *ttm;
+-
+-	if (!bo_driver)
+-		return NULL;
++	drm_free_large(ttm->pages);
++	ttm->pages = NULL;
++}
++EXPORT_SYMBOL(ttm_tt_fini);
+ 
+-	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
+-	if (!ttm)
+-		return NULL;
++int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
++		unsigned long size, uint32_t page_flags,
++		struct page *dummy_read_page)
++{
++	struct ttm_tt *ttm = &ttm_dma->ttm;
+ 
++	ttm->bdev = bdev;
+ 	ttm->glob = bdev->glob;
+ 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+-	ttm->first_himem_page = ttm->num_pages;
+-	ttm->last_lomem_page = -1;
+ 	ttm->caching_state = tt_cached;
+ 	ttm->page_flags = page_flags;
+-
+ 	ttm->dummy_read_page = dummy_read_page;
++	ttm->state = tt_unpopulated;
++	ttm->swap_storage = NULL;
+ 
+-	ttm_tt_alloc_page_directory(ttm);
+-	if (!ttm->pages) {
+-		ttm_tt_destroy(ttm);
+-		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
+-		return NULL;
+-	}
+-	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
+-	if (!ttm->be) {
++	INIT_LIST_HEAD(&ttm_dma->pages_list);
++	ttm_dma_tt_alloc_page_directory(ttm_dma);
++	if (!ttm->pages || !ttm_dma->dma_address) {
+ 		ttm_tt_destroy(ttm);
+-		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
+-		return NULL;
++		pr_err("Failed allocating page table\n");
++		return -ENOMEM;
+ 	}
+-	ttm->state = tt_unpopulated;
+-	return ttm;
++	return 0;
+ }
++EXPORT_SYMBOL(ttm_dma_tt_init);
++
++void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
++{
++	struct ttm_tt *ttm = &ttm_dma->ttm;
++
++	drm_free_large(ttm->pages);
++	ttm->pages = NULL;
++	drm_free_large(ttm_dma->dma_address);
++	ttm_dma->dma_address = NULL;
++}
++EXPORT_SYMBOL(ttm_dma_tt_fini);
+ 
+ void ttm_tt_unbind(struct ttm_tt *ttm)
+ {
+ 	int ret;
+-	struct ttm_backend *be = ttm->be;
+ 
+ 	if (ttm->state == tt_bound) {
+-		ret = be->func->unbind(be);
++		ret = ttm->func->unbind(ttm);
+ 		BUG_ON(ret);
+ 		ttm->state = tt_unbound;
+ 	}
+@@ -433,7 +263,6 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
+ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+ {
+ 	int ret = 0;
+-	struct ttm_backend *be;
+ 
+ 	if (!ttm)
+ 		return -EINVAL;
+@@ -441,25 +270,21 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+ 	if (ttm->state == tt_bound)
+ 		return 0;
+ 
+-	be = ttm->be;
+-
+-	ret = ttm_tt_populate(ttm);
++	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = be->func->bind(be, bo_mem);
++	ret = ttm->func->bind(ttm, bo_mem);
+ 	if (unlikely(ret != 0))
+ 		return ret;
+ 
+ 	ttm->state = tt_bound;
+ 
+-	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
+-		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
+ 	return 0;
+ }
+ EXPORT_SYMBOL(ttm_tt_bind);
+ 
+-static int ttm_tt_swapin(struct ttm_tt *ttm)
++int ttm_tt_swapin(struct ttm_tt *ttm)
+ {
+ 	struct address_space *swap_space;
+ 	struct file *swap_storage;
+@@ -470,16 +295,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
+ 	int i;
+ 	int ret = -ENOMEM;
+ 
+-	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
+-		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
+-				      ttm->num_pages);
+-		if (unlikely(ret != 0))
+-			return ret;
+-
+-		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+-		return 0;
+-	}
+-
+ 	swap_storage = ttm->swap_storage;
+ 	BUG_ON(swap_storage == NULL);
+ 
+@@ -491,16 +306,16 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
+ 			ret = PTR_ERR(from_page);
+ 			goto out_err;
+ 		}
+-		to_page = __ttm_tt_get_page(ttm, i);
++		to_page = ttm->pages[i];
+ 		if (unlikely(to_page == NULL))
+ 			goto out_err;
+ 
+ 		preempt_disable();
+-		from_virtual = kmap_atomic(from_page, KM_USER0);
+-		to_virtual = kmap_atomic(to_page, KM_USER1);
++		from_virtual = kmap_atomic(from_page);
++		to_virtual = kmap_atomic(to_page);
+ 		memcpy(to_virtual, from_virtual, PAGE_SIZE);
+-		kunmap_atomic(to_virtual, KM_USER1);
+-		kunmap_atomic(from_virtual, KM_USER0);
++		kunmap_atomic(to_virtual);
++		kunmap_atomic(from_virtual);
+ 		preempt_enable();
+ 		page_cache_release(from_page);
+ 	}
+@@ -512,7 +327,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
+ 
+ 	return 0;
+ out_err:
+-	ttm_tt_free_alloced_pages(ttm);
+ 	return ret;
+ }
+ 
+@@ -530,24 +344,12 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
+ 	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
+ 	BUG_ON(ttm->caching_state != tt_cached);
+ 
+-	/*
+-	 * For user buffers, just unpin the pages, as there should be
+-	 * vma references.
+-	 */
+-
+-	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
+-		ttm_tt_free_user_pages(ttm);
+-		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+-		ttm->swap_storage = NULL;
+-		return 0;
+-	}
+-
+ 	if (!persistent_swap_storage) {
+ 		swap_storage = shmem_file_setup("ttm swap",
+ 						ttm->num_pages << PAGE_SHIFT,
+ 						0);
+ 		if (unlikely(IS_ERR(swap_storage))) {
+-			printk(KERN_ERR "Failed allocating swap storage.\n");
++			pr_err("Failed allocating swap storage\n");
+ 			return PTR_ERR(swap_storage);
+ 		}
+ 	} else
+@@ -565,18 +367,18 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
+ 			goto out_err;
+ 		}
+ 		preempt_disable();
+-		from_virtual = kmap_atomic(from_page, KM_USER0);
+-		to_virtual = kmap_atomic(to_page, KM_USER1);
++		from_virtual = kmap_atomic(from_page);
++		to_virtual = kmap_atomic(to_page);
+ 		memcpy(to_virtual, from_virtual, PAGE_SIZE);
+-		kunmap_atomic(to_virtual, KM_USER1);
+-		kunmap_atomic(from_virtual, KM_USER0);
++		kunmap_atomic(to_virtual);
++		kunmap_atomic(from_virtual);
+ 		preempt_enable();
+ 		set_page_dirty(to_page);
+ 		mark_page_accessed(to_page);
+ 		page_cache_release(to_page);
+ 	}
+ 
+-	ttm_tt_free_alloced_pages(ttm);
++	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ 	ttm->swap_storage = swap_storage;
+ 	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+ 	if (persistent_swap_storage)
+diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
+new file mode 100644
+index 0000000..0b5e096
+--- /dev/null
++++ b/drivers/gpu/drm/udl/Kconfig
+@@ -0,0 +1,12 @@
++config DRM_UDL
++	tristate "DisplayLink"
++	depends on DRM && EXPERIMENTAL
++	select DRM_USB
++	select FB_SYS_FILLRECT
++	select FB_SYS_COPYAREA
++	select FB_SYS_IMAGEBLIT
++	select FB_DEFERRED_IO
++	select DRM_KMS_HELPER
++	help
++	  This is a KMS driver for the USB displaylink video adapters.
++          Say M/Y to add support for these devices via drm/kms interfaces.
+diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
+new file mode 100644
+index 0000000..05c7481
+--- /dev/null
++++ b/drivers/gpu/drm/udl/Makefile
+@@ -0,0 +1,6 @@
++
++ccflags-y := -Iinclude/drm
++
++udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o
++
++obj-$(CONFIG_DRM_UDL) := udl.o
+diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
+new file mode 100644
+index 0000000..3234224
+--- /dev/null
++++ b/drivers/gpu/drm/udl/udl_connector.c
+@@ -0,0 +1,161 @@
++/*
++ * Copyright (C) 2012 Red Hat
++ * based in parts on udlfb.c:
++ * Copyright (C) 2009 Roberto De Ioris <roberto at unbit.it>
++ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml at gmail.com>
++ * Copyright (C) 2009 Bernie Thompson <bernie at plugable.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License v2. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++#include "drmP.h"
++#include "drm_crtc.h"
++#include "drm_edid.h"
++#include "drm_crtc_helper.h"
++#include "udl_drv.h"
++
++/* dummy connector to just get EDID,
++   all UDL appear to have a DVI-D */
++
++static u8 *udl_get_edid(struct udl_device *udl)
++{
++	u8 *block;
++	char *rbuf;
++	int ret, i;
++
++	block = kmalloc(EDID_LENGTH, GFP_KERNEL);
++	if (block == NULL)
++		return NULL;
++
++	rbuf = kmalloc(2, GFP_KERNEL);
++	if (rbuf == NULL)
++		goto error;
++
++	for (i = 0; i < EDID_LENGTH; i++) {
++		ret = usb_control_msg(udl->ddev->usbdev,
++				      usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
++				      (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
++				      HZ);
++		if (ret < 1) {
++			DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
++			goto error;
++		}
++		block[i] = rbuf[1];
++	}
++
++	kfree(rbuf);
++	return block;
++
++error:
++	kfree(block);
++	kfree(rbuf);
++	return NULL;
++}
++
++static int udl_get_modes(struct drm_connector *connector)
++{
++	struct udl_device *udl = connector->dev->dev_private;
++	struct edid *edid;
++	int ret;
++
++	edid = (struct edid *)udl_get_edid(udl);
++
++	connector->display_info.raw_edid = (char *)edid;
++
++	/*
++	 * We only read the main block, but if the monitor reports extension
++	 * blocks then the drm edid code expects them to be present, so patch
++	 * the extension count to 0.
++	 */
++	edid->checksum += edid->extensions;
++	edid->extensions = 0;
++
++	drm_mode_connector_update_edid_property(connector, edid);
++	ret = drm_add_edid_modes(connector, edid);
++	connector->display_info.raw_edid = NULL;
++	kfree(edid);
++	return ret;
++}
++
++static int udl_mode_valid(struct drm_connector *connector,
++			  struct drm_display_mode *mode)
++{
++	struct udl_device *udl = connector->dev->dev_private;
++	if (!udl->sku_pixel_limit)
++		return 0;
++
++	if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
++		return MODE_VIRTUAL_Y;
++
++	return 0;
++}
++
++static enum drm_connector_status
++udl_detect(struct drm_connector *connector, bool force)
++{
++	if (drm_device_is_unplugged(connector->dev))
++		return connector_status_disconnected;
++	return connector_status_connected;
++}
++
++struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
++{
++	int enc_id = connector->encoder_ids[0];
++	struct drm_mode_object *obj;
++	struct drm_encoder *encoder;
++
++	obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
++	if (!obj)
++		return NULL;
++	encoder = obj_to_encoder(obj);
++	return encoder;
++}
++
++int udl_connector_set_property(struct drm_connector *connector, struct drm_property *property,
++			       uint64_t val)
++{
++	return 0;
++}
++
++static void udl_connector_destroy(struct drm_connector *connector)
++{
++	drm_sysfs_connector_remove(connector);
++	drm_connector_cleanup(connector);
++	kfree(connector);
++}
++
++struct drm_connector_helper_funcs udl_connector_helper_funcs = {
++	.get_modes = udl_get_modes,
++	.mode_valid = udl_mode_valid,
++	.best_encoder = udl_best_single_encoder,
++};
++
++struct drm_connector_funcs udl_connector_funcs = {
++	.dpms = drm_helper_connector_dpms,
++	.detect = udl_detect,
++	.fill_modes = drm_helper_probe_single_connector_modes,
++	.destroy = udl_connector_destroy,
++	.set_property = udl_connector_set_property,
++};
++
++int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
++{
++	struct drm_connector *connector;
++
++	connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
++	if (!connector)
++		return -ENOMEM;
++
++	drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII);
++	drm_connector_helper_add(connector, &udl_connector_helper_funcs);
++
++	drm_sysfs_connector_add(connector);
++	drm_mode_connector_attach_encoder(connector, encoder);
++
++	drm_connector_attach_property(connector,
++				      dev->mode_config.dirty_info_property,
++				      1);
++	return 0;
++}
+diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
+new file mode 100644
+index 0000000..08eff0d
+--- /dev/null
++++ b/drivers/gpu/drm/udl/udl_drv.c
+@@ -0,0 +1,112 @@
++/*
++ * Copyright (C) 2012 Red Hat
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License v2. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++#include <linux/module.h>
++#include "drm_usb.h"
++#include "drm_crtc_helper.h"
++#include "udl_drv.h"
++
++static struct drm_driver driver;
++
++/*
++ * There are many DisplayLink-based graphics products, all with unique PIDs.
++ * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff)
++ * We also require a match on SubClass (0x00) and Protocol (0x00),
++ * which is compatible with all known USB 2.0 era graphics chips and firmware,
++ * but allows DisplayLink to increment those for any future incompatible chips
++ */
++static struct usb_device_id id_table[] = {
++	{.idVendor = 0x17e9, .bInterfaceClass = 0xff,
++	 .bInterfaceSubClass = 0x00,
++	 .bInterfaceProtocol = 0x00,
++	 .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
++			USB_DEVICE_ID_MATCH_INT_CLASS |
++			USB_DEVICE_ID_MATCH_INT_SUBCLASS |
++			USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
++	{},
++};
++MODULE_DEVICE_TABLE(usb, id_table);
++
++MODULE_LICENSE("GPL");
++
++static int udl_usb_probe(struct usb_interface *interface,
++			 const struct usb_device_id *id)
++{
++	return drm_get_usb_dev(interface, id, &driver);
++}
++
++static void udl_usb_disconnect(struct usb_interface *interface)
++{
++	struct drm_device *dev = usb_get_intfdata(interface);
++
++	drm_kms_helper_poll_disable(dev);
++	drm_connector_unplug_all(dev);
++	udl_fbdev_unplug(dev);
++	udl_drop_usb(dev);
++	drm_unplug_dev(dev);
++}
++
++static struct vm_operations_struct udl_gem_vm_ops = {
++	.fault = udl_gem_fault,
++	.open = drm_gem_vm_open,
++	.close = drm_gem_vm_close,
++};
++
++static const struct file_operations udl_driver_fops = {
++	.owner = THIS_MODULE,
++	.open = drm_open,
++	.mmap = udl_drm_gem_mmap,
++	.poll = drm_poll,
++	.read = drm_read,
++	.unlocked_ioctl	= drm_ioctl,
++	.release = drm_release,
++	.fasync = drm_fasync,
++	.llseek = noop_llseek,
++};
++
++static struct drm_driver driver = {
++	.driver_features = DRIVER_MODESET | DRIVER_GEM,
++	.load = udl_driver_load,
++	.unload = udl_driver_unload,
++
++	/* gem hooks */
++	.gem_init_object = udl_gem_init_object,
++	.gem_free_object = udl_gem_free_object,
++	.gem_vm_ops = &udl_gem_vm_ops,
++
++	.dumb_create = udl_dumb_create,
++	.dumb_map_offset = udl_gem_mmap,
++	.dumb_destroy = udl_dumb_destroy,
++	.fops = &udl_driver_fops,
++	.name = DRIVER_NAME,
++	.desc = DRIVER_DESC,
++	.date = DRIVER_DATE,
++	.major = DRIVER_MAJOR,
++	.minor = DRIVER_MINOR,
++	.patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static struct usb_driver udl_driver = {
++	.name = "udl",
++	.probe = udl_usb_probe,
++	.disconnect = udl_usb_disconnect,
++	.id_table = id_table,
++};
++
++static int __init udl_init(void)
++{
++	return drm_usb_init(&driver, &udl_driver);
++}
++
++static void __exit udl_exit(void)
++{
++	drm_usb_exit(&driver, &udl_driver);
++}
++
++module_init(udl_init);
++module_exit(udl_exit);
+diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
+new file mode 100644
+index 0000000..e760575
+--- /dev/null
++++ b/drivers/gpu/drm/udl/udl_drv.h
+@@ -0,0 +1,142 @@
++/*
++ * Copyright (C) 2012 Red Hat
++ *
++ * based in parts on udlfb.c:
++ * Copyright (C) 2009 Roberto De Ioris <roberto at unbit.it>
++ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml at gmail.com>
++ * Copyright (C) 2009 Bernie Thompson <bernie at plugable.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License v2. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++#ifndef UDL_DRV_H
++#define UDL_DRV_H
++
++#include <linux/usb.h>
++
++#define DRIVER_NAME		"udl"
++#define DRIVER_DESC		"DisplayLink"
++#define DRIVER_DATE		"20120220"
++
++#define DRIVER_MAJOR		0
++#define DRIVER_MINOR		0
++#define DRIVER_PATCHLEVEL	1
++
++struct udl_device;
++
++struct urb_node {
++	struct list_head entry;
++	struct udl_device *dev;
++	struct delayed_work release_urb_work;
++	struct urb *urb;
++};
++
++struct urb_list {
++	struct list_head list;
++	spinlock_t lock;
++	struct semaphore limit_sem;
++	int available;
++	int count;
++	size_t size;
++};
++
++struct udl_fbdev;
++
++struct udl_device {
++	struct device *dev;
++	struct drm_device *ddev;
++
++	int sku_pixel_limit;
++
++	struct urb_list urbs;
++	atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
++
++	struct udl_fbdev *fbdev;
++	char mode_buf[1024];
++	uint32_t mode_buf_len;
++	atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
++	atomic_t bytes_identical; /* saved effort with backbuffer comparison */
++	atomic_t bytes_sent; /* to usb, after compression including overhead */
++	atomic_t cpu_kcycles_used; /* transpired during pixel processing */
++};
++
++struct udl_gem_object {
++	struct drm_gem_object base;
++	struct page **pages;
++	void *vmapping;
++};
++
++#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
++
++struct udl_framebuffer {
++	struct drm_framebuffer base;
++	struct udl_gem_object *obj;
++	bool active_16; /* active on the 16-bit channel */
++};
++
++#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
++
++/* modeset */
++int udl_modeset_init(struct drm_device *dev);
++void udl_modeset_cleanup(struct drm_device *dev);
++int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder);
++
++struct drm_encoder *udl_encoder_init(struct drm_device *dev);
++
++struct urb *udl_get_urb(struct drm_device *dev);
++
++int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
++void udl_urb_completion(struct urb *urb);
++
++int udl_driver_load(struct drm_device *dev, unsigned long flags);
++int udl_driver_unload(struct drm_device *dev);
++
++int udl_fbdev_init(struct drm_device *dev);
++void udl_fbdev_cleanup(struct drm_device *dev);
++void udl_fbdev_unplug(struct drm_device *dev);
++struct drm_framebuffer *
++udl_fb_user_fb_create(struct drm_device *dev,
++		      struct drm_file *file,
++		      struct drm_mode_fb_cmd2 *mode_cmd);
++
++int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
++		     const char *front, char **urb_buf_ptr,
++		     u32 byte_offset, u32 device_byte_offset, u32 byte_width,
++		     int *ident_ptr, int *sent_ptr);
++
++int udl_dumb_create(struct drm_file *file_priv,
++		    struct drm_device *dev,
++		    struct drm_mode_create_dumb *args);
++int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
++		 uint32_t handle, uint64_t *offset);
++int udl_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
++		     uint32_t handle);
++
++int udl_gem_init_object(struct drm_gem_object *obj);
++void udl_gem_free_object(struct drm_gem_object *gem_obj);
++struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
++					    size_t size);
++
++int udl_gem_vmap(struct udl_gem_object *obj);
++void udl_gem_vunmap(struct udl_gem_object *obj);
++int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
++int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
++
++int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
++		      int width, int height);
++
++int udl_drop_usb(struct drm_device *dev);
++
++#define CMD_WRITE_RAW8   "\xAF\x60" /**< 8 bit raw write command. */
++#define CMD_WRITE_RL8    "\xAF\x61" /**< 8 bit run length command. */
++#define CMD_WRITE_COPY8  "\xAF\x62" /**< 8 bit copy command. */
++#define CMD_WRITE_RLX8   "\xAF\x63" /**< 8 bit extended run length command. */
++
++#define CMD_WRITE_RAW16  "\xAF\x68" /**< 16 bit raw write command. */
++#define CMD_WRITE_RL16   "\xAF\x69" /**< 16 bit run length command. */
++#define CMD_WRITE_COPY16 "\xAF\x6A" /**< 16 bit copy command. */
++#define CMD_WRITE_RLX16  "\xAF\x6B" /**< 16 bit extended run length command. */
++
++#endif
+diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
+new file mode 100644
+index 0000000..56e75f0
+--- /dev/null
++++ b/drivers/gpu/drm/udl/udl_encoder.c
+@@ -0,0 +1,80 @@
++/*
++ * Copyright (C) 2012 Red Hat
++ * based in parts on udlfb.c:
++ * Copyright (C) 2009 Roberto De Ioris <roberto at unbit.it>
++ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml at gmail.com>
++ * Copyright (C) 2009 Bernie Thompson <bernie at plugable.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License v2. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++#include "drmP.h"
++#include "drm_crtc.h"
++#include "drm_crtc_helper.h"
++#include "udl_drv.h"
++
++/* dummy encoder */
++void udl_enc_destroy(struct drm_encoder *encoder)
++{
++	drm_encoder_cleanup(encoder);
++	kfree(encoder);
++}
++
++static void udl_encoder_disable(struct drm_encoder *encoder)
++{
++}
++
++static bool udl_mode_fixup(struct drm_encoder *encoder,
++			   struct drm_display_mode *mode,
++			   struct drm_display_mode *adjusted_mode)
++{
++	return true;
++}
++
++static void udl_encoder_prepare(struct drm_encoder *encoder)
++{
++}
++
++static void udl_encoder_commit(struct drm_encoder *encoder)
++{
++}
++
++static void udl_encoder_mode_set(struct drm_encoder *encoder,
++				 struct drm_display_mode *mode,
++				 struct drm_display_mode *adjusted_mode)
++{
++}
++
++static void
++udl_encoder_dpms(struct drm_encoder *encoder, int mode)
++{
++}
++
++static const struct drm_encoder_helper_funcs udl_helper_funcs = {
++	.dpms = udl_encoder_dpms,
++	.mode_fixup = udl_mode_fixup,
++	.prepare = udl_encoder_prepare,
++	.mode_set = udl_encoder_mode_set,
++	.commit = udl_encoder_commit,
++	.disable = udl_encoder_disable,
++};
++
++static const struct drm_encoder_funcs udl_enc_funcs = {
++	.destroy = udl_enc_destroy,
++};
++
++struct drm_encoder *udl_encoder_init(struct drm_device *dev)
++{
++	struct drm_encoder *encoder;
++
++	encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
++	if (!encoder)
++		return NULL;
++
++	drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS);
++	drm_encoder_helper_add(encoder, &udl_helper_funcs);
++	encoder->possible_crtcs = 1;
++	return encoder;
++}
+diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
+new file mode 100644
+index 0000000..b9282cf
+--- /dev/null
++++ b/drivers/gpu/drm/udl/udl_fb.c
+@@ -0,0 +1,613 @@
++/*
++ * Copyright (C) 2012 Red Hat
++ *
++ * based in parts on udlfb.c:
++ * Copyright (C) 2009 Roberto De Ioris <roberto at unbit.it>
++ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml at gmail.com>
++ * Copyright (C) 2009 Bernie Thompson <bernie at plugable.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License v2. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/fb.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_crtc.h"
++#include "drm_crtc_helper.h"
++#include "udl_drv.h"
++
++#include "drm_fb_helper.h"
++
++#define DL_DEFIO_WRITE_DELAY    5 /* fb_deferred_io.delay in jiffies */
++
++static int fb_defio = 1;  /* Optionally enable experimental fb_defio mmap support */
++static int fb_bpp = 16;
++
++module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
++module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
++
++struct udl_fbdev {
++	struct drm_fb_helper helper;
++	struct udl_framebuffer ufb;
++	struct list_head fbdev_list;
++	int fb_count;
++};
++
++#define DL_ALIGN_UP(x, a) ALIGN(x, a)
++#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)
++
++/** Read the red component (0..255) of a 32 bpp colour. */
++#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)
++
++/** Read the green component (0..255) of a 32 bpp colour. */
++#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)
++
++/** Read the blue component (0..255) of a 32 bpp colour. */
++#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)
++
++/** Return red/green component of a 16 bpp colour number. */
++#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)
++
++/** Return green/blue component of a 16 bpp colour number. */
++#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)
++
++/** Return 8 bpp colour number from red, green and blue components. */
++#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)
++
++#if 0
++static uint8_t rgb8(uint32_t col)
++{
++	uint8_t red = DLO_RGB_GETRED(col);
++	uint8_t grn = DLO_RGB_GETGRN(col);
++	uint8_t blu = DLO_RGB_GETBLU(col);
++
++	return DLO_RGB8(red, grn, blu);
++}
++
++static uint16_t rgb16(uint32_t col)
++{
++	uint8_t red = DLO_RGB_GETRED(col);
++	uint8_t grn = DLO_RGB_GETGRN(col);
++	uint8_t blu = DLO_RGB_GETBLU(col);
++
++	return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
++}
++#endif
++
++/*
++ * NOTE: fb_defio.c is holding info->fbdefio.mutex
++ *   Touching ANY framebuffer memory that triggers a page fault
++ *   in fb_defio will cause a deadlock, when it also tries to
++ *   grab the same mutex.
++ */
++static void udlfb_dpy_deferred_io(struct fb_info *info,
++				  struct list_head *pagelist)
++{
++	struct page *cur;
++	struct fb_deferred_io *fbdefio = info->fbdefio;
++	struct udl_fbdev *ufbdev = info->par;
++	struct drm_device *dev = ufbdev->ufb.base.dev;
++	struct udl_device *udl = dev->dev_private;
++	struct urb *urb;
++	char *cmd;
++	cycles_t start_cycles, end_cycles;
++	int bytes_sent = 0;
++	int bytes_identical = 0;
++	int bytes_rendered = 0;
++
++	if (!fb_defio)
++		return;
++
++	start_cycles = get_cycles();
++
++	urb = udl_get_urb(dev);
++	if (!urb)
++		return;
++
++	cmd = urb->transfer_buffer;
++
++	/* walk the written page list and render each to device */
++	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
++
++		if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
++				     &urb, (char *) info->fix.smem_start,
++				     &cmd, cur->index << PAGE_SHIFT,
++				     cur->index << PAGE_SHIFT,
++				     PAGE_SIZE, &bytes_identical, &bytes_sent))
++			goto error;
++		bytes_rendered += PAGE_SIZE;
++	}
++
++	if (cmd > (char *) urb->transfer_buffer) {
++		/* Send partial buffer remaining before exiting */
++		int len = cmd - (char *) urb->transfer_buffer;
++		udl_submit_urb(dev, urb, len);
++		bytes_sent += len;
++	} else
++		udl_urb_completion(urb);
++
++error:
++	atomic_add(bytes_sent, &udl->bytes_sent);
++	atomic_add(bytes_identical, &udl->bytes_identical);
++	atomic_add(bytes_rendered, &udl->bytes_rendered);
++	end_cycles = get_cycles();
++	atomic_add(((unsigned int) ((end_cycles - start_cycles)
++		    >> 10)), /* Kcycles */
++		   &udl->cpu_kcycles_used);
++}
++
++int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
++		      int width, int height)
++{
++	struct drm_device *dev = fb->base.dev;
++	struct udl_device *udl = dev->dev_private;
++	int i, ret;
++	char *cmd;
++	cycles_t start_cycles, end_cycles;
++	int bytes_sent = 0;
++	int bytes_identical = 0;
++	struct urb *urb;
++	int aligned_x;
++	int bpp = (fb->base.bits_per_pixel / 8);
++
++	if (!fb->active_16)
++		return 0;
++
++	if (!fb->obj->vmapping)
++		udl_gem_vmap(fb->obj);
++
++	start_cycles = get_cycles();
++
++	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
++	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
++	x = aligned_x;
++
++	if ((width <= 0) ||
++	    (x + width > fb->base.width) ||
++	    (y + height > fb->base.height))
++		return -EINVAL;
++
++	urb = udl_get_urb(dev);
++	if (!urb)
++		return 0;
++	cmd = urb->transfer_buffer;
++
++	for (i = y; i < y + height ; i++) {
++		const int line_offset = fb->base.pitches[0] * i;
++		const int byte_offset = line_offset + (x * bpp);
++		const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
++		if (udl_render_hline(dev, bpp, &urb,
++				     (char *) fb->obj->vmapping,
++				     &cmd, byte_offset, dev_byte_offset,
++				     width * bpp,
++				     &bytes_identical, &bytes_sent))
++			goto error;
++	}
++
++	if (cmd > (char *) urb->transfer_buffer) {
++		/* Send partial buffer remaining before exiting */
++		int len = cmd - (char *) urb->transfer_buffer;
++		ret = udl_submit_urb(dev, urb, len);
++		bytes_sent += len;
++	} else
++		udl_urb_completion(urb);
++
++error:
++	atomic_add(bytes_sent, &udl->bytes_sent);
++	atomic_add(bytes_identical, &udl->bytes_identical);
++	atomic_add(width*height*bpp, &udl->bytes_rendered);
++	end_cycles = get_cycles();
++	atomic_add(((unsigned int) ((end_cycles - start_cycles)
++		    >> 10)), /* Kcycles */
++		   &udl->cpu_kcycles_used);
++
++	return 0;
++}
++
++static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
++{
++	unsigned long start = vma->vm_start;
++	unsigned long size = vma->vm_end - vma->vm_start;
++	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
++	unsigned long page, pos;
++
++	if (offset + size > info->fix.smem_len)
++		return -EINVAL;
++
++	pos = (unsigned long)info->fix.smem_start + offset;
++
++	pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
++		  pos, size);
++
++	while (size > 0) {
++		page = vmalloc_to_pfn((void *)pos);
++		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
++			return -EAGAIN;
++
++		start += PAGE_SIZE;
++		pos += PAGE_SIZE;
++		if (size > PAGE_SIZE)
++			size -= PAGE_SIZE;
++		else
++			size = 0;
++	}
++
++	vma->vm_flags |= VM_RESERVED;	/* avoid to swap out this VMA */
++	return 0;
++}
++
++static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
++{
++	struct udl_fbdev *ufbdev = info->par;
++
++	sys_fillrect(info, rect);
++
++	udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
++			  rect->height);
++}
++
++static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
++{
++	struct udl_fbdev *ufbdev = info->par;
++
++	sys_copyarea(info, region);
++
++	udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
++			  region->height);
++}
++
++static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
++{
++	struct udl_fbdev *ufbdev = info->par;
++
++	sys_imageblit(info, image);
++
++	udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
++			  image->height);
++}
++
++/*
++ * It's common for several clients to have framebuffer open simultaneously.
++ * e.g. both fbcon and X. Makes things interesting.
++ * Assumes caller is holding info->lock (for open and release at least)
++ */
++static int udl_fb_open(struct fb_info *info, int user)
++{
++	struct udl_fbdev *ufbdev = info->par;
++	struct drm_device *dev = ufbdev->ufb.base.dev;
++	struct udl_device *udl = dev->dev_private;
++
++	/* If the USB device is gone, we don't accept new opens */
++	if (drm_device_is_unplugged(udl->ddev))
++		return -ENODEV;
++
++	ufbdev->fb_count++;
++
++	if (fb_defio && (info->fbdefio == NULL)) {
++		/* enable defio at last moment if not disabled by client */
++
++		struct fb_deferred_io *fbdefio;
++
++		fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
++
++		if (fbdefio) {
++			fbdefio->delay = DL_DEFIO_WRITE_DELAY;
++			fbdefio->deferred_io = udlfb_dpy_deferred_io;
++		}
++
++		info->fbdefio = fbdefio;
++		fb_deferred_io_init(info);
++	}
++
++	pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
++		  info->node, user, info, ufbdev->fb_count);
++
++	return 0;
++}
++
++
++/*
++ * Assumes caller is holding info->lock mutex (for open and release at least)
++ */
++static int udl_fb_release(struct fb_info *info, int user)
++{
++	struct udl_fbdev *ufbdev = info->par;
++
++	ufbdev->fb_count--;
++
++	if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
++		fb_deferred_io_cleanup(info);
++		kfree(info->fbdefio);
++		info->fbdefio = NULL;
++		info->fbops->fb_mmap = udl_fb_mmap;
++	}
++
++	pr_warn("released /dev/fb%d user=%d count=%d\n",
++		info->node, user, ufbdev->fb_count);
++
++	return 0;
++}
++
++static struct fb_ops udlfb_ops = {
++	.owner = THIS_MODULE,
++	.fb_check_var = drm_fb_helper_check_var,
++	.fb_set_par = drm_fb_helper_set_par,
++	.fb_fillrect = udl_fb_fillrect,
++	.fb_copyarea = udl_fb_copyarea,
++	.fb_imageblit = udl_fb_imageblit,
++	.fb_pan_display = drm_fb_helper_pan_display,
++	.fb_blank = drm_fb_helper_blank,
++	.fb_setcmap = drm_fb_helper_setcmap,
++	.fb_debug_enter = drm_fb_helper_debug_enter,
++	.fb_debug_leave = drm_fb_helper_debug_leave,
++	.fb_mmap = udl_fb_mmap,
++	.fb_open = udl_fb_open,
++	.fb_release = udl_fb_release,
++};
++
++void udl_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
++			   u16 blue, int regno)
++{
++}
++
++void udl_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
++			     u16 *blue, int regno)
++{
++	*red = 0;
++	*green = 0;
++	*blue = 0;
++}
++
++static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
++				      struct drm_file *file,
++				      unsigned flags, unsigned color,
++				      struct drm_clip_rect *clips,
++				      unsigned num_clips)
++{
++	struct udl_framebuffer *ufb = to_udl_fb(fb);
++	int i;
++
++	if (!ufb->active_16)
++		return 0;
++
++	for (i = 0; i < num_clips; i++) {
++		udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
++				  clips[i].x2 - clips[i].x1,
++				  clips[i].y2 - clips[i].y1);
++	}
++	return 0;
++}
++
++static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
++{
++	struct udl_framebuffer *ufb = to_udl_fb(fb);
++
++	if (ufb->obj)
++		drm_gem_object_unreference_unlocked(&ufb->obj->base);
++
++	drm_framebuffer_cleanup(fb);
++	kfree(ufb);
++}
++
++static const struct drm_framebuffer_funcs udlfb_funcs = {
++	.destroy = udl_user_framebuffer_destroy,
++	.dirty = udl_user_framebuffer_dirty,
++	.create_handle = NULL,
++};
++
++
++static int
++udl_framebuffer_init(struct drm_device *dev,
++		     struct udl_framebuffer *ufb,
++		     struct drm_mode_fb_cmd2 *mode_cmd,
++		     struct udl_gem_object *obj)
++{
++	int ret;
++
++	ufb->obj = obj;
++	ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
++	drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
++	return ret;
++}
++
++
++static int udlfb_create(struct udl_fbdev *ufbdev,
++			struct drm_fb_helper_surface_size *sizes)
++{
++	struct drm_device *dev = ufbdev->helper.dev;
++	struct fb_info *info;
++	struct device *device = &dev->usbdev->dev;
++	struct drm_framebuffer *fb;
++	struct drm_mode_fb_cmd2 mode_cmd;
++	struct udl_gem_object *obj;
++	uint32_t size;
++	int ret = 0;
++
++	if (sizes->surface_bpp == 24)
++		sizes->surface_bpp = 32;
++
++	mode_cmd.width = sizes->surface_width;
++	mode_cmd.height = sizes->surface_height;
++	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
++
++	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
++							  sizes->surface_depth);
++
++	size = mode_cmd.pitches[0] * mode_cmd.height;
++	size = ALIGN(size, PAGE_SIZE);
++
++	obj = udl_gem_alloc_object(dev, size);
++	if (!obj)
++		goto out;
++
++	ret = udl_gem_vmap(obj);
++	if (ret) {
++		DRM_ERROR("failed to vmap fb\n");
++		goto out_gfree;
++	}
++
++	info = framebuffer_alloc(0, device);
++	if (!info) {
++		ret = -ENOMEM;
++		goto out_gfree;
++	}
++	info->par = ufbdev;
++
++	ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
++	if (ret)
++		goto out_gfree;
++
++	fb = &ufbdev->ufb.base;
++
++	ufbdev->helper.fb = fb;
++	ufbdev->helper.fbdev = info;
++
++	strcpy(info->fix.id, "udldrmfb");
++
++	info->screen_base = ufbdev->ufb.obj->vmapping;
++	info->fix.smem_len = size;
++	info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
++
++	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
++	info->fbops = &udlfb_ops;
++	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
++	drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
++
++	ret = fb_alloc_cmap(&info->cmap, 256, 0);
++	if (ret) {
++		ret = -ENOMEM;
++		goto out_gfree;
++	}
++
++
++	DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
++		      fb->width, fb->height,
++		      ufbdev->ufb.obj->vmapping);
++
++	return ret;
++out_gfree:
++	drm_gem_object_unreference(&ufbdev->ufb.obj->base);
++out:
++	return ret;
++}
++
++static int udl_fb_find_or_create_single(struct drm_fb_helper *helper,
++					struct drm_fb_helper_surface_size *sizes)
++{
++	struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
++	int new_fb = 0;
++	int ret;
++
++	if (!helper->fb) {
++		ret = udlfb_create(ufbdev, sizes);
++		if (ret)
++			return ret;
++
++		new_fb = 1;
++	}
++	return new_fb;
++}
++
++static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
++	.gamma_set = udl_crtc_fb_gamma_set,
++	.gamma_get = udl_crtc_fb_gamma_get,
++	.fb_probe = udl_fb_find_or_create_single,
++};
++
++static void udl_fbdev_destroy(struct drm_device *dev,
++			      struct udl_fbdev *ufbdev)
++{
++	struct fb_info *info;
++	if (ufbdev->helper.fbdev) {
++		info = ufbdev->helper.fbdev;
++		unregister_framebuffer(info);
++		if (info->cmap.len)
++			fb_dealloc_cmap(&info->cmap);
++		framebuffer_release(info);
++	}
++	drm_fb_helper_fini(&ufbdev->helper);
++	drm_framebuffer_cleanup(&ufbdev->ufb.base);
++	drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
++}
++
++int udl_fbdev_init(struct drm_device *dev)
++{
++	struct udl_device *udl = dev->dev_private;
++	int bpp_sel = fb_bpp;
++	struct udl_fbdev *ufbdev;
++	int ret;
++
++	ufbdev = kzalloc(sizeof(struct udl_fbdev), GFP_KERNEL);
++	if (!ufbdev)
++		return -ENOMEM;
++
++	udl->fbdev = ufbdev;
++	ufbdev->helper.funcs = &udl_fb_helper_funcs;
++
++	ret = drm_fb_helper_init(dev, &ufbdev->helper,
++				 1, 1);
++	if (ret) {
++		kfree(ufbdev);
++		return ret;
++
++	}
++
++	drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
++	drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
++	return 0;
++}
++
++void udl_fbdev_cleanup(struct drm_device *dev)
++{
++	struct udl_device *udl = dev->dev_private;
++	if (!udl->fbdev)
++		return;
++
++	udl_fbdev_destroy(dev, udl->fbdev);
++	kfree(udl->fbdev);
++	udl->fbdev = NULL;
++}
++
++void udl_fbdev_unplug(struct drm_device *dev)
++{
++	struct udl_device *udl = dev->dev_private;
++	struct udl_fbdev *ufbdev;
++	if (!udl->fbdev)
++		return;
++
++	ufbdev = udl->fbdev;
++	if (ufbdev->helper.fbdev) {
++		struct fb_info *info;
++		info = ufbdev->helper.fbdev;
++		unlink_framebuffer(info);
++	}
++}
++
++struct drm_framebuffer *
++udl_fb_user_fb_create(struct drm_device *dev,
++		   struct drm_file *file,
++		   struct drm_mode_fb_cmd2 *mode_cmd)
++{
++	struct drm_gem_object *obj;
++	struct udl_framebuffer *ufb;
++	int ret;
++
++	obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
++	if (obj == NULL)
++		return ERR_PTR(-ENOENT);
++
++	ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
++	if (ufb == NULL)
++		return ERR_PTR(-ENOMEM);
++
++	ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
++	if (ret) {
++		kfree(ufb);
++		return ERR_PTR(-EINVAL);
++	}
++	return &ufb->base;
++}
+diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
+new file mode 100644
+index 0000000..92f19ef
+--- /dev/null
++++ b/drivers/gpu/drm/udl/udl_gem.c
+@@ -0,0 +1,241 @@
++/*
++ * Copyright (C) 2012 Red Hat
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License v2. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++#include "drmP.h"
++#include "udl_drv.h"
++#include <linux/shmem_fs.h>
++
++struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
++					    size_t size)
++{
++	struct udl_gem_object *obj;
++
++	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
++	if (obj == NULL)
++		return NULL;
++
++	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
++		kfree(obj);
++		return NULL;
++	}
++
++	return obj;
++}
++
++static int
++udl_gem_create(struct drm_file *file,
++	       struct drm_device *dev,
++	       uint64_t size,
++	       uint32_t *handle_p)
++{
++	struct udl_gem_object *obj;
++	int ret;
++	u32 handle;
++
++	size = roundup(size, PAGE_SIZE);
++
++	obj = udl_gem_alloc_object(dev, size);
++	if (obj == NULL)
++		return -ENOMEM;
++
++	ret = drm_gem_handle_create(file, &obj->base, &handle);
++	if (ret) {
++		drm_gem_object_release(&obj->base);
++		kfree(obj);
++		return ret;
++	}
++
++	drm_gem_object_unreference(&obj->base);
++	*handle_p = handle;
++	return 0;
++}
++
++int udl_dumb_create(struct drm_file *file,
++		    struct drm_device *dev,
++		    struct drm_mode_create_dumb *args)
++{
++	args->pitch = args->width * ((args->bpp + 1) / 8);
++	args->size = args->pitch * args->height;
++	return udl_gem_create(file, dev,
++			      args->size, &args->handle);
++}
++
++int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
++		     uint32_t handle)
++{
++	return drm_gem_handle_delete(file, handle);
++}
++
++int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++	int ret;
++
++	ret = drm_gem_mmap(filp, vma);
++	if (ret)
++		return ret;
++
++	vma->vm_flags &= ~VM_PFNMAP;
++	vma->vm_flags |= VM_MIXEDMAP;
++
++	return ret;
++}
++
++int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
++	struct page *page;
++	unsigned int page_offset;
++	int ret = 0;
++
++	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
++		PAGE_SHIFT;
++
++	if (!obj->pages)
++		return VM_FAULT_SIGBUS;
++
++	page = obj->pages[page_offset];
++	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
++	switch (ret) {
++	case -EAGAIN:
++		set_need_resched();
++	case 0:
++	case -ERESTARTSYS:
++		return VM_FAULT_NOPAGE;
++	case -ENOMEM:
++		return VM_FAULT_OOM;
++	default:
++		return VM_FAULT_SIGBUS;
++	}
++}
++
++int udl_gem_init_object(struct drm_gem_object *obj)
++{
++	BUG();
++
++	return 0;
++}
++
++static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
++{
++	int page_count, i;
++	struct page *page;
++	struct inode *inode;
++	struct address_space *mapping;
++
++	if (obj->pages)
++		return 0;
++
++	page_count = obj->base.size / PAGE_SIZE;
++	BUG_ON(obj->pages != NULL);
++	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
++	if (obj->pages == NULL)
++		return -ENOMEM;
++
++	inode = obj->base.filp->f_path.dentry->d_inode;
++	mapping = inode->i_mapping;
++	gfpmask |= mapping_gfp_mask(mapping);
++
++	for (i = 0; i < page_count; i++) {
++		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
++		if (IS_ERR(page))
++			goto err_pages;
++		obj->pages[i] = page;
++	}
++
++	return 0;
++err_pages:
++	while (i--)
++		page_cache_release(obj->pages[i]);
++	drm_free_large(obj->pages);
++	obj->pages = NULL;
++	return PTR_ERR(page);
++}
++
++static void udl_gem_put_pages(struct udl_gem_object *obj)
++{
++	int page_count = obj->base.size / PAGE_SIZE;
++	int i;
++
++	for (i = 0; i < page_count; i++)
++		page_cache_release(obj->pages[i]);
++
++	drm_free_large(obj->pages);
++	obj->pages = NULL;
++}
++
++int udl_gem_vmap(struct udl_gem_object *obj)
++{
++	int page_count = obj->base.size / PAGE_SIZE;
++	int ret;
++
++	ret = udl_gem_get_pages(obj, GFP_KERNEL);
++	if (ret)
++		return ret;
++
++	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
++	if (!obj->vmapping)
++		return -ENOMEM;
++	return 0;
++}
++
++void udl_gem_vunmap(struct udl_gem_object *obj)
++{
++	if (obj->vmapping)
++		vunmap(obj->vmapping);
++
++	udl_gem_put_pages(obj);
++}
++
++void udl_gem_free_object(struct drm_gem_object *gem_obj)
++{
++	struct udl_gem_object *obj = to_udl_bo(gem_obj);
++
++	if (obj->vmapping)
++		udl_gem_vunmap(obj);
++
++	if (obj->pages)
++		udl_gem_put_pages(obj);
++
++	if (gem_obj->map_list.map)
++		drm_gem_free_mmap_offset(gem_obj);
++}
++
++/* the dumb interface doesn't work with the GEM straight MMAP
++   interface, it expects to do MMAP on the drm fd, like normal */
++int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
++		 uint32_t handle, uint64_t *offset)
++{
++	struct udl_gem_object *gobj;
++	struct drm_gem_object *obj;
++	int ret = 0;
++
++	mutex_lock(&dev->struct_mutex);
++	obj = drm_gem_object_lookup(dev, file, handle);
++	if (obj == NULL) {
++		ret = -ENOENT;
++		goto unlock;
++	}
++	gobj = to_udl_bo(obj);
++
++	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
++	if (ret)
++		return ret;
++	if (!gobj->base.map_list.map) {
++		ret = drm_gem_create_mmap_offset(obj);
++		if (ret)
++			goto out;
++	}
++
++	*offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;
++
++out:
++	drm_gem_object_unreference(&gobj->base);
++unlock:
++	mutex_unlock(&dev->struct_mutex);
++	return ret;
++}
+diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
+new file mode 100644
+index 0000000..a8d5f09
+--- /dev/null
++++ b/drivers/gpu/drm/udl/udl_main.c
+@@ -0,0 +1,338 @@
++/*
++ * Copyright (C) 2012 Red Hat
++ *
++ * based in parts on udlfb.c:
++ * Copyright (C) 2009 Roberto De Ioris <roberto at unbit.it>
++ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml at gmail.com>
++ * Copyright (C) 2009 Bernie Thompson <bernie at plugable.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License v2. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++#include "drmP.h"
++#include "udl_drv.h"
++
++/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
++#define BULK_SIZE 512
++
++#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
++#define WRITES_IN_FLIGHT (4)
++#define MAX_VENDOR_DESCRIPTOR_SIZE 256
++
++#define GET_URB_TIMEOUT	HZ
++#define FREE_URB_TIMEOUT (HZ*2)
++
++static int udl_parse_vendor_descriptor(struct drm_device *dev,
++				       struct usb_device *usbdev)
++{
++	struct udl_device *udl = dev->dev_private;
++	char *desc;
++	char *buf;
++	char *desc_end;
++
++	u8 total_len = 0;
++
++	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
++	if (!buf)
++		return false;
++	desc = buf;
++
++	total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
++				    0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
++	if (total_len > 5) {
++		DRM_INFO("vendor descriptor length:%x data:%02x %02x %02x %02x" \
++			"%02x %02x %02x %02x %02x %02x %02x\n",
++			total_len, desc[0],
++			desc[1], desc[2], desc[3], desc[4], desc[5], desc[6],
++			desc[7], desc[8], desc[9], desc[10]);
++
++		if ((desc[0] != total_len) || /* descriptor length */
++		    (desc[1] != 0x5f) ||   /* vendor descriptor type */
++		    (desc[2] != 0x01) ||   /* version (2 bytes) */
++		    (desc[3] != 0x00) ||
++		    (desc[4] != total_len - 2)) /* length after type */
++			goto unrecognized;
++
++		desc_end = desc + total_len;
++		desc += 5; /* the fixed header we've already parsed */
++
++		while (desc < desc_end) {
++			u8 length;
++			u16 key;
++
++			key = *((u16 *) desc);
++			desc += sizeof(u16);
++			length = *desc;
++			desc++;
++
++			switch (key) {
++			case 0x0200: { /* max_area */
++				u32 max_area;
++				max_area = le32_to_cpu(*((u32 *)desc));
++				DRM_DEBUG("DL chip limited to %d pixel modes\n",
++					max_area);
++				udl->sku_pixel_limit = max_area;
++				break;
++			}
++			default:
++				break;
++			}
++			desc += length;
++		}
++	}
++
++	goto success;
++
++unrecognized:
++	/* allow udlfb to load for now even if firmware unrecognized */
++	DRM_ERROR("Unrecognized vendor firmware descriptor\n");
++
++success:
++	kfree(buf);
++	return true;
++}
++
++static void udl_release_urb_work(struct work_struct *work)
++{
++	struct urb_node *unode = container_of(work, struct urb_node,
++					      release_urb_work.work);
++
++	up(&unode->dev->urbs.limit_sem);
++}
++
++void udl_urb_completion(struct urb *urb)
++{
++	struct urb_node *unode = urb->context;
++	struct udl_device *udl = unode->dev;
++	unsigned long flags;
++
++	/* sync/async unlink faults aren't errors */
++	if (urb->status) {
++		if (!(urb->status == -ENOENT ||
++		    urb->status == -ECONNRESET ||
++		    urb->status == -ESHUTDOWN)) {
++			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
++				__func__, urb->status);
++			atomic_set(&udl->lost_pixels, 1);
++		}
++	}
++
++	urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */
++
++	spin_lock_irqsave(&udl->urbs.lock, flags);
++	list_add_tail(&unode->entry, &udl->urbs.list);
++	udl->urbs.available++;
++	spin_unlock_irqrestore(&udl->urbs.lock, flags);
++
++#if 0
++	/*
++	 * When using fb_defio, we deadlock if up() is called
++	 * while another is waiting. So queue to another process.
++	 */
++	if (fb_defio)
++		schedule_delayed_work(&unode->release_urb_work, 0);
++	else
++#endif
++		up(&udl->urbs.limit_sem);
++}
++
++static void udl_free_urb_list(struct drm_device *dev)
++{
++	struct udl_device *udl = dev->dev_private;
++	int count = udl->urbs.count;
++	struct list_head *node;
++	struct urb_node *unode;
++	struct urb *urb;
++	int ret;
++	unsigned long flags;
++
++	DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
++
++	/* keep waiting and freeing, until we've got 'em all */
++	while (count--) {
++
++		/* Getting interrupted means a leak, but ok at shutdown*/
++		ret = down_interruptible(&udl->urbs.limit_sem);
++		if (ret)
++			break;
++
++		spin_lock_irqsave(&udl->urbs.lock, flags);
++
++		node = udl->urbs.list.next; /* have reserved one with sem */
++		list_del_init(node);
++
++		spin_unlock_irqrestore(&udl->urbs.lock, flags);
++
++		unode = list_entry(node, struct urb_node, entry);
++		urb = unode->urb;
++
++		/* Free each separately allocated piece */
++		usb_free_coherent(urb->dev, udl->urbs.size,
++				  urb->transfer_buffer, urb->transfer_dma);
++		usb_free_urb(urb);
++		kfree(node);
++	}
++	udl->urbs.count = 0;
++}
++
++static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
++{
++	struct udl_device *udl = dev->dev_private;
++	int i = 0;
++	struct urb *urb;
++	struct urb_node *unode;
++	char *buf;
++
++	spin_lock_init(&udl->urbs.lock);
++
++	udl->urbs.size = size;
++	INIT_LIST_HEAD(&udl->urbs.list);
++
++	while (i < count) {
++		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
++		if (!unode)
++			break;
++		unode->dev = udl;
++
++		INIT_DELAYED_WORK(&unode->release_urb_work,
++			  udl_release_urb_work);
++
++		urb = usb_alloc_urb(0, GFP_KERNEL);
++		if (!urb) {
++			kfree(unode);
++			break;
++		}
++		unode->urb = urb;
++
++		buf = usb_alloc_coherent(udl->ddev->usbdev, MAX_TRANSFER, GFP_KERNEL,
++					 &urb->transfer_dma);
++		if (!buf) {
++			kfree(unode);
++			usb_free_urb(urb);
++			break;
++		}
++
++		/* urb->transfer_buffer_length set to actual before submit */
++		usb_fill_bulk_urb(urb, udl->ddev->usbdev, usb_sndbulkpipe(udl->ddev->usbdev, 1),
++			buf, size, udl_urb_completion, unode);
++		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
++
++		list_add_tail(&unode->entry, &udl->urbs.list);
++
++		i++;
++	}
++
++	sema_init(&udl->urbs.limit_sem, i);
++	udl->urbs.count = i;
++	udl->urbs.available = i;
++
++	DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
++
++	return i;
++}
++
++struct urb *udl_get_urb(struct drm_device *dev)
++{
++	struct udl_device *udl = dev->dev_private;
++	int ret = 0;
++	struct list_head *entry;
++	struct urb_node *unode;
++	struct urb *urb = NULL;
++	unsigned long flags;
++
++	/* Wait for an in-flight buffer to complete and get re-queued */
++	ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
++	if (ret) {
++		atomic_set(&udl->lost_pixels, 1);
++		DRM_INFO("wait for urb interrupted: %x available: %d\n",
++		       ret, udl->urbs.available);
++		goto error;
++	}
++
++	spin_lock_irqsave(&udl->urbs.lock, flags);
++
++	BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
++	entry = udl->urbs.list.next;
++	list_del_init(entry);
++	udl->urbs.available--;
++
++	spin_unlock_irqrestore(&udl->urbs.lock, flags);
++
++	unode = list_entry(entry, struct urb_node, entry);
++	urb = unode->urb;
++
++error:
++	return urb;
++}
++
++int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
++{
++	struct udl_device *udl = dev->dev_private;
++	int ret;
++
++	BUG_ON(len > udl->urbs.size);
++
++	urb->transfer_buffer_length = len; /* set to actual payload len */
++	ret = usb_submit_urb(urb, GFP_ATOMIC);
++	if (ret) {
++		udl_urb_completion(urb); /* because no one else will */
++		atomic_set(&udl->lost_pixels, 1);
++		DRM_ERROR("usb_submit_urb error %x\n", ret);
++	}
++	return ret;
++}
++
++int udl_driver_load(struct drm_device *dev, unsigned long flags)
++{
++	struct udl_device *udl;
++	int ret;
++
++	DRM_DEBUG("\n");
++	udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
++	if (!udl)
++		return -ENOMEM;
++
++	udl->ddev = dev;
++	dev->dev_private = udl;
++
++	if (!udl_parse_vendor_descriptor(dev, dev->usbdev)) {
++		DRM_ERROR("firmware not recognized. Assume incompatible device\n");
++		goto err;
++	}
++
++	if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
++		ret = -ENOMEM;
++		DRM_ERROR("udl_alloc_urb_list failed\n");
++		goto err;
++	}
++
++	DRM_DEBUG("\n");
++	ret = udl_modeset_init(dev);
++
++	ret = udl_fbdev_init(dev);
++	return 0;
++err:
++	kfree(udl);
++	DRM_ERROR("%d\n", ret);
++	return ret;
++}
++
++int udl_drop_usb(struct drm_device *dev)
++{
++	udl_free_urb_list(dev);
++	return 0;
++}
++
++int udl_driver_unload(struct drm_device *dev)
++{
++	struct udl_device *udl = dev->dev_private;
++
++	if (udl->urbs.count)
++		udl_free_urb_list(dev);
++
++	udl_fbdev_cleanup(dev);
++	udl_modeset_cleanup(dev);
++	kfree(udl);
++	return 0;
++}
+diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
+new file mode 100644
+index 0000000..b3ecb3d
+--- /dev/null
++++ b/drivers/gpu/drm/udl/udl_modeset.c
+@@ -0,0 +1,414 @@
++/*
++ * Copyright (C) 2012 Red Hat
++ *
++ * based in parts on udlfb.c:
++ * Copyright (C) 2009 Roberto De Ioris <roberto at unbit.it>
++ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml at gmail.com>
++ * Copyright (C) 2009 Bernie Thompson <bernie at plugable.com>
++
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License v2. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++#include "drmP.h"
++#include "drm_crtc.h"
++#include "drm_crtc_helper.h"
++#include "udl_drv.h"
++
++/*
++ * All DisplayLink bulk operations start with 0xAF, followed by specific code
++ * All operations are written to buffers which then later get sent to device
++ */
++static char *udl_set_register(char *buf, u8 reg, u8 val)
++{
++	*buf++ = 0xAF;
++	*buf++ = 0x20;
++	*buf++ = reg;
++	*buf++ = val;
++	return buf;
++}
++
++static char *udl_vidreg_lock(char *buf)
++{
++	return udl_set_register(buf, 0xFF, 0x00);
++}
++
++static char *udl_vidreg_unlock(char *buf)
++{
++	return udl_set_register(buf, 0xFF, 0xFF);
++}
++
++/*
++ * On/Off for driving the DisplayLink framebuffer to the display
++ *  0x00 H and V sync on
++ *  0x01 H and V sync off (screen blank but powered)
++ *  0x07 DPMS powerdown (requires modeset to come back)
++ */
++static char *udl_enable_hvsync(char *buf, bool enable)
++{
++	if (enable)
++		return udl_set_register(buf, 0x1F, 0x00);
++	else
++		return udl_set_register(buf, 0x1F, 0x07);
++}
++
++static char *udl_set_color_depth(char *buf, u8 selection)
++{
++	return udl_set_register(buf, 0x00, selection);
++}
++
++static char *udl_set_base16bpp(char *wrptr, u32 base)
++{
++	/* the base pointer is 16 bits wide, 0x20 is hi byte. */
++	wrptr = udl_set_register(wrptr, 0x20, base >> 16);
++	wrptr = udl_set_register(wrptr, 0x21, base >> 8);
++	return udl_set_register(wrptr, 0x22, base);
++}
++
++/*
++ * DisplayLink HW has separate 16bpp and 8bpp framebuffers.
++ * In 24bpp modes, the low 323 RGB bits go in the 8bpp framebuffer
++ */
++static char *udl_set_base8bpp(char *wrptr, u32 base)
++{
++	wrptr = udl_set_register(wrptr, 0x26, base >> 16);
++	wrptr = udl_set_register(wrptr, 0x27, base >> 8);
++	return udl_set_register(wrptr, 0x28, base);
++}
++
++static char *udl_set_register_16(char *wrptr, u8 reg, u16 value)
++{
++	wrptr = udl_set_register(wrptr, reg, value >> 8);
++	return udl_set_register(wrptr, reg+1, value);
++}
++
++/*
++ * This is kind of weird because the controller takes some
++ * register values in a different byte order than other registers.
++ */
++static char *udl_set_register_16be(char *wrptr, u8 reg, u16 value)
++{
++	wrptr = udl_set_register(wrptr, reg, value);
++	return udl_set_register(wrptr, reg+1, value >> 8);
++}
++
++/*
++ * LFSR is linear feedback shift register. The reason we have this is
++ * because the display controller needs to minimize the clock depth of
++ * various counters used in the display path. So this code reverses the
++ * provided value into the lfsr16 value by counting backwards to get
++ * the value that needs to be set in the hardware comparator to get the
++ * same actual count. This makes sense once you read above a couple of
++ * times and think about it from a hardware perspective.
++ */
++static u16 udl_lfsr16(u16 actual_count)
++{
++	u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */
++
++	while (actual_count--) {
++		lv =	 ((lv << 1) |
++			(((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1))
++			& 0xFFFF;
++	}
++
++	return (u16) lv;
++}
++
++/*
++ * This does LFSR conversion on the value that is to be written.
++ * See LFSR explanation above for more detail.
++ */
++static char *udl_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
++{
++	return udl_set_register_16(wrptr, reg, udl_lfsr16(value));
++}
++
++/*
++ * This takes a standard fbdev screeninfo struct and all of its monitor mode
++ * details and converts them into the DisplayLink equivalent register commands.
++  ERR(vreg(dev,               0x00, (color_depth == 16) ? 0 : 1));
++  ERR(vreg_lfsr16(dev,        0x01, xDisplayStart));
++  ERR(vreg_lfsr16(dev,        0x03, xDisplayEnd));
++  ERR(vreg_lfsr16(dev,        0x05, yDisplayStart));
++  ERR(vreg_lfsr16(dev,        0x07, yDisplayEnd));
++  ERR(vreg_lfsr16(dev,        0x09, xEndCount));
++  ERR(vreg_lfsr16(dev,        0x0B, hSyncStart));
++  ERR(vreg_lfsr16(dev,        0x0D, hSyncEnd));
++  ERR(vreg_big_endian(dev,    0x0F, hPixels));
++  ERR(vreg_lfsr16(dev,        0x11, yEndCount));
++  ERR(vreg_lfsr16(dev,        0x13, vSyncStart));
++  ERR(vreg_lfsr16(dev,        0x15, vSyncEnd));
++  ERR(vreg_big_endian(dev,    0x17, vPixels));
++  ERR(vreg_little_endian(dev, 0x1B, pixelClock5KHz));
++
++  ERR(vreg(dev,               0x1F, 0));
++
++  ERR(vbuf(dev, WRITE_VIDREG_UNLOCK, DSIZEOF(WRITE_VIDREG_UNLOCK)));
++ */
++static char *udl_set_vid_cmds(char *wrptr, struct drm_display_mode *mode)
++{
++	u16 xds, yds;
++	u16 xde, yde;
++	u16 yec;
++
++	/* x display start */
++	xds = mode->crtc_htotal - mode->crtc_hsync_start;
++	wrptr = udl_set_register_lfsr16(wrptr, 0x01, xds);
++	/* x display end */
++	xde = xds + mode->crtc_hdisplay;
++	wrptr = udl_set_register_lfsr16(wrptr, 0x03, xde);
++
++	/* y display start */
++	yds = mode->crtc_vtotal - mode->crtc_vsync_start;
++	wrptr = udl_set_register_lfsr16(wrptr, 0x05, yds);
++	/* y display end */
++	yde = yds + mode->crtc_vdisplay;
++	wrptr = udl_set_register_lfsr16(wrptr, 0x07, yde);
++
++	/* x end count is active + blanking - 1 */
++	wrptr = udl_set_register_lfsr16(wrptr, 0x09,
++					mode->crtc_htotal - 1);
++
++	/* libdlo hardcodes hsync start to 1 */
++	wrptr = udl_set_register_lfsr16(wrptr, 0x0B, 1);
++
++	/* hsync end is width of sync pulse + 1 */
++	wrptr = udl_set_register_lfsr16(wrptr, 0x0D,
++					mode->crtc_hsync_end - mode->crtc_hsync_start + 1);
++
++	/* hpixels is active pixels */
++	wrptr = udl_set_register_16(wrptr, 0x0F, mode->hdisplay);
++
++	/* yendcount is vertical active + vertical blanking */
++	yec = mode->crtc_vtotal;
++	wrptr = udl_set_register_lfsr16(wrptr, 0x11, yec);
++
++	/* libdlo hardcodes vsync start to 0 */
++	wrptr = udl_set_register_lfsr16(wrptr, 0x13, 0);
++
++	/* vsync end is width of vsync pulse */
++	wrptr = udl_set_register_lfsr16(wrptr, 0x15, mode->crtc_vsync_end - mode->crtc_vsync_start);
++
++	/* vpixels is active pixels */
++	wrptr = udl_set_register_16(wrptr, 0x17, mode->crtc_vdisplay);
++
++	wrptr = udl_set_register_16be(wrptr, 0x1B,
++				      mode->clock / 5);
++
++	return wrptr;
++}
++
++static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct udl_device *udl = dev->dev_private;
++	struct urb *urb;
++	char *buf;
++	int retval;
++
++	urb = udl_get_urb(dev);
++	if (!urb)
++		return -ENOMEM;
++
++	buf = (char *)urb->transfer_buffer;
++
++	memcpy(buf, udl->mode_buf, udl->mode_buf_len);
++	retval = udl_submit_urb(dev, urb, udl->mode_buf_len);
++	DRM_INFO("write mode info %d\n", udl->mode_buf_len);
++	return retval;
++}
++
++
++static void udl_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++	struct drm_device *dev = crtc->dev;
++	struct udl_device *udl = dev->dev_private;
++	int retval;
++
++	if (mode == DRM_MODE_DPMS_OFF) {
++		char *buf;
++		struct urb *urb;
++		urb = udl_get_urb(dev);
++		if (!urb)
++			return;
++
++		buf = (char *)urb->transfer_buffer;
++		buf = udl_vidreg_lock(buf);
++		buf = udl_enable_hvsync(buf, false);
++		buf = udl_vidreg_unlock(buf);
++
++		retval = udl_submit_urb(dev, urb, buf - (char *)
++					urb->transfer_buffer);
++	} else {
++		if (udl->mode_buf_len == 0) {
++			DRM_ERROR("Trying to enable DPMS with no mode\n");
++			return;
++		}
++		udl_crtc_write_mode_to_hw(crtc);
++	}
++
++}
++
++static bool udl_crtc_mode_fixup(struct drm_crtc *crtc,
++				  struct drm_display_mode *mode,
++				  struct drm_display_mode *adjusted_mode)
++
++{
++	return true;
++}
++
++#if 0
++static int
++udl_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
++			   int x, int y, enum mode_set_atomic state)
++{
++	return 0;
++}
++
++static int
++udl_pipe_set_base(struct drm_crtc *crtc, int x, int y,
++		    struct drm_framebuffer *old_fb)
++{
++	return 0;
++}
++#endif
++
++static int udl_crtc_mode_set(struct drm_crtc *crtc,
++			       struct drm_display_mode *mode,
++			       struct drm_display_mode *adjusted_mode,
++			       int x, int y,
++			       struct drm_framebuffer *old_fb)
++
++{
++	struct drm_device *dev = crtc->dev;
++	struct udl_framebuffer *ufb = to_udl_fb(crtc->fb);
++	struct udl_device *udl = dev->dev_private;
++	char *buf;
++	char *wrptr;
++	int color_depth = 0;
++
++	buf = (char *)udl->mode_buf;
++
++	/* for now we just clip 24 -> 16 - if we fix that fix this */
++	/*if  (crtc->fb->bits_per_pixel != 16)
++	  color_depth = 1; */
++
++	/* This first section has to do with setting the base address on the
++	* controller * associated with the display. There are 2 base
++	* pointers, currently, we only * use the 16 bpp segment.
++	*/
++	wrptr = udl_vidreg_lock(buf);
++	wrptr = udl_set_color_depth(wrptr, color_depth);
++	/* set base for 16bpp segment to 0 */
++	wrptr = udl_set_base16bpp(wrptr, 0);
++	/* set base for 8bpp segment to end of fb */
++	wrptr = udl_set_base8bpp(wrptr, 2 * mode->vdisplay * mode->hdisplay);
++
++	wrptr = udl_set_vid_cmds(wrptr, adjusted_mode);
++	wrptr = udl_enable_hvsync(wrptr, true);
++	wrptr = udl_vidreg_unlock(wrptr);
++
++	ufb->active_16 = true;
++	if (old_fb) {
++		struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
++		uold_fb->active_16 = false;
++	}
++	udl->mode_buf_len = wrptr - buf;
++
++	/* damage all of it */
++	udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
++	return 0;
++}
++
++
++static void udl_crtc_disable(struct drm_crtc *crtc)
++{
++
++
++}
++
++static void udl_crtc_destroy(struct drm_crtc *crtc)
++{
++	drm_crtc_cleanup(crtc);
++	kfree(crtc);
++}
++
++static void udl_load_lut(struct drm_crtc *crtc)
++{
++}
++
++static void udl_crtc_prepare(struct drm_crtc *crtc)
++{
++}
++
++static void udl_crtc_commit(struct drm_crtc *crtc)
++{
++	udl_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
++}
++
++static struct drm_crtc_helper_funcs udl_helper_funcs = {
++	.dpms = udl_crtc_dpms,
++	.mode_fixup = udl_crtc_mode_fixup,
++	.mode_set = udl_crtc_mode_set,
++	.prepare = udl_crtc_prepare,
++	.commit = udl_crtc_commit,
++	.disable = udl_crtc_disable,
++	.load_lut = udl_load_lut,
++};
++
++static const struct drm_crtc_funcs udl_crtc_funcs = {
++	.set_config = drm_crtc_helper_set_config,
++	.destroy = udl_crtc_destroy,
++};
++
++int udl_crtc_init(struct drm_device *dev)
++{
++	struct drm_crtc *crtc;
++
++	crtc = kzalloc(sizeof(struct drm_crtc) + sizeof(struct drm_connector *), GFP_KERNEL);
++	if (crtc == NULL)
++		return -ENOMEM;
++
++	drm_crtc_init(dev, crtc, &udl_crtc_funcs);
++	drm_crtc_helper_add(crtc, &udl_helper_funcs);
++
++	return 0;
++}
++
++static const struct drm_mode_config_funcs udl_mode_funcs = {
++	.fb_create = udl_fb_user_fb_create,
++	.output_poll_changed = NULL,
++};
++
++int udl_modeset_init(struct drm_device *dev)
++{
++	struct drm_encoder *encoder;
++	drm_mode_config_init(dev);
++
++	dev->mode_config.min_width = 640;
++	dev->mode_config.min_height = 480;
++
++	dev->mode_config.max_width = 2048;
++	dev->mode_config.max_height = 2048;
++
++	dev->mode_config.prefer_shadow = 0;
++	dev->mode_config.preferred_depth = 24;
++
++	dev->mode_config.funcs = (void *)&udl_mode_funcs;
++
++	drm_mode_create_dirty_info_property(dev);
++
++	udl_crtc_init(dev);
++
++	encoder = udl_encoder_init(dev);
++
++	udl_connector_init(dev, encoder);
++
++	return 0;
++}
++
++void udl_modeset_cleanup(struct drm_device *dev)
++{
++	drm_mode_config_cleanup(dev);
++}
+diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
+new file mode 100644
+index 0000000..fc11344
+--- /dev/null
++++ b/drivers/gpu/drm/udl/udl_transfer.c
+@@ -0,0 +1,254 @@
++/*
++ * Copyright (C) 2012 Red Hat
++ * based in parts on udlfb.c:
++ * Copyright (C) 2009 Roberto De Ioris <roberto at unbit.it>
++ * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml at gmail.com>
++ * Copyright (C) 2009 Bernie Thompson <bernie at plugable.com>
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License v2. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/fb.h>
++#include <linux/prefetch.h>
++
++#include "drmP.h"
++#include "udl_drv.h"
++
++#define MAX_CMD_PIXELS		255
++
++#define RLX_HEADER_BYTES	7
++#define MIN_RLX_PIX_BYTES       4
++#define MIN_RLX_CMD_BYTES	(RLX_HEADER_BYTES + MIN_RLX_PIX_BYTES)
++
++#define RLE_HEADER_BYTES	6
++#define MIN_RLE_PIX_BYTES	3
++#define MIN_RLE_CMD_BYTES	(RLE_HEADER_BYTES + MIN_RLE_PIX_BYTES)
++
++#define RAW_HEADER_BYTES	6
++#define MIN_RAW_PIX_BYTES	2
++#define MIN_RAW_CMD_BYTES	(RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
++
++/*
++ * Trims identical data from front and back of line
++ * Sets new front buffer address and width
++ * And returns byte count of identical pixels
++ * Assumes CPU natural alignment (unsigned long)
++ * for back and front buffer ptrs and width
++ */
++#if 0
++static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
++{
++	int j, k;
++	const unsigned long *back = (const unsigned long *) bback;
++	const unsigned long *front = (const unsigned long *) *bfront;
++	const int width = *width_bytes / sizeof(unsigned long);
++	int identical = width;
++	int start = width;
++	int end = width;
++
++	prefetch((void *) front);
++	prefetch((void *) back);
++
++	for (j = 0; j < width; j++) {
++		if (back[j] != front[j]) {
++			start = j;
++			break;
++		}
++	}
++
++	for (k = width - 1; k > j; k--) {
++		if (back[k] != front[k]) {
++			end = k+1;
++			break;
++		}
++	}
++
++	identical = start + (width - end);
++	*bfront = (u8 *) &front[start];
++	*width_bytes = (end - start) * sizeof(unsigned long);
++
++	return identical * sizeof(unsigned long);
++}
++#endif
++
++static inline u16 pixel32_to_be16p(const uint8_t *pixel)
++{
++	uint32_t pix = *(uint32_t *)pixel;
++	u16 retval;
++
++	retval =  (((pix >> 3) & 0x001f) |
++		   ((pix >> 5) & 0x07e0) |
++		   ((pix >> 8) & 0xf800));
++	return retval;
++}
++
++/*
++ * Render a command stream for an encoded horizontal line segment of pixels.
++ *
++ * A command buffer holds several commands.
++ * It always begins with a fresh command header
++ * (the protocol doesn't require this, but we enforce it to allow
++ * multiple buffers to be potentially encoded and sent in parallel).
++ * A single command encodes one contiguous horizontal line of pixels
++ *
++ * The function relies on the client to do all allocation, so that
++ * rendering can be done directly to output buffers (e.g. USB URBs).
++ * The function fills the supplied command buffer, providing information
++ * on where it left off, so the client may call in again with additional
++ * buffers if the line will take several buffers to complete.
++ *
++ * A single command can transmit a maximum of 256 pixels,
++ * regardless of the compression ratio (protocol design limit).
++ * To the hardware, 0 for a size byte means 256
++ *
++ * Rather than 256 pixel commands which are either rl or raw encoded,
++ * the rlx command simply assumes alternating raw and rl spans within one cmd.
++ * This has a slightly larger header overhead, but produces more even results.
++ * It also processes all data (read and write) in a single pass.
++ * Performance benchmarks of common cases show it having just slightly better
++ * compression than 256 pixel raw or rle commands, with similar CPU consumpion.
++ * But for very rl friendly data, will compress not quite as well.
++ */
++static void udl_compress_hline16(
++	const u8 **pixel_start_ptr,
++	const u8 *const pixel_end,
++	uint32_t *device_address_ptr,
++	uint8_t **command_buffer_ptr,
++	const uint8_t *const cmd_buffer_end, int bpp)
++{
++	const u8 *pixel = *pixel_start_ptr;
++	uint32_t dev_addr  = *device_address_ptr;
++	uint8_t *cmd = *command_buffer_ptr;
++
++	while ((pixel_end > pixel) &&
++	       (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
++		uint8_t *raw_pixels_count_byte = 0;
++		uint8_t *cmd_pixels_count_byte = 0;
++		const u8 *raw_pixel_start = 0;
++		const u8 *cmd_pixel_start, *cmd_pixel_end = 0;
++
++		prefetchw((void *) cmd); /* pull in one cache line at least */
++
++		*cmd++ = 0xaf;
++		*cmd++ = 0x6b;
++		*cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
++		*cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF);
++		*cmd++ = (uint8_t) ((dev_addr) & 0xFF);
++
++		cmd_pixels_count_byte = cmd++; /*  we'll know this later */
++		cmd_pixel_start = pixel;
++
++		raw_pixels_count_byte = cmd++; /*  we'll know this later */
++		raw_pixel_start = pixel;
++
++		cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1,
++			min((int)(pixel_end - pixel) / bpp,
++			    (int)(cmd_buffer_end - cmd) / 2))) * bpp;
++
++		prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
++
++		while (pixel < cmd_pixel_end) {
++			const u8 * const repeating_pixel = pixel;
++
++			if (bpp == 2)
++				*(uint16_t *)cmd = cpu_to_be16p((uint16_t *)pixel);
++			else if (bpp == 4)
++				*(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16p(pixel));
++
++			cmd += 2;
++			pixel += bpp;
++
++			if (unlikely((pixel < cmd_pixel_end) &&
++				     (!memcmp(pixel, repeating_pixel, bpp)))) {
++				/* go back and fill in raw pixel count */
++				*raw_pixels_count_byte = (((repeating_pixel -
++						raw_pixel_start) / bpp) + 1) & 0xFF;
++
++				while ((pixel < cmd_pixel_end)
++				       && (!memcmp(pixel, repeating_pixel, bpp))) {
++					pixel += bpp;
++				}
++
++				/* immediately after raw data is repeat byte */
++				*cmd++ = (((pixel - repeating_pixel) / bpp) - 1) & 0xFF;
++
++				/* Then start another raw pixel span */
++				raw_pixel_start = pixel;
++				raw_pixels_count_byte = cmd++;
++			}
++		}
++
++		if (pixel > raw_pixel_start) {
++			/* finalize last RAW span */
++			*raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
++		}
++
++		*cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
++		dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2;
++	}
++
++	if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
++		/* Fill leftover bytes with no-ops */
++		if (cmd_buffer_end > cmd)
++			memset(cmd, 0xAF, cmd_buffer_end - cmd);
++		cmd = (uint8_t *) cmd_buffer_end;
++	}
++
++	*command_buffer_ptr = cmd;
++	*pixel_start_ptr = pixel;
++	*device_address_ptr = dev_addr;
++
++	return;
++}
++
++/*
++ * There are 3 copies of every pixel: The front buffer that the fbdev
++ * client renders to, the actual framebuffer across the USB bus in hardware
++ * (that we can only write to, slowly, and can never read), and (optionally)
++ * our shadow copy that tracks what's been sent to that hardware buffer.
++ */
++int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
++		     const char *front, char **urb_buf_ptr,
++		     u32 byte_offset, u32 device_byte_offset,
++		     u32 byte_width,
++		     int *ident_ptr, int *sent_ptr)
++{
++	const u8 *line_start, *line_end, *next_pixel;
++	u32 base16 = 0 + (device_byte_offset / bpp) * 2;
++	struct urb *urb = *urb_ptr;
++	u8 *cmd = *urb_buf_ptr;
++	u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
++
++	line_start = (u8 *) (front + byte_offset);
++	next_pixel = line_start;
++	line_end = next_pixel + byte_width;
++
++	while (next_pixel < line_end) {
++
++		udl_compress_hline16(&next_pixel,
++			     line_end, &base16,
++			     (u8 **) &cmd, (u8 *) cmd_end, bpp);
++
++		if (cmd >= cmd_end) {
++			int len = cmd - (u8 *) urb->transfer_buffer;
++			if (udl_submit_urb(dev, urb, len))
++				return 1; /* lost pixels is set */
++			*sent_ptr += len;
++			urb = udl_get_urb(dev);
++			if (!urb)
++				return 1; /* lost_pixels is set */
++			*urb_ptr = urb;
++			cmd = urb->transfer_buffer;
++			cmd_end = &cmd[urb->transfer_buffer_length];
++		}
++	}
++
++	*urb_buf_ptr = cmd;
++
++	return 0;
++}
++
+diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
+index a83e86d..02661f3 100644
+--- a/drivers/gpu/drm/via/via_drv.c
++++ b/drivers/gpu/drm/via/via_drv.c
+@@ -30,16 +30,52 @@
+ 
+ #include "drm_pciids.h"
+ 
++static int via_driver_open(struct drm_device *dev, struct drm_file *file)
++{
++	struct via_file_private *file_priv;
++
++	DRM_DEBUG_DRIVER("\n");
++	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
++	if (!file_priv)
++		return -ENOMEM;
++
++	file->driver_priv = file_priv;
++
++	INIT_LIST_HEAD(&file_priv->obj_list);
++
++	return 0;
++}
++
++void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
++{
++	struct via_file_private *file_priv = file->driver_priv;
++
++	kfree(file_priv);
++}
++
+ static struct pci_device_id pciidlist[] = {
+ 	viadrv_PCI_IDS
+ };
+ 
++static const struct file_operations via_driver_fops = {
++	.owner = THIS_MODULE,
++	.open = drm_open,
++	.release = drm_release,
++	.unlocked_ioctl = drm_ioctl,
++	.mmap = drm_mmap,
++	.poll = drm_poll,
++	.fasync = drm_fasync,
++	.llseek = noop_llseek,
++};
++
+ static struct drm_driver driver = {
+ 	.driver_features =
+ 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
+ 	    DRIVER_IRQ_SHARED,
+ 	.load = via_driver_load,
+ 	.unload = via_driver_unload,
++	.open = via_driver_open,
++	.postclose = via_driver_postclose,
+ 	.context_dtor = via_final_context,
+ 	.get_vblank_counter = via_get_vblank_counter,
+ 	.enable_vblank = via_enable_vblank,
+@@ -54,17 +90,7 @@ static struct drm_driver driver = {
+ 	.reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
+ 	.lastclose = via_lastclose,
+ 	.ioctls = via_ioctls,
+-	.fops = {
+-		.owner = THIS_MODULE,
+-		.open = drm_open,
+-		.release = drm_release,
+-		.unlocked_ioctl = drm_ioctl,
+-		.mmap = drm_mmap,
+-		.poll = drm_poll,
+-		.fasync = drm_fasync,
+-		.llseek = noop_llseek,
+-		},
+-
++	.fops = &via_driver_fops,
+ 	.name = DRIVER_NAME,
+ 	.desc = DRIVER_DESC,
+ 	.date = DRIVER_DATE,
+diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
+index 9cf87d9..88edacc 100644
+--- a/drivers/gpu/drm/via/via_drv.h
++++ b/drivers/gpu/drm/via/via_drv.h
+@@ -24,7 +24,7 @@
+ #ifndef _VIA_DRV_H_
+ #define _VIA_DRV_H_
+ 
+-#include "drm_sman.h"
++#include "drm_mm.h"
+ #define DRIVER_AUTHOR	"Various"
+ 
+ #define DRIVER_NAME		"via"
+@@ -88,9 +88,12 @@ typedef struct drm_via_private {
+ 	uint32_t irq_pending_mask;
+ 	int *irq_map;
+ 	unsigned int idle_fault;
+-	struct drm_sman sman;
+ 	int vram_initialized;
++	struct drm_mm vram_mm;
+ 	int agp_initialized;
++	struct drm_mm agp_mm;
++	/** Mapping of userspace keys to mm objects */
++	struct idr object_idr;
+ 	unsigned long vram_offset;
+ 	unsigned long agp_offset;
+ 	drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
+diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
+index 6cca9a7..c126182 100644
+--- a/drivers/gpu/drm/via/via_map.c
++++ b/drivers/gpu/drm/via/via_map.c
+@@ -100,19 +100,15 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
+ 	if (dev_priv == NULL)
+ 		return -ENOMEM;
+ 
++	idr_init(&dev_priv->object_idr);
+ 	dev->dev_private = (void *)dev_priv;
+ 
+ 	dev_priv->chipset = chipset;
+ 
+-	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
+-	if (ret) {
+-		kfree(dev_priv);
+-		return ret;
+-	}
++	pci_set_master(dev->pdev);
+ 
+ 	ret = drm_vblank_init(dev, 1);
+ 	if (ret) {
+-		drm_sman_takedown(&dev_priv->sman);
+ 		kfree(dev_priv);
+ 		return ret;
+ 	}
+@@ -124,7 +120,8 @@ int via_driver_unload(struct drm_device *dev)
+ {
+ 	drm_via_private_t *dev_priv = dev->dev_private;
+ 
+-	drm_sman_takedown(&dev_priv->sman);
++	idr_remove_all(&dev_priv->object_idr);
++	idr_destroy(&dev_priv->object_idr);
+ 
+ 	kfree(dev_priv);
+ 
+diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
+index 6cc2dad..a3574d0 100644
+--- a/drivers/gpu/drm/via/via_mm.c
++++ b/drivers/gpu/drm/via/via_mm.c
+@@ -28,26 +28,22 @@
+ #include "drmP.h"
+ #include "via_drm.h"
+ #include "via_drv.h"
+-#include "drm_sman.h"
+ 
+ #define VIA_MM_ALIGN_SHIFT 4
+ #define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
+ 
++struct via_memblock {
++	struct drm_mm_node mm_node;
++	struct list_head owner_list;
++};
++
+ int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ {
+ 	drm_via_agp_t *agp = data;
+ 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+-	int ret;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+-	ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
+-				 agp->size >> VIA_MM_ALIGN_SHIFT);
+-
+-	if (ret) {
+-		DRM_ERROR("AGP memory manager initialisation error\n");
+-		mutex_unlock(&dev->struct_mutex);
+-		return ret;
+-	}
++	drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
+ 
+ 	dev_priv->agp_initialized = 1;
+ 	dev_priv->agp_offset = agp->offset;
+@@ -61,17 +57,9 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ {
+ 	drm_via_fb_t *fb = data;
+ 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+-	int ret;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+-	ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
+-				 fb->size >> VIA_MM_ALIGN_SHIFT);
+-
+-	if (ret) {
+-		DRM_ERROR("VRAM memory manager initialisation error\n");
+-		mutex_unlock(&dev->struct_mutex);
+-		return ret;
+-	}
++	drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
+ 
+ 	dev_priv->vram_initialized = 1;
+ 	dev_priv->vram_offset = fb->offset;
+@@ -108,19 +96,25 @@ void via_lastclose(struct drm_device *dev)
+ 		return;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+-	drm_sman_cleanup(&dev_priv->sman);
+-	dev_priv->vram_initialized = 0;
+-	dev_priv->agp_initialized = 0;
++	if (dev_priv->vram_initialized) {
++		drm_mm_takedown(&dev_priv->vram_mm);
++		dev_priv->vram_initialized = 0;
++	}
++	if (dev_priv->agp_initialized) {
++		drm_mm_takedown(&dev_priv->agp_mm);
++		dev_priv->agp_initialized = 0;
++	}
+ 	mutex_unlock(&dev->struct_mutex);
+ }
+ 
+ int via_mem_alloc(struct drm_device *dev, void *data,
+-		  struct drm_file *file_priv)
++		  struct drm_file *file)
+ {
+ 	drm_via_mem_t *mem = data;
+-	int retval = 0;
+-	struct drm_memblock_item *item;
++	int retval = 0, user_key;
++	struct via_memblock *item;
+ 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
++	struct via_file_private *file_priv = file->driver_priv;
+ 	unsigned long tmpSize;
+ 
+ 	if (mem->type > VIA_MEM_AGP) {
+@@ -136,24 +130,57 @@ int via_mem_alloc(struct drm_device *dev, void *data,
+ 		return -EINVAL;
+ 	}
+ 
++	item = kzalloc(sizeof(*item), GFP_KERNEL);
++	if (!item) {
++		retval = -ENOMEM;
++		goto fail_alloc;
++	}
++
+ 	tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
+-	item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
+-			      (unsigned long)file_priv);
+-	mutex_unlock(&dev->struct_mutex);
+-	if (item) {
+-		mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
+-			      dev_priv->vram_offset : dev_priv->agp_offset) +
+-		    (item->mm->
+-		     offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
+-		mem->index = item->user_hash.key;
+-	} else {
+-		mem->offset = 0;
+-		mem->size = 0;
+-		mem->index = 0;
+-		DRM_DEBUG("Video memory allocation failed\n");
++	if (mem->type == VIA_MEM_AGP)
++		retval = drm_mm_insert_node(&dev_priv->agp_mm,
++					    &item->mm_node,
++					    tmpSize, 0);
++	else
++		retval = drm_mm_insert_node(&dev_priv->vram_mm,
++					    &item->mm_node,
++					    tmpSize, 0);
++	if (retval)
++		goto fail_alloc;
++
++again:
++	if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
+ 		retval = -ENOMEM;
++		goto fail_idr;
+ 	}
+ 
++	retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
++	if (retval == -EAGAIN)
++		goto again;
++	if (retval)
++		goto fail_idr;
++
++	list_add(&item->owner_list, &file_priv->obj_list);
++	mutex_unlock(&dev->struct_mutex);
++
++	mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
++		      dev_priv->vram_offset : dev_priv->agp_offset) +
++	    ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
++	mem->index = user_key;
++
++	return 0;
++
++fail_idr:
++	drm_mm_remove_node(&item->mm_node);
++fail_alloc:
++	kfree(item);
++	mutex_unlock(&dev->struct_mutex);
++
++	mem->offset = 0;
++	mem->size = 0;
++	mem->index = 0;
++	DRM_DEBUG("Video memory allocation failed\n");
++
+ 	return retval;
+ }
+ 
+@@ -161,24 +188,35 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+ {
+ 	drm_via_private_t *dev_priv = dev->dev_private;
+ 	drm_via_mem_t *mem = data;
+-	int ret;
++	struct via_memblock *obj;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+-	ret = drm_sman_free_key(&dev_priv->sman, mem->index);
++	obj = idr_find(&dev_priv->object_idr, mem->index);
++	if (obj == NULL) {
++		mutex_unlock(&dev->struct_mutex);
++		return -EINVAL;
++	}
++
++	idr_remove(&dev_priv->object_idr, mem->index);
++	list_del(&obj->owner_list);
++	drm_mm_remove_node(&obj->mm_node);
++	kfree(obj);
+ 	mutex_unlock(&dev->struct_mutex);
++
+ 	DRM_DEBUG("free = 0x%lx\n", mem->index);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ 
+ void via_reclaim_buffers_locked(struct drm_device *dev,
+-				struct drm_file *file_priv)
++				struct drm_file *file)
+ {
+-	drm_via_private_t *dev_priv = dev->dev_private;
++	struct via_file_private *file_priv = file->driver_priv;
++	struct via_memblock *entry, *next;
+ 
+ 	mutex_lock(&dev->struct_mutex);
+-	if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
++	if (list_empty(&file_priv->obj_list)) {
+ 		mutex_unlock(&dev->struct_mutex);
+ 		return;
+ 	}
+@@ -186,7 +224,12 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
+ 	if (dev->driver->dma_quiescent)
+ 		dev->driver->dma_quiescent(dev);
+ 
+-	drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
++	list_for_each_entry_safe(entry, next, &file_priv->obj_list,
++				 owner_list) {
++		list_del(&entry->owner_list);
++		drm_mm_remove_node(&entry->mm_node);
++		kfree(entry);
++	}
+ 	mutex_unlock(&dev->struct_mutex);
+ 	return;
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+index 5a72ed9..1e2c0fb 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+@@ -28,6 +28,7 @@
+ #include "vmwgfx_drv.h"
+ #include "ttm/ttm_bo_driver.h"
+ #include "ttm/ttm_placement.h"
++#include "ttm/ttm_page_alloc.h"
+ 
+ static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
+ 	TTM_PL_FLAG_CACHED;
+@@ -139,85 +140,63 @@ struct ttm_placement vmw_srf_placement = {
+ 	.busy_placement = gmr_vram_placement_flags
+ };
+ 
+-struct vmw_ttm_backend {
+-	struct ttm_backend backend;
+-	struct page **pages;
+-	unsigned long num_pages;
++struct vmw_ttm_tt {
++	struct ttm_tt ttm;
+ 	struct vmw_private *dev_priv;
+ 	int gmr_id;
+ };
+ 
+-static int vmw_ttm_populate(struct ttm_backend *backend,
+-			    unsigned long num_pages, struct page **pages,
+-			    struct page *dummy_read_page,
+-			    dma_addr_t *dma_addrs)
++static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+ {
+-	struct vmw_ttm_backend *vmw_be =
+-	    container_of(backend, struct vmw_ttm_backend, backend);
+-
+-	vmw_be->pages = pages;
+-	vmw_be->num_pages = num_pages;
+-
+-	return 0;
+-}
+-
+-static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
+-{
+-	struct vmw_ttm_backend *vmw_be =
+-	    container_of(backend, struct vmw_ttm_backend, backend);
++	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+ 
+ 	vmw_be->gmr_id = bo_mem->start;
+ 
+-	return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
+-			    vmw_be->num_pages, vmw_be->gmr_id);
++	return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
++			    ttm->num_pages, vmw_be->gmr_id);
+ }
+ 
+-static int vmw_ttm_unbind(struct ttm_backend *backend)
++static int vmw_ttm_unbind(struct ttm_tt *ttm)
+ {
+-	struct vmw_ttm_backend *vmw_be =
+-	    container_of(backend, struct vmw_ttm_backend, backend);
++	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+ 
+ 	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+ 	return 0;
+ }
+ 
+-static void vmw_ttm_clear(struct ttm_backend *backend)
++static void vmw_ttm_destroy(struct ttm_tt *ttm)
+ {
+-	struct vmw_ttm_backend *vmw_be =
+-		container_of(backend, struct vmw_ttm_backend, backend);
+-
+-	vmw_be->pages = NULL;
+-	vmw_be->num_pages = 0;
+-}
+-
+-static void vmw_ttm_destroy(struct ttm_backend *backend)
+-{
+-	struct vmw_ttm_backend *vmw_be =
+-	    container_of(backend, struct vmw_ttm_backend, backend);
++	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+ 
++	ttm_tt_fini(ttm);
+ 	kfree(vmw_be);
+ }
+ 
+ static struct ttm_backend_func vmw_ttm_func = {
+-	.populate = vmw_ttm_populate,
+-	.clear = vmw_ttm_clear,
+ 	.bind = vmw_ttm_bind,
+ 	.unbind = vmw_ttm_unbind,
+ 	.destroy = vmw_ttm_destroy,
+ };
+ 
+-struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
++struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
++				 unsigned long size, uint32_t page_flags,
++				 struct page *dummy_read_page)
+ {
+-	struct vmw_ttm_backend *vmw_be;
++	struct vmw_ttm_tt *vmw_be;
+ 
+ 	vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
+ 	if (!vmw_be)
+ 		return NULL;
+ 
+-	vmw_be->backend.func = &vmw_ttm_func;
++	vmw_be->ttm.func = &vmw_ttm_func;
+ 	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
+ 
+-	return &vmw_be->backend;
++	if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
++		kfree(vmw_be);
++		return NULL;
++	}
++
++	return &vmw_be->ttm;
+ }
+ 
+ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+@@ -357,7 +336,9 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
+ }
+ 
+ struct ttm_bo_driver vmw_bo_driver = {
+-	.create_ttm_backend_entry = vmw_ttm_backend_init,
++	.ttm_tt_create = &vmw_ttm_tt_create,
++	.ttm_tt_populate = &ttm_pool_populate,
++	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
+ 	.invalidate_caches = vmw_invalidate_caches,
+ 	.init_mem_type = vmw_init_mem_type,
+ 	.evict_flags = vmw_evict_flags,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index b639536..db50604 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -38,6 +38,10 @@
+ #define VMWGFX_CHIP_SVGAII 0
+ #define VMW_FB_RESERVATION 0
+ 
++#define VMW_MIN_INITIAL_WIDTH 800
++#define VMW_MIN_INITIAL_HEIGHT 600
++
++
+ /**
+  * Fully encoded drm commands. Might move to vmw_drm.h
+  */
+@@ -388,6 +392,41 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
+ 	BUG_ON(n3d < 0);
+ }
+ 
++/**
++ * Sets the initial_[width|height] fields on the given vmw_private.
++ *
++ * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
++ * clamping the value to fb_max_[width|height] fields and the
++ * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
++ * If the values appear to be invalid, set them to
++ * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
++ */
++static void vmw_get_initial_size(struct vmw_private *dev_priv)
++{
++	uint32_t width;
++	uint32_t height;
++
++	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
++	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
++
++	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
++	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
++
++	if (width > dev_priv->fb_max_width ||
++	    height > dev_priv->fb_max_height) {
++
++		/*
++		 * This is a host error and shouldn't occur.
++		 */
++
++		width = VMW_MIN_INITIAL_WIDTH;
++		height = VMW_MIN_INITIAL_HEIGHT;
++	}
++
++	dev_priv->initial_width = width;
++	dev_priv->initial_height = height;
++}
++
+ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+ {
+ 	struct vmw_private *dev_priv;
+@@ -401,6 +440,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+ 	}
+ 	memset(dev_priv, 0, sizeof(*dev_priv));
+ 
++	pci_set_master(dev->pdev);
++
+ 	dev_priv->dev = dev;
+ 	dev_priv->vmw_chipset = chipset;
+ 	dev_priv->last_read_seqno = (uint32_t) -100;
+@@ -431,7 +472,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+ 	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
+ 	if (svga_id != SVGA_ID_2) {
+ 		ret = -ENOSYS;
+-		DRM_ERROR("Unsuported SVGA ID 0x%x\n", svga_id);
++		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
+ 		mutex_unlock(&dev_priv->hw_mutex);
+ 		goto out_err0;
+ 	}
+@@ -442,6 +483,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
+ 	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+ 	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
+ 	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
++
++	vmw_get_initial_size(dev_priv);
++
+ 	if (dev_priv->capabilities & SVGA_CAP_GMR) {
+ 		dev_priv->max_gmr_descriptors =
+ 			vmw_read(dev_priv,
+@@ -689,6 +733,15 @@ static int vmw_driver_unload(struct drm_device *dev)
+ 	return 0;
+ }
+ 
++static void vmw_preclose(struct drm_device *dev,
++			 struct drm_file *file_priv)
++{
++	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
++	struct vmw_private *dev_priv = vmw_priv(dev);
++
++	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
++}
++
+ static void vmw_postclose(struct drm_device *dev,
+ 			 struct drm_file *file_priv)
+ {
+@@ -711,6 +764,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+ 	if (unlikely(vmw_fp == NULL))
+ 		return ret;
+ 
++	INIT_LIST_HEAD(&vmw_fp->fence_events);
+ 	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
+ 	if (unlikely(vmw_fp->tfile == NULL))
+ 		goto out_no_tfile;
+@@ -1070,6 +1124,21 @@ static const struct dev_pm_ops vmw_pm_ops = {
+ 	.resume = vmw_pm_resume,
+ };
+ 
++static const struct file_operations vmwgfx_driver_fops = {
++	.owner = THIS_MODULE,
++	.open = drm_open,
++	.release = drm_release,
++	.unlocked_ioctl = vmw_unlocked_ioctl,
++	.mmap = vmw_mmap,
++	.poll = vmw_fops_poll,
++	.read = vmw_fops_read,
++	.fasync = drm_fasync,
++#if defined(CONFIG_COMPAT)
++	.compat_ioctl = drm_compat_ioctl,
++#endif
++	.llseek = noop_llseek,
++};
++
+ static struct drm_driver driver = {
+ 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+ 	DRIVER_MODESET,
+@@ -1093,26 +1162,14 @@ static struct drm_driver driver = {
+ 	.master_set = vmw_master_set,
+ 	.master_drop = vmw_master_drop,
+ 	.open = vmw_driver_open,
++	.preclose = vmw_preclose,
+ 	.postclose = vmw_postclose,
+ 
+ 	.dumb_create = vmw_dumb_create,
+ 	.dumb_map_offset = vmw_dumb_map_offset,
+ 	.dumb_destroy = vmw_dumb_destroy,
+ 
+-	.fops = {
+-		 .owner = THIS_MODULE,
+-		 .open = drm_open,
+-		 .release = drm_release,
+-		 .unlocked_ioctl = vmw_unlocked_ioctl,
+-		 .mmap = vmw_mmap,
+-		 .poll = vmw_fops_poll,
+-		 .read = vmw_fops_read,
+-		 .fasync = drm_fasync,
+-#if defined(CONFIG_COMPAT)
+-		 .compat_ioctl = drm_compat_ioctl,
+-#endif
+-		 .llseek = noop_llseek,
+-	},
++	.fops = &vmwgfx_driver_fops,
+ 	.name = VMWGFX_DRIVER_NAME,
+ 	.desc = VMWGFX_DRIVER_DESC,
+ 	.date = VMWGFX_DRIVER_DATE,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index 0e3fa7d..29c984f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -40,9 +40,9 @@
+ #include "ttm/ttm_module.h"
+ #include "vmwgfx_fence.h"
+ 
+-#define VMWGFX_DRIVER_DATE "20111025"
++#define VMWGFX_DRIVER_DATE "20120209"
+ #define VMWGFX_DRIVER_MAJOR 2
+-#define VMWGFX_DRIVER_MINOR 3
++#define VMWGFX_DRIVER_MINOR 4
+ #define VMWGFX_DRIVER_PATCHLEVEL 0
+ #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
+ #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
+@@ -62,6 +62,7 @@
+ struct vmw_fpriv {
+ 	struct drm_master *locked_master;
+ 	struct ttm_object_file *tfile;
++	struct list_head fence_events;
+ };
+ 
+ struct vmw_dma_buffer {
+@@ -202,6 +203,8 @@ struct vmw_private {
+ 	uint32_t mmio_size;
+ 	uint32_t fb_max_width;
+ 	uint32_t fb_max_height;
++	uint32_t initial_width;
++	uint32_t initial_height;
+ 	__le32 __iomem *mmio_virt;
+ 	int mmio_mtrr;
+ 	uint32_t capabilities;
+@@ -533,7 +536,8 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
+ 			       uint32_t command_size,
+ 			       uint64_t throttle_us,
+ 			       struct drm_vmw_fence_rep __user
+-			       *user_fence_rep);
++			       *user_fence_rep,
++			       struct vmw_fence_obj **out_fence);
+ 
+ extern void
+ vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 40932fb..4acced4 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -1109,10 +1109,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ 			void *kernel_commands,
+ 			uint32_t command_size,
+ 			uint64_t throttle_us,
+-			struct drm_vmw_fence_rep __user *user_fence_rep)
++			struct drm_vmw_fence_rep __user *user_fence_rep,
++			struct vmw_fence_obj **out_fence)
+ {
+ 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
+-	struct vmw_fence_obj *fence;
++	struct vmw_fence_obj *fence = NULL;
+ 	uint32_t handle;
+ 	void *cmd;
+ 	int ret;
+@@ -1208,8 +1209,13 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+ 				    user_fence_rep, fence, handle);
+ 
+-	if (likely(fence != NULL))
++	/* Don't unreference when handing fence out */
++	if (unlikely(out_fence != NULL)) {
++		*out_fence = fence;
++		fence = NULL;
++	} else if (likely(fence != NULL)) {
+ 		vmw_fence_obj_unreference(&fence);
++	}
+ 
+ 	mutex_unlock(&dev_priv->cmdbuf_mutex);
+ 	return 0;
+@@ -1362,7 +1368,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ 	ret = vmw_execbuf_process(file_priv, dev_priv,
+ 				  (void __user *)(unsigned long)arg->commands,
+ 				  NULL, arg->command_size, arg->throttle_us,
+-				  (void __user *)(unsigned long)arg->fence_rep);
++				  (void __user *)(unsigned long)arg->fence_rep,
++				  NULL);
+ 
+ 	if (unlikely(ret != 0))
+ 		goto out_unlock;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+index 34e51a1..3c447bf 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+@@ -414,10 +414,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
+ 	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
+ 	int ret;
+ 
+-	/* XXX These shouldn't be hardcoded. */
+-	initial_width = 800;
+-	initial_height = 600;
+-
+ 	fb_bpp = 32;
+ 	fb_depth = 24;
+ 
+@@ -425,8 +421,8 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
+ 	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+ 	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
+ 
+-	initial_width = min(fb_width, initial_width);
+-	initial_height = min(fb_height, initial_height);
++	initial_width = min(vmw_priv->initial_width, fb_width);
++	initial_height = min(vmw_priv->initial_height, fb_height);
+ 
+ 	fb_pitch = fb_width * fb_bpp / 8;
+ 	fb_size = fb_pitch * fb_height;
+@@ -515,19 +511,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
+ 	info->var.xres = initial_width;
+ 	info->var.yres = initial_height;
+ 
+-#if 0
+-	info->pixmap.size = 64*1024;
+-	info->pixmap.buf_align = 8;
+-	info->pixmap.access_align = 32;
+-	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+-	info->pixmap.scan_align = 1;
+-#else
+-	info->pixmap.size = 0;
+-	info->pixmap.buf_align = 8;
+-	info->pixmap.access_align = 32;
+-	info->pixmap.flags = FB_PIXMAP_SYSTEM;
+-	info->pixmap.scan_align = 1;
+-#endif
++	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
+ 
+ 	info->apertures = alloc_apertures(1);
+ 	if (!info->apertures) {
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+index 15fb260..7e07433 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -69,12 +69,13 @@ struct vmw_user_fence {
+  * be assigned the current time tv_usec val when the fence signals.
+  */
+ struct vmw_event_fence_action {
+-	struct drm_pending_event e;
+ 	struct vmw_fence_action action;
++	struct list_head fpriv_head;
++
++	struct drm_pending_event *event;
+ 	struct vmw_fence_obj *fence;
+ 	struct drm_device *dev;
+-	struct kref kref;
+-	uint32_t size;
++
+ 	uint32_t *tv_sec;
+ 	uint32_t *tv_usec;
+ };
+@@ -784,46 +785,40 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
+ }
+ 
+ /**
+- * vmw_event_fence_action_destroy
+- *
+- * @kref: The struct kref embedded in a struct vmw_event_fence_action.
+- *
+- * The vmw_event_fence_action destructor that may be called either after
+- * the fence action cleanup, or when the event is delivered.
+- * It frees both the vmw_event_fence_action struct and the actual
+- * event structure copied to user-space.
+- */
+-static void vmw_event_fence_action_destroy(struct kref *kref)
+-{
+-	struct vmw_event_fence_action *eaction =
+-		container_of(kref, struct vmw_event_fence_action, kref);
+-	struct ttm_mem_global *mem_glob =
+-		vmw_mem_glob(vmw_priv(eaction->dev));
+-	uint32_t size = eaction->size;
+-
+-	kfree(eaction->e.event);
+-	kfree(eaction);
+-	ttm_mem_global_free(mem_glob, size);
+-}
+-
+-
+-/**
+- * vmw_event_fence_action_delivered
++ * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
+  *
+- * @e: The struct drm_pending_event embedded in a struct
+- * vmw_event_fence_action.
++ * @fman: Pointer to a struct vmw_fence_manager
++ * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
++ * with pointers to a struct drm_file object about to be closed.
+  *
+- * The struct drm_pending_event destructor that is called by drm
+- * once the event is delivered. Since we don't know whether this function
+- * will be called before or after the fence action destructor, we
+- * free a refcount and destroy if it becomes zero.
++ * This function removes all pending fence events with references to a
++ * specific struct drm_file object about to be closed. The caller is required
++ * to pass a list of all struct vmw_event_fence_action objects with such
++ * events attached. This function is typically called before the
++ * struct drm_file object's event management is taken down.
+  */
+-static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
++void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
++				struct list_head *event_list)
+ {
+-	struct vmw_event_fence_action *eaction =
+-		container_of(e, struct vmw_event_fence_action, e);
++	struct vmw_event_fence_action *eaction;
++	struct drm_pending_event *event;
++	unsigned long irq_flags;
+ 
+-	kref_put(&eaction->kref, vmw_event_fence_action_destroy);
++	while (1) {
++		spin_lock_irqsave(&fman->lock, irq_flags);
++		if (list_empty(event_list))
++			goto out_unlock;
++		eaction = list_first_entry(event_list,
++					   struct vmw_event_fence_action,
++					   fpriv_head);
++		list_del_init(&eaction->fpriv_head);
++		event = eaction->event;
++		eaction->event = NULL;
++		spin_unlock_irqrestore(&fman->lock, irq_flags);
++		event->destroy(event);
++	}
++out_unlock:
++	spin_unlock_irqrestore(&fman->lock, irq_flags);
+ }
+ 
+ 
+@@ -836,18 +831,21 @@ static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
+  * This function is called when the seqno of the fence where @action is
+  * attached has passed. It queues the event on the submitter's event list.
+  * This function is always called from atomic context, and may be called
+- * from irq context. It ups a refcount reflecting that we now have two
+- * destructors.
++ * from irq context.
+  */
+ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
+ {
+ 	struct vmw_event_fence_action *eaction =
+ 		container_of(action, struct vmw_event_fence_action, action);
+ 	struct drm_device *dev = eaction->dev;
+-	struct drm_file *file_priv = eaction->e.file_priv;
++	struct drm_pending_event *event = eaction->event;
++	struct drm_file *file_priv;
+ 	unsigned long irq_flags;
+ 
+-	kref_get(&eaction->kref);
++	if (unlikely(event == NULL))
++		return;
++
++	file_priv = event->file_priv;
+ 	spin_lock_irqsave(&dev->event_lock, irq_flags);
+ 
+ 	if (likely(eaction->tv_sec != NULL)) {
+@@ -858,7 +856,9 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
+ 		*eaction->tv_usec = tv.tv_usec;
+ 	}
+ 
+-	list_add_tail(&eaction->e.link, &file_priv->event_list);
++	list_del_init(&eaction->fpriv_head);
++	list_add_tail(&eaction->event->link, &file_priv->event_list);
++	eaction->event = NULL;
+ 	wake_up_all(&file_priv->event_wait);
+ 	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+ }
+@@ -876,9 +876,15 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
+ {
+ 	struct vmw_event_fence_action *eaction =
+ 		container_of(action, struct vmw_event_fence_action, action);
++	struct vmw_fence_manager *fman = eaction->fence->fman;
++	unsigned long irq_flags;
++
++	spin_lock_irqsave(&fman->lock, irq_flags);
++	list_del(&eaction->fpriv_head);
++	spin_unlock_irqrestore(&fman->lock, irq_flags);
+ 
+ 	vmw_fence_obj_unreference(&eaction->fence);
+-	kref_put(&eaction->kref, vmw_event_fence_action_destroy);
++	kfree(eaction);
+ }
+ 
+ 
+@@ -946,39 +952,23 @@ void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
+  * an error code, the caller needs to free that object.
+  */
+ 
+-int vmw_event_fence_action_create(struct drm_file *file_priv,
+-				  struct vmw_fence_obj *fence,
+-				  struct drm_event *event,
+-				  uint32_t *tv_sec,
+-				  uint32_t *tv_usec,
+-				  bool interruptible)
++int vmw_event_fence_action_queue(struct drm_file *file_priv,
++				 struct vmw_fence_obj *fence,
++				 struct drm_pending_event *event,
++				 uint32_t *tv_sec,
++				 uint32_t *tv_usec,
++				 bool interruptible)
+ {
+ 	struct vmw_event_fence_action *eaction;
+-	struct ttm_mem_global *mem_glob =
+-		vmw_mem_glob(fence->fman->dev_priv);
+ 	struct vmw_fence_manager *fman = fence->fman;
+-	uint32_t size = fman->event_fence_action_size +
+-		ttm_round_pot(event->length);
+-	int ret;
+-
+-	/*
+-	 * Account for internal structure size as well as the
+-	 * event size itself.
+-	 */
+-
+-	ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible);
+-	if (unlikely(ret != 0))
+-		return ret;
++	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
++	unsigned long irq_flags;
+ 
+ 	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
+-	if (unlikely(eaction == NULL)) {
+-		ttm_mem_global_free(mem_glob, size);
++	if (unlikely(eaction == NULL))
+ 		return -ENOMEM;
+-	}
+ 
+-	eaction->e.event = event;
+-	eaction->e.file_priv = file_priv;
+-	eaction->e.destroy = vmw_event_fence_action_delivered;
++	eaction->event = event;
+ 
+ 	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
+ 	eaction->action.cleanup = vmw_event_fence_action_cleanup;
+@@ -986,16 +976,89 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
+ 
+ 	eaction->fence = vmw_fence_obj_reference(fence);
+ 	eaction->dev = fman->dev_priv->dev;
+-	eaction->size = size;
+ 	eaction->tv_sec = tv_sec;
+ 	eaction->tv_usec = tv_usec;
+ 
+-	kref_init(&eaction->kref);
++	spin_lock_irqsave(&fman->lock, irq_flags);
++	list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
++	spin_unlock_irqrestore(&fman->lock, irq_flags);
++
+ 	vmw_fence_obj_add_action(fence, &eaction->action);
+ 
+ 	return 0;
+ }
+ 
++struct vmw_event_fence_pending {
++	struct drm_pending_event base;
++	struct drm_vmw_event_fence event;
++};
++
++int vmw_event_fence_action_create(struct drm_file *file_priv,
++				  struct vmw_fence_obj *fence,
++				  uint32_t flags,
++				  uint64_t user_data,
++				  bool interruptible)
++{
++	struct vmw_event_fence_pending *event;
++	struct drm_device *dev = fence->fman->dev_priv->dev;
++	unsigned long irq_flags;
++	int ret;
++
++	spin_lock_irqsave(&dev->event_lock, irq_flags);
++
++	ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
++	if (likely(ret == 0))
++		file_priv->event_space -= sizeof(event->event);
++
++	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
++
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Failed to allocate event space for this file.\n");
++		goto out_no_space;
++	}
++
++
++	event = kzalloc(sizeof(*event), GFP_KERNEL);
++	if (unlikely(event == NULL)) {
++		DRM_ERROR("Failed to allocate an event.\n");
++		ret = -ENOMEM;
++		goto out_no_event;
++	}
++
++	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
++	event->event.base.length = sizeof(*event);
++	event->event.user_data = user_data;
++
++	event->base.event = &event->event.base;
++	event->base.file_priv = file_priv;
++	event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
++
++
++	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
++		ret = vmw_event_fence_action_queue(file_priv, fence,
++						   &event->base,
++						   &event->event.tv_sec,
++						   &event->event.tv_usec,
++						   interruptible);
++	else
++		ret = vmw_event_fence_action_queue(file_priv, fence,
++						   &event->base,
++						   NULL,
++						   NULL,
++						   interruptible);
++	if (ret != 0)
++		goto out_no_queue;
++
++out_no_queue:
++	event->base.destroy(&event->base);
++out_no_event:
++	spin_lock_irqsave(&dev->event_lock, irq_flags);
++	file_priv->event_space += sizeof(*event);
++	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
++out_no_space:
++	return ret;
++}
++
+ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ 			  struct drm_file *file_priv)
+ {
+@@ -1008,8 +1071,6 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ 		(struct drm_vmw_fence_rep __user *)(unsigned long)
+ 		arg->fence_rep;
+ 	uint32_t handle;
+-	unsigned long irq_flags;
+-	struct drm_vmw_event_fence *event;
+ 	int ret;
+ 
+ 	/*
+@@ -1062,59 +1123,28 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ 
+ 	BUG_ON(fence == NULL);
+ 
+-	spin_lock_irqsave(&dev->event_lock, irq_flags);
+-
+-	ret = (file_priv->event_space < sizeof(*event)) ? -EBUSY : 0;
+-	if (likely(ret == 0))
+-		file_priv->event_space -= sizeof(*event);
+-
+-	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+-
+-	if (unlikely(ret != 0)) {
+-		DRM_ERROR("Failed to allocate event space for this file.\n");
+-		goto out_no_event_space;
+-	}
+-
+-	event = kzalloc(sizeof(*event), GFP_KERNEL);
+-	if (unlikely(event == NULL)) {
+-		DRM_ERROR("Failed to allocate an event.\n");
+-		goto out_no_event;
+-	}
+-
+-	event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
+-	event->base.length = sizeof(*event);
+-	event->user_data = arg->user_data;
+-
+ 	if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
+ 		ret = vmw_event_fence_action_create(file_priv, fence,
+-						    &event->base,
+-						    &event->tv_sec,
+-						    &event->tv_usec,
++						    arg->flags,
++						    arg->user_data,
+ 						    true);
+ 	else
+ 		ret = vmw_event_fence_action_create(file_priv, fence,
+-						    &event->base,
+-						    NULL,
+-						    NULL,
++						    arg->flags,
++						    arg->user_data,
+ 						    true);
+ 
+ 	if (unlikely(ret != 0)) {
+ 		if (ret != -ERESTARTSYS)
+ 			DRM_ERROR("Failed to attach event to fence.\n");
+-		goto out_no_attach;
++		goto out_no_create;
+ 	}
+ 
+ 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
+ 				    handle);
+ 	vmw_fence_obj_unreference(&fence);
+ 	return 0;
+-out_no_attach:
+-	kfree(event);
+-out_no_event:
+-	spin_lock_irqsave(&dev->event_lock, irq_flags);
+-	file_priv->event_space += sizeof(*event);
+-	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+-out_no_event_space:
++out_no_create:
+ 	if (user_fence_rep != NULL)
+ 		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ 					  handle, TTM_REF_USAGE);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+index 0854a20..faf2e78 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+@@ -109,5 +109,12 @@ extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
+ 				     struct drm_file *file_priv);
+ extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
+ 				 struct drm_file *file_priv);
+-
++extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
++				       struct list_head *event_list);
++extern int vmw_event_fence_action_queue(struct drm_file *filee_priv,
++					struct vmw_fence_obj *fence,
++					struct drm_pending_event *event,
++					uint32_t *tv_sec,
++					uint32_t *tv_usec,
++					bool interruptible);
+ #endif /* _VMWGFX_FENCE_H_ */
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+index c41226a..21ee782 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+@@ -136,10 +136,10 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
+ 
+ 		if (likely(page_virtual != NULL)) {
+ 			desc_virtual->ppn = page_to_pfn(page);
+-			kunmap_atomic(page_virtual, KM_USER0);
++			kunmap_atomic(page_virtual);
+ 		}
+ 
+-		page_virtual = kmap_atomic(page, KM_USER0);
++		page_virtual = kmap_atomic(page);
+ 		desc_virtual = page_virtual - 1;
+ 		prev_pfn = ~(0UL);
+ 
+@@ -169,7 +169,7 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
+ 	}
+ 
+ 	if (likely(page_virtual != NULL))
+-		kunmap_atomic(page_virtual, KM_USER0);
++		kunmap_atomic(page_virtual);
+ 
+ 	return 0;
+ out_err:
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+index 7c88f1f..00fb5aa 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -422,7 +422,8 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
+ 				struct vmw_framebuffer *framebuffer,
+ 				unsigned flags, unsigned color,
+ 				struct drm_clip_rect *clips,
+-				unsigned num_clips, int inc)
++				unsigned num_clips, int inc,
++				struct vmw_fence_obj **out_fence)
+ {
+ 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ 	struct drm_clip_rect *clips_ptr;
+@@ -542,12 +543,15 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
+ 		if (num == 0)
+ 			continue;
+ 
++		/* only return the last fence */
++		if (out_fence && *out_fence)
++			vmw_fence_obj_unreference(out_fence);
+ 
+ 		/* recalculate package length */
+ 		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+ 		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
+ 		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
+-					  fifo_size, 0, NULL);
++					  fifo_size, 0, NULL, out_fence);
+ 
+ 		if (unlikely(ret != 0))
+ 			break;
+@@ -598,7 +602,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+ 
+ 	ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
+ 				   flags, color,
+-				   clips, num_clips, inc);
++				   clips, num_clips, inc, NULL);
+ 
+ 	ttm_read_unlock(&vmaster->lock);
+ 	return 0;
+@@ -690,7 +694,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+ 
+ 	/* XXX get the first 3 from the surface info */
+ 	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
+-	vfbs->base.base.pitch = mode_cmd->pitch;
++	vfbs->base.base.pitches[0] = mode_cmd->pitch;
+ 	vfbs->base.base.depth = mode_cmd->depth;
+ 	vfbs->base.base.width = mode_cmd->width;
+ 	vfbs->base.base.height = mode_cmd->height;
+@@ -804,12 +808,12 @@ static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
+ 	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
+ 	cmd->body.format.colorDepth = depth;
+ 	cmd->body.format.reserved = 0;
+-	cmd->body.bytesPerLine = framebuffer->base.pitch;
++	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
+ 	cmd->body.ptr.gmrId = framebuffer->user_handle;
+ 	cmd->body.ptr.offset = 0;
+ 
+ 	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
+-				  fifo_size, 0, NULL);
++				  fifo_size, 0, NULL, NULL);
+ 
+ 	kfree(cmd);
+ 
+@@ -821,7 +825,8 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
+ 			       struct vmw_framebuffer *framebuffer,
+ 			       unsigned flags, unsigned color,
+ 			       struct drm_clip_rect *clips,
+-			       unsigned num_clips, int increment)
++			       unsigned num_clips, int increment,
++			       struct vmw_fence_obj **out_fence)
+ {
+ 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ 	struct drm_clip_rect *clips_ptr;
+@@ -894,9 +899,13 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
+ 		if (hit_num == 0)
+ 			continue;
+ 
++		/* only return the last fence */
++		if (out_fence && *out_fence)
++			vmw_fence_obj_unreference(out_fence);
++
+ 		fifo_size = sizeof(*blits) * hit_num;
+ 		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
+-					  fifo_size, 0, NULL);
++					  fifo_size, 0, NULL, out_fence);
+ 
+ 		if (unlikely(ret != 0))
+ 			break;
+@@ -942,7 +951,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+ 	} else {
+ 		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
+ 					  flags, color,
+-					  clips, num_clips, increment);
++					  clips, num_clips, increment, NULL);
+ 	}
+ 
+ 	ttm_read_unlock(&vmaster->lock);
+@@ -1056,7 +1065,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
+ 	}
+ 
+ 	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
+-	vfbd->base.base.pitch = mode_cmd->pitch;
++	vfbd->base.base.pitches[0] = mode_cmd->pitch;
+ 	vfbd->base.base.depth = mode_cmd->depth;
+ 	vfbd->base.base.width = mode_cmd->width;
+ 	vfbd->base.base.height = mode_cmd->height;
+@@ -1085,7 +1094,7 @@ out_err1:
+ 
+ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+ 						 struct drm_file *file_priv,
+-						 struct drm_mode_fb_cmd *mode_cmd)
++						 struct drm_mode_fb_cmd2 *mode_cmd2)
+ {
+ 	struct vmw_private *dev_priv = vmw_priv(dev);
+ 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+@@ -1093,8 +1102,16 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+ 	struct vmw_surface *surface = NULL;
+ 	struct vmw_dma_buffer *bo = NULL;
+ 	struct ttm_base_object *user_obj;
++	struct drm_mode_fb_cmd mode_cmd;
+ 	int ret;
+ 
++	mode_cmd.width = mode_cmd2->width;
++	mode_cmd.height = mode_cmd2->height;
++	mode_cmd.pitch = mode_cmd2->pitches[0];
++	mode_cmd.handle = mode_cmd2->handles[0];
++	drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
++				    &mode_cmd.bpp);
++
+ 	/**
+ 	 * This code should be conditioned on Screen Objects not being used.
+ 	 * If screen objects are used, we can allocate a GMR to hold the
+@@ -1102,8 +1119,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+ 	 */
+ 
+ 	if (!vmw_kms_validate_mode_vram(dev_priv,
+-					mode_cmd->pitch,
+-					mode_cmd->height)) {
++					mode_cmd.pitch,
++					mode_cmd.height)) {
+ 		DRM_ERROR("VRAM size is too small for requested mode.\n");
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+@@ -1117,15 +1134,19 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+ 	 * command stream using user-space handles.
+ 	 */
+ 
+-	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handle);
++	user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
+ 	if (unlikely(user_obj == NULL)) {
+ 		DRM_ERROR("Could not locate requested kms frame buffer.\n");
+ 		return ERR_PTR(-ENOENT);
+ 	}
+ 
++	/**
++	 * End conditioned code.
++	 */
++
+ 	/* returns either a dmabuf or surface */
+ 	ret = vmw_user_lookup_handle(dev_priv, tfile,
+-				     mode_cmd->handle,
++				     mode_cmd.handle,
+ 				     &surface, &bo);
+ 	if (ret)
+ 		goto err_out;
+@@ -1133,10 +1154,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+ 	/* Create the new framebuffer depending one what we got back */
+ 	if (bo)
+ 		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+-						     mode_cmd);
++						     &mode_cmd);
+ 	else if (surface)
+ 		ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
+-						      surface, &vfb, mode_cmd);
++						      surface, &vfb, &mode_cmd);
+ 	else
+ 		BUG();
+ 
+@@ -1284,7 +1305,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
+ 		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+ 		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
+ 		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
+-					  fifo_size, 0, NULL);
++					  fifo_size, 0, NULL, NULL);
+ 
+ 		if (unlikely(ret != 0))
+ 			break;
+@@ -1344,7 +1365,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
+ 	cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
+ 	cmd->body.format.colorDepth = vfb->base.depth;
+ 	cmd->body.format.reserved = 0;
+-	cmd->body.bytesPerLine = vfb->base.pitch;
++	cmd->body.bytesPerLine = vfb->base.pitches[0];
+ 	cmd->body.ptr.gmrId = vfb->user_handle;
+ 	cmd->body.ptr.offset = 0;
+ 
+@@ -1397,7 +1418,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
+ 	fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
+ 
+ 	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
+-				  0, user_fence_rep);
++				  0, user_fence_rep, NULL);
+ 
+ 	kfree(cmd);
+ 
+@@ -1660,6 +1681,74 @@ int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
+ 	return 0;
+ }
+ 
++int vmw_du_page_flip(struct drm_crtc *crtc,
++		     struct drm_framebuffer *fb,
++		     struct drm_pending_vblank_event *event)
++{
++	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
++	struct drm_framebuffer *old_fb = crtc->fb;
++	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
++	struct drm_file *file_priv ;
++	struct vmw_fence_obj *fence = NULL;
++	struct drm_clip_rect clips;
++	int ret;
++
++	if (event == NULL)
++		return -EINVAL;
++
++	/* require ScreenObject support for page flipping */
++	if (!dev_priv->sou_priv)
++		return -ENOSYS;
++
++	file_priv = event->base.file_priv;
++	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
++		return -EINVAL;
++
++	crtc->fb = fb;
++
++	/* do a full screen dirty update */
++	clips.x1 = clips.y1 = 0;
++	clips.x2 = fb->width;
++	clips.y2 = fb->height;
++
++	if (vfb->dmabuf)
++		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
++					  0, 0, &clips, 1, 1, &fence);
++	else
++		ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
++					   0, 0, &clips, 1, 1, &fence);
++
++
++	if (ret != 0)
++		goto out_no_fence;
++	if (!fence) {
++		ret = -EINVAL;
++		goto out_no_fence;
++	}
++
++	ret = vmw_event_fence_action_queue(file_priv, fence,
++					   &event->base,
++					   &event->event.tv_sec,
++					   &event->event.tv_usec,
++					   true);
++
++	/*
++	 * No need to hold on to this now. The only cleanup
++	 * we need to do if we fail is unref the fence.
++	 */
++	vmw_fence_obj_unreference(&fence);
++
++	if (vmw_crtc_to_du(crtc)->is_implicit)
++		vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
++
++	return ret;
++
++out_no_fence:
++	crtc->fb = old_fb;
++	return ret;
++}
++
++
+ void vmw_du_crtc_save(struct drm_crtc *crtc)
+ {
+ }
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+index e1cb855..8184bc5 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+@@ -29,6 +29,7 @@
+ #define VMWGFX_KMS_H_
+ 
+ #include "drmP.h"
++#include "drm_crtc_helper.h"
+ #include "vmwgfx_drv.h"
+ 
+ #define VMWGFX_NUM_DISPLAY_UNITS 8
+@@ -120,6 +121,9 @@ struct vmw_display_unit {
+  * Shared display unit functions - vmwgfx_kms.c
+  */
+ void vmw_display_unit_cleanup(struct vmw_display_unit *du);
++int vmw_du_page_flip(struct drm_crtc *crtc,
++		     struct drm_framebuffer *fb,
++		     struct drm_pending_vblank_event *event);
+ void vmw_du_crtc_save(struct drm_crtc *crtc);
+ void vmw_du_crtc_restore(struct drm_crtc *crtc);
+ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+@@ -153,5 +157,10 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
+ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
+ int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
+ 			      struct drm_vmw_rect *rects);
++bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
++				     struct drm_crtc *crtc);
++void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
++					      struct drm_crtc *crtc);
++
+ 
+ #endif
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+index 8f8dbd4..070fb23 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+@@ -95,7 +95,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
+ 			return 0;
+ 		fb = entry->base.crtc.fb;
+ 
+-		return vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
++		return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
+ 					  fb->bits_per_pixel, fb->depth);
+ 	}
+ 
+@@ -103,7 +103,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
+ 		entry = list_entry(lds->active.next, typeof(*entry), active);
+ 		fb = entry->base.crtc.fb;
+ 
+-		vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
++		vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
+ 				   fb->bits_per_pixel, fb->depth);
+ 	}
+ 
+@@ -354,8 +354,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
+ 	INIT_LIST_HEAD(&ldu->active);
+ 
+ 	ldu->base.pref_active = (unit == 0);
+-	ldu->base.pref_width = 800;
+-	ldu->base.pref_height = 600;
++	ldu->base.pref_width = dev_priv->initial_width;
++	ldu->base.pref_height = dev_priv->initial_height;
+ 	ldu->base.pref_mode = NULL;
+ 	ldu->base.is_implicit = true;
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+index 0795d17..059b32c 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+@@ -1540,29 +1540,10 @@ out_bad_surface:
+ /**
+  * Buffer management.
+  */
+-
+-static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
+-				  unsigned long num_pages)
+-{
+-	static size_t bo_user_size = ~0;
+-
+-	size_t page_array_size =
+-	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
+-
+-	if (unlikely(bo_user_size == ~0)) {
+-		bo_user_size = glob->ttm_bo_extra_size +
+-		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
+-	}
+-
+-	return bo_user_size + page_array_size;
+-}
+-
+ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+ {
+ 	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+-	struct ttm_bo_global *glob = bo->glob;
+ 
+-	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
+ 	kfree(vmw_bo);
+ }
+ 
+@@ -1573,24 +1554,12 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
+ 		    void (*bo_free) (struct ttm_buffer_object *bo))
+ {
+ 	struct ttm_bo_device *bdev = &dev_priv->bdev;
+-	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ 	size_t acc_size;
+ 	int ret;
+ 
+ 	BUG_ON(!bo_free);
+ 
+-	acc_size =
+-	    vmw_dmabuf_acc_size(bdev->glob,
+-				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+-
+-	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+-	if (unlikely(ret != 0)) {
+-		/* we must free the bo here as
+-		 * ttm_buffer_object_init does so as well */
+-		bo_free(&vmw_bo->base);
+-		return ret;
+-	}
+-
++	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
+ 	memset(vmw_bo, 0, sizeof(*vmw_bo));
+ 
+ 	INIT_LIST_HEAD(&vmw_bo->validate_list);
+@@ -1605,9 +1574,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
+ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+ {
+ 	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+-	struct ttm_bo_global *glob = bo->glob;
+ 
+-	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
+ 	kfree(vmw_user_bo);
+ }
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+index 4defdcf..6deaf2f 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+@@ -394,6 +394,7 @@ static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
+ 	.gamma_set = vmw_du_crtc_gamma_set,
+ 	.destroy = vmw_sou_crtc_destroy,
+ 	.set_config = vmw_sou_crtc_set_config,
++	.page_flip = vmw_du_page_flip,
+ };
+ 
+ /*
+@@ -448,8 +449,8 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
+ 	sou->active_implicit = false;
+ 
+ 	sou->base.pref_active = (unit == 0);
+-	sou->base.pref_width = 800;
+-	sou->base.pref_height = 600;
++	sou->base.pref_width = dev_priv->initial_width;
++	sou->base.pref_height = dev_priv->initial_height;
+ 	sou->base.pref_mode = NULL;
+ 	sou->base.is_implicit = true;
+ 
+@@ -535,3 +536,36 @@ int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
+ 
+ 	return 0;
+ }
++
++/**
++ * Returns if this unit can be page flipped.
++ * Must be called with the mode_config mutex held.
++ */
++bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
++				     struct drm_crtc *crtc)
++{
++	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
++
++	if (!sou->base.is_implicit)
++		return true;
++
++	if (dev_priv->sou_priv->num_implicit != 1)
++		return false;
++
++	return true;
++}
++
++/**
++ * Update the implicit fb to the current fb of this crtc.
++ * Must be called with the mode_config mutex held.
++ */
++void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
++					      struct drm_crtc *crtc)
++{
++	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
++
++	BUG_ON(!sou->base.is_implicit);
++
++	dev_priv->sou_priv->implicit_fb =
++		vmw_framebuffer_to_vfb(sou->base.crtc.fb);
++}
+diff --git a/include/drm/Kbuild b/include/drm/Kbuild
+index 3a60ac8..1e38a19 100644
+--- a/include/drm/Kbuild
++++ b/include/drm/Kbuild
+@@ -1,6 +1,8 @@
+ header-y += drm.h
++header-y += drm_fourcc.h
+ header-y += drm_mode.h
+ header-y += drm_sarea.h
++header-y += exynos_drm.h
+ header-y += i810_drm.h
+ header-y += i915_drm.h
+ header-y += mga_drm.h
+diff --git a/include/drm/drm.h b/include/drm/drm.h
+index 4be33b4..64ff02d 100644
+--- a/include/drm/drm.h
++++ b/include/drm/drm.h
+@@ -617,6 +617,17 @@ struct drm_get_cap {
+ 	__u64 value;
+ };
+ 
++#define DRM_CLOEXEC O_CLOEXEC
++struct drm_prime_handle {
++	__u32 handle;
++
++	/** Flags.. only applicable for handle->fd */
++	__u32 flags;
++
++	/** Returned dmabuf file descriptor */
++	__s32 fd;
++};
++
+ #include "drm_mode.h"
+ 
+ #define DRM_IOCTL_BASE			'd'
+@@ -673,7 +684,8 @@ struct drm_get_cap {
+ #define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
+ #define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)
+ 
+-#define DRM_IOCTL_GEM_PRIME_OPEN        DRM_IOWR(0x2e, struct drm_gem_open)
++#define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
++#define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)
+ 
+ #define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
+ #define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
+@@ -714,6 +726,10 @@ struct drm_get_cap {
+ #define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
+ #define DRM_IOCTL_MODE_MAP_DUMB    DRM_IOWR(0xB3, struct drm_mode_map_dumb)
+ #define DRM_IOCTL_MODE_DESTROY_DUMB    DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
++#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
++#define DRM_IOCTL_MODE_GETPLANE	DRM_IOWR(0xB6, struct drm_mode_get_plane)
++#define DRM_IOCTL_MODE_SETPLANE	DRM_IOWR(0xB7, struct drm_mode_set_plane)
++#define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
+ 
+ /**
+  * Device specific ioctls should only be in their respective headers
+@@ -757,6 +773,8 @@ struct drm_event_vblank {
+ 
+ #define DRM_CAP_DUMB_BUFFER 0x1
+ #define DRM_CAP_VBLANK_HIGH_CRTC 0x2
++#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
++#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+ 
+ /* typedef area */
+ #ifndef __KERNEL__
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index bf4b2dc..dd73104 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -91,6 +91,7 @@ struct drm_device;
+ #define DRM_UT_CORE 		0x01
+ #define DRM_UT_DRIVER		0x02
+ #define DRM_UT_KMS		0x04
++#define DRM_UT_PRIME		0x08
+ /*
+  * Three debug levels are defined.
+  * drm_core, drm_driver, drm_kms
+@@ -150,6 +151,7 @@ int drm_err(const char *func, const char *format, ...);
+ #define DRIVER_IRQ_VBL2    0x800
+ #define DRIVER_GEM         0x1000
+ #define DRIVER_MODESET     0x2000
++#define DRIVER_PRIME       0x4000
+ 
+ #define DRIVER_BUS_PCI 0x1
+ #define DRIVER_BUS_PLATFORM 0x2
+@@ -215,6 +217,11 @@ int drm_err(const char *func, const char *format, ...);
+ 		drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME, 		\
+ 					 __func__, fmt, ##args);	\
+ 	} while (0)
++#define DRM_DEBUG_PRIME(fmt, args...)					\
++	do {								\
++		drm_ut_debug_printk(DRM_UT_PRIME, DRM_NAME,		\
++					__func__, fmt, ##args);		\
++	} while (0)
+ #define DRM_LOG(fmt, args...)						\
+ 	do {								\
+ 		drm_ut_debug_printk(DRM_UT_CORE, NULL,			\
+@@ -238,6 +245,7 @@ int drm_err(const char *func, const char *format, ...);
+ #else
+ #define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
+ #define DRM_DEBUG_KMS(fmt, args...)	do { } while (0)
++#define DRM_DEBUG_PRIME(fmt, args...)	do { } while (0)
+ #define DRM_DEBUG(fmt, arg...)		 do { } while (0)
+ #define DRM_LOG(fmt, arg...)		do { } while (0)
+ #define DRM_LOG_KMS(fmt, args...) do { } while (0)
+@@ -410,6 +418,12 @@ struct drm_pending_event {
+ 	void (*destroy)(struct drm_pending_event *event);
+ };
+ 
++/* initial implementaton using a linked list - todo hashtab */
++struct drm_prime_file_private {
++	struct list_head head;
++	struct mutex lock;
++};
++
+ /** File private data */
+ struct drm_file {
+ 	int authenticated;
+@@ -437,6 +451,8 @@ struct drm_file {
+ 	wait_queue_head_t event_wait;
+ 	struct list_head event_list;
+ 	int event_space;
++
++	struct drm_prime_file_private prime;
+ };
+ 
+ /** Wait queue */
+@@ -652,6 +668,12 @@ struct drm_gem_object {
+ 	uint32_t pending_write_domain;
+ 
+ 	void *driver_private;
++
++	/* dma buf exported from this GEM object */
++	struct dma_buf *export_dma_buf;
++
++	/* dma buf attachment backing this object */
++	struct dma_buf_attachment *import_attach;
+ };
+ 
+ #include "drm_crtc.h"
+@@ -820,7 +842,7 @@ struct drm_driver {
+ 	 * Specifically, the timestamp in @vblank_time should correspond as
+ 	 * closely as possible to the time when the first video scanline of
+ 	 * the video frame after the end of VBLANK will start scanning out,
+-	 * the time immmediately after end of the VBLANK interval. If the
++	 * the time immediately after end of the VBLANK interval. If the
+ 	 * @crtc is currently inside VBLANK, this will be a time in the future.
+ 	 * If the @crtc is currently scanning out a frame, this will be the
+ 	 * past start time of the current scanout. This is meant to adhere
+@@ -890,6 +912,20 @@ struct drm_driver {
+ 	int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
+ 	void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
+ 
++	/* prime: */
++	/* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
++	int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
++				uint32_t handle, uint32_t flags, int *prime_fd);
++	/* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
++	int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
++				int prime_fd, uint32_t *handle);
++	/* export GEM -> dmabuf */
++	struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
++				struct drm_gem_object *obj, int flags);
++	/* import dmabuf -> GEM */
++	struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
++				struct dma_buf *dma_buf);
++
+ 	/* vga arb irq handler */
+ 	void (*vgaarb_irq)(struct drm_device *dev, bool state);
+ 
+@@ -918,7 +954,7 @@ struct drm_driver {
+ 	int dev_priv_size;
+ 	struct drm_ioctl_desc *ioctls;
+ 	int num_ioctls;
+-	struct file_operations fops;
++	const struct file_operations *fops;
+ 	union {
+ 		struct pci_driver *pci;
+ 		struct platform_device *platform_device;
+@@ -1170,6 +1206,8 @@ struct drm_device {
+ 	struct idr object_name_idr;
+ 	/*@} */
+ 	int switch_power_state;
++
++	atomic_t unplugged; /* device has been unplugged or gone away */
+ };
+ 
+ #define DRM_SWITCH_POWER_ON 0
+@@ -1235,6 +1273,19 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
+ }
+ #endif
+ 
++static inline void drm_device_set_unplugged(struct drm_device *dev)
++{
++	smp_wmb();
++	atomic_set(&dev->unplugged, 1);
++}
++
++static inline int drm_device_is_unplugged(struct drm_device *dev)
++{
++	int ret = atomic_read(&dev->unplugged);
++	smp_rmb();
++	return ret;
++}
++
+ /******************************************************************/
+ /** \name Internal function definitions */
+ /*@{*/
+@@ -1264,11 +1315,6 @@ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+ 
+ 				/* Memory management support (drm_memory.h) */
+ #include "drm_memory.h"
+-extern void drm_mem_init(void);
+-extern int drm_mem_info(char *buf, char **start, off_t offset,
+-			int request, int *eof, void *data);
+-extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
+-
+ extern void drm_free_agp(DRM_AGP_MEM * handle, int pages);
+ extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
+ extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+@@ -1383,12 +1429,8 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev,
+ 				/* IRQ support (drm_irq.h) */
+ extern int drm_control(struct drm_device *dev, void *data,
+ 		       struct drm_file *file_priv);
+-extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
+ extern int drm_irq_install(struct drm_device *dev);
+ extern int drm_irq_uninstall(struct drm_device *dev);
+-extern void drm_driver_irq_preinstall(struct drm_device *dev);
+-extern void drm_driver_irq_postinstall(struct drm_device *dev);
+-extern void drm_driver_irq_uninstall(struct drm_device *dev);
+ 
+ extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+ extern int drm_wait_vblank(struct drm_device *dev, void *data,
+@@ -1464,6 +1506,7 @@ extern void drm_master_put(struct drm_master **master);
+ 
+ extern void drm_put_dev(struct drm_device *dev);
+ extern int drm_put_minor(struct drm_minor **minor);
++extern void drm_unplug_dev(struct drm_device *dev);
+ extern unsigned int drm_debug;
+ 
+ extern unsigned int drm_vblank_offdelay;
+@@ -1502,6 +1545,32 @@ extern int drm_vblank_info(struct seq_file *m, void *data);
+ extern int drm_clients_info(struct seq_file *m, void* data);
+ extern int drm_gem_name_info(struct seq_file *m, void *data);
+ 
++
++extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
++		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
++		int *prime_fd);
++extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
++		struct drm_file *file_priv, int prime_fd, uint32_t *handle);
++
++extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
++					struct drm_file *file_priv);
++extern int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
++					struct drm_file *file_priv);
++
++extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
++extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
++
++
++void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
++void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
++int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
++int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
++void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
++
++int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
++int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
++			 struct drm_gem_object **obj);
++
+ #if DRM_DEBUG_CODE
+ extern int drm_vma_info(struct seq_file *m, void *data);
+ #endif
+@@ -1697,5 +1766,13 @@ extern void drm_platform_exit(struct drm_driver *driver, struct platform_device
+ extern int drm_get_platform_dev(struct platform_device *pdev,
+ 				struct drm_driver *driver);
+ 
++/* returns true if currently okay to sleep */
++static __inline__ bool drm_can_sleep(void)
++{
++	if (in_atomic() || in_dbg_master() || irqs_disabled())
++		return false;
++	return true;
++}
++
+ #endif				/* __KERNEL__ */
+ #endif
+diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
+index 8020798..e250eda 100644
+--- a/include/drm/drm_crtc.h
++++ b/include/drm/drm_crtc.h
+@@ -29,9 +29,10 @@
+ #include <linux/spinlock.h>
+ #include <linux/types.h>
+ #include <linux/idr.h>
+-
+ #include <linux/fb.h>
+ 
++#include <drm/drm_fourcc.h>
++
+ struct drm_device;
+ struct drm_mode_set;
+ struct drm_framebuffer;
+@@ -44,6 +45,7 @@ struct drm_framebuffer;
+ #define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
+ #define DRM_MODE_OBJECT_FB 0xfbfbfbfb
+ #define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
++#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+ 
+ struct drm_mode_object {
+ 	uint32_t id;
+@@ -118,9 +120,8 @@ struct drm_display_mode {
+ 
+ 	char name[DRM_DISPLAY_MODE_LEN];
+ 
+-	int connector_count;
+ 	enum drm_mode_status status;
+-	int type;
++	unsigned int type;
+ 
+ 	/* Proposed mode values */
+ 	int clock;		/* in kHz */
+@@ -238,13 +239,15 @@ struct drm_framebuffer {
+ 	struct list_head head;
+ 	struct drm_mode_object base;
+ 	const struct drm_framebuffer_funcs *funcs;
+-	unsigned int pitch;
++	unsigned int pitches[4];
++	unsigned int offsets[4];
+ 	unsigned int width;
+ 	unsigned int height;
+ 	/* depth can be 15 or 16 */
+ 	unsigned int depth;
+ 	int bits_per_pixel;
+ 	int flags;
++	uint32_t pixel_format; /* fourcc format */
+ 	struct list_head filp_head;
+ 	/* if you are using the helper */
+ 	void *helper_private;
+@@ -254,7 +257,7 @@ struct drm_property_blob {
+ 	struct drm_mode_object base;
+ 	struct list_head head;
+ 	unsigned int length;
+-	void *data;
++	unsigned char data[];
+ };
+ 
+ struct drm_property_enum {
+@@ -278,6 +281,7 @@ struct drm_crtc;
+ struct drm_connector;
+ struct drm_encoder;
+ struct drm_pending_vblank_event;
++struct drm_plane;
+ 
+ /**
+  * drm_crtc_funcs - control CRTCs for a given device
+@@ -341,10 +345,21 @@ struct drm_crtc_funcs {
+ 
+ /**
+  * drm_crtc - central CRTC control structure
++ * @dev: parent DRM device
++ * @head: list management
++ * @base: base KMS object for ID tracking etc.
+  * @enabled: is this CRTC enabled?
++ * @mode: current mode timings
++ * @hwmode: mode timings as programmed to hw regs
+  * @x: x position on screen
+  * @y: y position on screen
+  * @funcs: CRTC control functions
++ * @gamma_size: size of gamma ramp
++ * @gamma_store: gamma ramp values
++ * @framedur_ns: precise frame timing
++ * @framedur_ns: precise line timing
++ * @pixeldur_ns: precise pixel timing
++ * @helper_private: mid-layer private data
+  *
+  * Each CRTC may have one or more connectors associated with it.  This structure
+  * allows the CRTC to be controlled.
+@@ -423,6 +438,13 @@ struct drm_connector_funcs {
+ 	void (*force)(struct drm_connector *connector);
+ };
+ 
++/**
++ * drm_encoder_funcs - encoder controls
++ * @reset: reset state (e.g. at init or resume time)
++ * @destroy: cleanup and free associated data
++ *
++ * Encoders sit between CRTCs and connectors.
++ */
+ struct drm_encoder_funcs {
+ 	void (*reset)(struct drm_encoder *encoder);
+ 	void (*destroy)(struct drm_encoder *encoder);
+@@ -431,10 +453,22 @@ struct drm_encoder_funcs {
+ #define DRM_CONNECTOR_MAX_UMODES 16
+ #define DRM_CONNECTOR_MAX_PROPERTY 16
+ #define DRM_CONNECTOR_LEN 32
+-#define DRM_CONNECTOR_MAX_ENCODER 2
++#define DRM_CONNECTOR_MAX_ENCODER 3
+ 
+ /**
+  * drm_encoder - central DRM encoder structure
++ * @dev: parent DRM device
++ * @head: list management
++ * @base: base KMS object
++ * @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h
++ * @possible_crtcs: bitmask of potential CRTC bindings
++ * @possible_clones: bitmask of potential sibling encoders for cloning
++ * @crtc: currently bound CRTC
++ * @funcs: control functions
++ * @helper_private: mid-layer private data
++ *
++ * CRTCs drive pixels to encoders, which convert them into signals
++ * appropriate for a given connector or set of connectors.
+  */
+ struct drm_encoder {
+ 	struct drm_device *dev;
+@@ -470,14 +504,37 @@ enum drm_connector_force {
+ 
+ /**
+  * drm_connector - central DRM connector control structure
+- * @crtc: CRTC this connector is currently connected to, NULL if none
++ * @dev: parent DRM device
++ * @kdev: kernel device for sysfs attributes
++ * @attr: sysfs attributes
++ * @head: list management
++ * @base: base KMS object
++ * @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
++ * @connector_type_id: index into connector type enum
+  * @interlace_allowed: can this connector handle interlaced modes?
+  * @doublescan_allowed: can this connector handle doublescan?
+- * @available_modes: modes available on this connector (from get_modes() + user)
+- * @initial_x: initial x position for this connector
+- * @initial_y: initial y position for this connector
+- * @status: connector connected?
++ * @modes: modes available on this connector (from fill_modes() + user)
++ * @status: one of the drm_connector_status enums (connected, not, or unknown)
++ * @probed_modes: list of modes derived directly from the display
++ * @display_info: information about attached display (e.g. from EDID)
+  * @funcs: connector control functions
++ * @user_modes: user added mode list
++ * @edid_blob_ptr: DRM property containing EDID if present
++ * @property_ids: property tracking for this connector
++ * @property_values: value pointers or data for properties
++ * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
++ * @dpms: current dpms state
++ * @helper_private: mid-layer private data
++ * @force: a %DRM_FORCE_<foo> state for forced mode sets
++ * @encoder_ids: valid encoders for this connector
++ * @encoder: encoder driving this connector, if any
++ * @eld: EDID-like data, if present
++ * @dvi_dual: dual link DVI, if found
++ * @max_tmds_clock: max clock rate, if found
++ * @latency_present: AV delay info from ELD, if found
++ * @video_latency: video latency info from ELD, if found
++ * @audio_latency: audio latency info from ELD, if found
++ * @null_edid_counter: track sinks that give us all zeros for the EDID
+  *
+  * Each connector may be connected to one or more CRTCs, or may be clonable by
+  * another connector if they can share a CRTC.  Each connector also has a specific
+@@ -498,7 +555,6 @@ struct drm_connector {
+ 	bool doublescan_allowed;
+ 	struct list_head modes; /* list of modes on this connector */
+ 
+-	int initial_x, initial_y;
+ 	enum drm_connector_status status;
+ 
+ 	/* these are modes added by probing with DDC or the BIOS */
+@@ -522,7 +578,6 @@ struct drm_connector {
+ 	/* forced on connector */
+ 	enum drm_connector_force force;
+ 	uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+-	uint32_t force_encoder_id;
+ 	struct drm_encoder *encoder; /* currently active encoder */
+ 
+ 	/* EDID bits */
+@@ -536,7 +591,71 @@ struct drm_connector {
+ };
+ 
+ /**
+- * struct drm_mode_set
++ * drm_plane_funcs - driver plane control functions
++ * @update_plane: update the plane configuration
++ * @disable_plane: shut down the plane
++ * @destroy: clean up plane resources
++ */
++struct drm_plane_funcs {
++	int (*update_plane)(struct drm_plane *plane,
++			    struct drm_crtc *crtc, struct drm_framebuffer *fb,
++			    int crtc_x, int crtc_y,
++			    unsigned int crtc_w, unsigned int crtc_h,
++			    uint32_t src_x, uint32_t src_y,
++			    uint32_t src_w, uint32_t src_h);
++	int (*disable_plane)(struct drm_plane *plane);
++	void (*destroy)(struct drm_plane *plane);
++};
++
++/**
++ * drm_plane - central DRM plane control structure
++ * @dev: DRM device this plane belongs to
++ * @head: for list management
++ * @base: base mode object
++ * @possible_crtcs: pipes this plane can be bound to
++ * @format_types: array of formats supported by this plane
++ * @format_count: number of formats supported
++ * @crtc: currently bound CRTC
++ * @fb: currently bound fb
++ * @gamma_size: size of gamma table
++ * @gamma_store: gamma correction table
++ * @enabled: enabled flag
++ * @funcs: helper functions
++ * @helper_private: storage for driver layer
++ */
++struct drm_plane {
++	struct drm_device *dev;
++	struct list_head head;
++
++	struct drm_mode_object base;
++
++	uint32_t possible_crtcs;
++	uint32_t *format_types;
++	uint32_t format_count;
++
++	struct drm_crtc *crtc;
++	struct drm_framebuffer *fb;
++
++	/* CRTC gamma size for reporting to userspace */
++	uint32_t gamma_size;
++	uint16_t *gamma_store;
++
++	bool enabled;
++
++	const struct drm_plane_funcs *funcs;
++	void *helper_private;
++};
++
++/**
++ * drm_mode_set - new values for a CRTC config change
++ * @head: list management
++ * @fb: framebuffer to use for new config
++ * @crtc: CRTC whose configuration we're about to change
++ * @mode: mode timings to use
++ * @x: position of this CRTC relative to @fb
++ * @y: position of this CRTC relative to @fb
++ * @connectors: array of connectors to drive with this CRTC if possible
++ * @num_connectors: size of @connectors array
+  *
+  * Represents a single crtc the connectors that it drives with what mode
+  * and from which framebuffer it scans out from.
+@@ -558,13 +677,33 @@ struct drm_mode_set {
+ };
+ 
+ /**
+- * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
++ * struct drm_mode_config_funcs - basic driver provided mode setting functions
++ * @fb_create: create a new framebuffer object
++ * @output_poll_changed: function to handle output configuration changes
++ *
++ * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
++ * involve drivers.
+  */
+ struct drm_mode_config_funcs {
+-	struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd);
++	struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
++					     struct drm_file *file_priv,
++					     struct drm_mode_fb_cmd2 *mode_cmd);
+ 	void (*output_poll_changed)(struct drm_device *dev);
+ };
+ 
++/**
++ * drm_mode_group - group of mode setting resources for potential sub-grouping
++ * @num_crtcs: CRTC count
++ * @num_encoders: encoder count
++ * @num_connectors: connector count
++ * @id_list: list of KMS object IDs in this group
++ *
++ * Currently this simply tracks the global mode setting state.  But in the
++ * future it could allow groups of objects to be set aside into independent
++ * control groups for use by different user level processes (e.g. two X servers
++ * running simultaneously on different heads, each with their own mode
++ * configuration and freedom of mode setting).
++ */
+ struct drm_mode_group {
+ 	uint32_t num_crtcs;
+ 	uint32_t num_encoders;
+@@ -576,7 +715,30 @@ struct drm_mode_group {
+ 
+ /**
+  * drm_mode_config - Mode configuration control structure
++ * @mutex: mutex protecting KMS related lists and structures
++ * @idr_mutex: mutex for KMS ID allocation and management
++ * @crtc_idr: main KMS ID tracking object
++ * @num_fb: number of fbs available
++ * @fb_list: list of framebuffers available
++ * @num_connector: number of connectors on this device
++ * @connector_list: list of connector objects
++ * @num_encoder: number of encoders on this device
++ * @encoder_list: list of encoder objects
++ * @num_crtc: number of CRTCs on this device
++ * @crtc_list: list of CRTC objects
++ * @min_width: minimum pixel width on this device
++ * @min_height: minimum pixel height on this device
++ * @max_width: maximum pixel width on this device
++ * @max_height: maximum pixel height on this device
++ * @funcs: core driver provided mode setting functions
++ * @fb_base: base address of the framebuffer
++ * @poll_enabled: track polling status for this device
++ * @output_poll_work: delayed work for polling in process context
++ * @*_property: core property tracking
+  *
++ * Core mode resource tracking structure.  All CRTC, encoders, and connectors
++ * enumerated by the driver are added here, as are global properties.  Some
++ * global restrictions are also here, e.g. dimension restrictions.
+  */
+ struct drm_mode_config {
+ 	struct mutex mutex; /* protects configuration (mode lists etc.) */
+@@ -589,6 +751,8 @@ struct drm_mode_config {
+ 	struct list_head connector_list;
+ 	int num_encoder;
+ 	struct list_head encoder_list;
++	int num_plane;
++	struct list_head plane_list;
+ 
+ 	int num_crtc;
+ 	struct list_head crtc_list;
+@@ -632,6 +796,9 @@ struct drm_mode_config {
+ 	struct drm_property *scaling_mode_property;
+ 	struct drm_property *dithering_mode_property;
+ 	struct drm_property *dirty_info_property;
++
++	/* dumb ioctl parameters */
++	uint32_t preferred_depth, prefer_shadow;
+ };
+ 
+ #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
+@@ -641,24 +808,39 @@ struct drm_mode_config {
+ #define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
+ #define obj_to_property(x) container_of(x, struct drm_property, base)
+ #define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
++#define obj_to_plane(x) container_of(x, struct drm_plane, base)
+ 
++struct drm_prop_enum_list {
++	int type;
++	char *name;
++};
+ 
+-extern void drm_crtc_init(struct drm_device *dev,
+-			  struct drm_crtc *crtc,
+-			  const struct drm_crtc_funcs *funcs);
++extern int drm_crtc_init(struct drm_device *dev,
++			 struct drm_crtc *crtc,
++			 const struct drm_crtc_funcs *funcs);
+ extern void drm_crtc_cleanup(struct drm_crtc *crtc);
+ 
+-extern void drm_connector_init(struct drm_device *dev,
+-			    struct drm_connector *connector,
+-			    const struct drm_connector_funcs *funcs,
+-			    int connector_type);
++extern int drm_connector_init(struct drm_device *dev,
++			      struct drm_connector *connector,
++			      const struct drm_connector_funcs *funcs,
++			      int connector_type);
+ 
+ extern void drm_connector_cleanup(struct drm_connector *connector);
+-
+-extern void drm_encoder_init(struct drm_device *dev,
+-			     struct drm_encoder *encoder,
+-			     const struct drm_encoder_funcs *funcs,
+-			     int encoder_type);
++/* helper to unplug all connectors from sysfs for device */
++extern void drm_connector_unplug_all(struct drm_device *dev);
++
++extern int drm_encoder_init(struct drm_device *dev,
++			    struct drm_encoder *encoder,
++			    const struct drm_encoder_funcs *funcs,
++			    int encoder_type);
++
++extern int drm_plane_init(struct drm_device *dev,
++			  struct drm_plane *plane,
++			  unsigned long possible_crtcs,
++			  const struct drm_plane_funcs *funcs,
++			  const uint32_t *formats, uint32_t format_count,
++			  bool priv);
++extern void drm_plane_cleanup(struct drm_plane *plane);
+ 
+ extern void drm_encoder_cleanup(struct drm_encoder *encoder);
+ 
+@@ -675,6 +857,7 @@ extern struct edid *drm_get_edid(struct drm_connector *connector,
+ extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
+ extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
+ extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
++extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
+ extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
+ 						   const struct drm_display_mode *mode);
+ extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
+@@ -689,7 +872,7 @@ extern int drm_mode_height(struct drm_display_mode *mode);
+ /* for us by fb module */
+ extern int drm_mode_attachmode_crtc(struct drm_device *dev,
+ 				    struct drm_crtc *crtc,
+-				    struct drm_display_mode *mode);
++				    const struct drm_display_mode *mode);
+ extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
+ 
+ extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
+@@ -731,6 +914,13 @@ extern int drm_connector_attach_property(struct drm_connector *connector,
+ 				      struct drm_property *property, uint64_t init_val);
+ extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
+ 						const char *name, int num_values);
++extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
++					 const char *name,
++					 const struct drm_prop_enum_list *props,
++					 int num_values);
++struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
++					 const char *name,
++					 uint64_t min, uint64_t max);
+ extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
+ extern int drm_property_add_enum(struct drm_property *property, int index,
+ 				 uint64_t value, const char *name);
+@@ -746,24 +936,32 @@ extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+ 					     struct drm_encoder *encoder);
+ extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+ 					   struct drm_encoder *encoder);
+-extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
++extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ 					 int gamma_size);
+ extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+ 		uint32_t id, uint32_t type);
+ /* IOCTLs */
+ extern int drm_mode_getresources(struct drm_device *dev,
+ 				 void *data, struct drm_file *file_priv);
+-
++extern int drm_mode_getplane_res(struct drm_device *dev, void *data,
++				   struct drm_file *file_priv);
+ extern int drm_mode_getcrtc(struct drm_device *dev,
+ 			    void *data, struct drm_file *file_priv);
+ extern int drm_mode_getconnector(struct drm_device *dev,
+ 			      void *data, struct drm_file *file_priv);
+ extern int drm_mode_setcrtc(struct drm_device *dev,
+ 			    void *data, struct drm_file *file_priv);
++extern int drm_mode_getplane(struct drm_device *dev,
++			       void *data, struct drm_file *file_priv);
++extern int drm_mode_setplane(struct drm_device *dev,
++			       void *data, struct drm_file *file_priv);
+ extern int drm_mode_cursor_ioctl(struct drm_device *dev,
+ 				void *data, struct drm_file *file_priv);
+ extern int drm_mode_addfb(struct drm_device *dev,
+ 			  void *data, struct drm_file *file_priv);
++extern int drm_mode_addfb2(struct drm_device *dev,
++			   void *data, struct drm_file *file_priv);
++extern uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
+ extern int drm_mode_rmfb(struct drm_device *dev,
+ 			 void *data, struct drm_file *file_priv);
+ extern int drm_mode_getfb(struct drm_device *dev,
+@@ -814,6 +1012,7 @@ extern int drm_add_modes_noedid(struct drm_connector *connector,
+ 				int hdisplay, int vdisplay);
+ 
+ extern int drm_edid_header_is_valid(const u8 *raw_edid);
++extern bool drm_edid_block_valid(u8 *raw_edid);
+ extern bool drm_edid_is_valid(struct edid *edid);
+ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
+ 					   int hsize, int vsize, int fresh);
+@@ -824,4 +1023,7 @@ extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ 				    void *data, struct drm_file *file_priv);
+ extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ 				      void *data, struct drm_file *file_priv);
++
++extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
++				 int *bpp);
+ #endif /* __DRM_CRTC_H__ */
+diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
+index 73b0712..37515d1 100644
+--- a/include/drm/drm_crtc_helper.h
++++ b/include/drm/drm_crtc_helper.h
+@@ -117,7 +117,7 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
+ extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+ 
+ extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
+-					  struct drm_mode_fb_cmd *mode_cmd);
++					  struct drm_mode_fb_cmd2 *mode_cmd);
+ 
+ static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
+ 				       const struct drm_crtc_helper_funcs *funcs)
+@@ -144,4 +144,7 @@ extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+ 
+ extern void drm_kms_helper_poll_disable(struct drm_device *dev);
+ extern void drm_kms_helper_poll_enable(struct drm_device *dev);
++
++extern int drm_format_num_planes(uint32_t format);
++
+ #endif
+diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
+index 74ce916..bcb9a66 100644
+--- a/include/drm/drm_edid.h
++++ b/include/drm/drm_edid.h
+@@ -238,5 +238,6 @@ int drm_av_sync_delay(struct drm_connector *connector,
+ 		      struct drm_display_mode *mode);
+ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+ 				     struct drm_display_mode *mode);
++int drm_load_edid_firmware(struct drm_connector *connector);
+ 
+ #endif /* __DRM_EDID_H__ */
+diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
+index 6e3076a..5120b01 100644
+--- a/include/drm/drm_fb_helper.h
++++ b/include/drm/drm_fb_helper.h
+@@ -35,7 +35,6 @@ struct drm_fb_helper;
+ #include <linux/kgdb.h>
+ 
+ struct drm_fb_helper_crtc {
+-	uint32_t crtc_id;
+ 	struct drm_mode_set mode_set;
+ 	struct drm_display_mode *desired_mode;
+ };
+@@ -74,7 +73,6 @@ struct drm_fb_helper {
+ 	int connector_count;
+ 	struct drm_fb_helper_connector **connector_info;
+ 	struct drm_fb_helper_funcs *funcs;
+-	int conn_limit;
+ 	struct fb_info *fbdev;
+ 	u32 pseudo_palette[17];
+ 	struct list_head kernel_fb_list;
+diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
+new file mode 100644
+index 0000000..bdf0152
+--- /dev/null
++++ b/include/drm/drm_fourcc.h
+@@ -0,0 +1,137 @@
++/*
++ * Copyright 2011 Intel Corporation
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef DRM_FOURCC_H
++#define DRM_FOURCC_H
++
++#include <linux/types.h>
++
++#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
++				 ((__u32)(c) << 16) | ((__u32)(d) << 24))
++
++#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
++
++/* color index */
++#define DRM_FORMAT_C8		fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
++
++/* 8 bpp RGB */
++#define DRM_FORMAT_RGB332	fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
++#define DRM_FORMAT_BGR233	fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
++
++/* 16 bpp RGB */
++#define DRM_FORMAT_XRGB4444	fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
++#define DRM_FORMAT_XBGR4444	fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
++#define DRM_FORMAT_RGBX4444	fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
++#define DRM_FORMAT_BGRX4444	fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
++
++#define DRM_FORMAT_ARGB4444	fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
++#define DRM_FORMAT_ABGR4444	fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
++#define DRM_FORMAT_RGBA4444	fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
++#define DRM_FORMAT_BGRA4444	fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
++
++#define DRM_FORMAT_XRGB1555	fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
++#define DRM_FORMAT_XBGR1555	fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
++#define DRM_FORMAT_RGBX5551	fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
++#define DRM_FORMAT_BGRX5551	fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
++
++#define DRM_FORMAT_ARGB1555	fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
++#define DRM_FORMAT_ABGR1555	fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
++#define DRM_FORMAT_RGBA5551	fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
++#define DRM_FORMAT_BGRA5551	fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
++
++#define DRM_FORMAT_RGB565	fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
++#define DRM_FORMAT_BGR565	fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
++
++/* 24 bpp RGB */
++#define DRM_FORMAT_RGB888	fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
++#define DRM_FORMAT_BGR888	fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
++
++/* 32 bpp RGB */
++#define DRM_FORMAT_XRGB8888	fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
++#define DRM_FORMAT_XBGR8888	fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
++#define DRM_FORMAT_RGBX8888	fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
++#define DRM_FORMAT_BGRX8888	fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
++
++#define DRM_FORMAT_ARGB8888	fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
++#define DRM_FORMAT_ABGR8888	fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
++#define DRM_FORMAT_RGBA8888	fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
++#define DRM_FORMAT_BGRA8888	fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
++
++#define DRM_FORMAT_XRGB2101010	fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
++#define DRM_FORMAT_XBGR2101010	fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
++#define DRM_FORMAT_RGBX1010102	fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
++#define DRM_FORMAT_BGRX1010102	fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
++
++#define DRM_FORMAT_ARGB2101010	fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
++#define DRM_FORMAT_ABGR2101010	fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
++#define DRM_FORMAT_RGBA1010102	fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
++#define DRM_FORMAT_BGRA1010102	fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
++
++/* packed YCbCr */
++#define DRM_FORMAT_YUYV		fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
++#define DRM_FORMAT_YVYU		fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
++#define DRM_FORMAT_UYVY		fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
++#define DRM_FORMAT_VYUY		fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
++
++#define DRM_FORMAT_AYUV		fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
++
++/*
++ * 2 plane YCbCr
++ * index 0 = Y plane, [7:0] Y
++ * index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
++ * or
++ * index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
++ */
++#define DRM_FORMAT_NV12		fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
++#define DRM_FORMAT_NV21		fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
++#define DRM_FORMAT_NV16		fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
++#define DRM_FORMAT_NV61		fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
++
++/* 2 non contiguous plane YCbCr */
++#define DRM_FORMAT_NV12M	fourcc_code('N', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane */
++#define DRM_FORMAT_NV12MT	fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
++
++/*
++ * 3 plane YCbCr
++ * index 0: Y plane, [7:0] Y
++ * index 1: Cb plane, [7:0] Cb
++ * index 2: Cr plane, [7:0] Cr
++ * or
++ * index 1: Cr plane, [7:0] Cr
++ * index 2: Cb plane, [7:0] Cb
++ */
++#define DRM_FORMAT_YUV410	fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
++#define DRM_FORMAT_YVU410	fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
++#define DRM_FORMAT_YUV411	fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
++#define DRM_FORMAT_YVU411	fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
++#define DRM_FORMAT_YUV420	fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
++#define DRM_FORMAT_YVU420	fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
++#define DRM_FORMAT_YUV422	fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
++#define DRM_FORMAT_YVU422	fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
++#define DRM_FORMAT_YUV444	fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
++#define DRM_FORMAT_YVU444	fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
++
++/* 3 non contiguous plane YCbCr */
++#define DRM_FORMAT_YUV420M	fourcc_code('Y', 'M', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
++
++#endif /* DRM_FOURCC_H */
+diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
+index 7639f18..9242310 100644
+--- a/include/drm/drm_mode.h
++++ b/include/drm/drm_mode.h
+@@ -27,6 +27,8 @@
+ #ifndef _DRM_MODE_H
+ #define _DRM_MODE_H
+ 
++#include <linux/types.h>
++
+ #define DRM_DISPLAY_INFO_LEN	32
+ #define DRM_CONNECTOR_NAME_LEN	32
+ #define DRM_DISPLAY_MODE_LEN	32
+@@ -120,11 +122,48 @@ struct drm_mode_crtc {
+ 	struct drm_mode_modeinfo mode;
+ };
+ 
+-#define DRM_MODE_ENCODER_NONE	 0
+-#define DRM_MODE_ENCODER_DAC	 1
+-#define DRM_MODE_ENCODER_TMDS	 2
+-#define DRM_MODE_ENCODER_LVDS	 3
+-#define DRM_MODE_ENCODER_TVDAC	 4
++#define DRM_MODE_PRESENT_TOP_FIELD	(1<<0)
++#define DRM_MODE_PRESENT_BOTTOM_FIELD	(1<<1)
++
++/* Planes blend with or override other bits on the CRTC */
++struct drm_mode_set_plane {
++	__u32 plane_id;
++	__u32 crtc_id;
++	__u32 fb_id; /* fb object contains surface format type */
++	__u32 flags; /* see above flags */
++
++	/* Signed dest location allows it to be partially off screen */
++	__s32 crtc_x, crtc_y;
++	__u32 crtc_w, crtc_h;
++
++	/* Source values are 16.16 fixed point */
++	__u32 src_x, src_y;
++	__u32 src_h, src_w;
++};
++
++struct drm_mode_get_plane {
++	__u32 plane_id;
++
++	__u32 crtc_id;
++	__u32 fb_id;
++
++	__u32 possible_crtcs;
++	__u32 gamma_size;
++
++	__u32 count_format_types;
++	__u64 format_type_ptr;
++};
++
++struct drm_mode_get_plane_res {
++	__u64 plane_id_ptr;
++	__u32 count_planes;
++};
++
++#define DRM_MODE_ENCODER_NONE	0
++#define DRM_MODE_ENCODER_DAC	1
++#define DRM_MODE_ENCODER_TMDS	2
++#define DRM_MODE_ENCODER_LVDS	3
++#define DRM_MODE_ENCODER_TVDAC	4
+ #define DRM_MODE_ENCODER_VIRTUAL 5
+ 
+ struct drm_mode_get_encoder {
+@@ -231,6 +270,33 @@ struct drm_mode_fb_cmd {
+ 	__u32 handle;
+ };
+ 
++#define DRM_MODE_FB_INTERLACED	(1<<0) /* for interlaced framebuffers */
++
++struct drm_mode_fb_cmd2 {
++	__u32 fb_id;
++	__u32 width, height;
++	__u32 pixel_format; /* fourcc code from drm_fourcc.h */
++	__u32 flags; /* see above flags */
++
++	/*
++	 * In case of planar formats, this ioctl allows up to 4
++	 * buffer objects with offsets and pitches per plane.
++	 * The pitch and offset order is dictated by the fourcc,
++	 * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
++	 *
++	 *   YUV 4:2:0 image with a plane of 8 bit Y samples
++	 *   followed by an interleaved U/V plane containing
++	 *   8 bit 2x2 subsampled colour difference samples.
++	 *
++	 * So it would consist of Y as offset[0] and UV as
++	 * offeset[1].  Note that offset[0] will generally
++	 * be 0.
++	 */
++	__u32 handles[4];
++	__u32 pitches[4]; /* pitch for each plane */
++	__u32 offsets[4]; /* offset of each plane */
++};
++
+ #define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
+ #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
+ #define DRM_MODE_FB_DIRTY_FLAGS         0x03
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index def807c..7207a99 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -204,11 +204,57 @@
+ 	{0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x684C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+@@ -516,6 +562,30 @@
+ 	{0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ 	{0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ 	{0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9904, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9905, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9906, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
++	{0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ 	{0, 0, 0}
+ 
+ #define r128_PCI_IDS \
+diff --git a/include/drm/drm_sman.h b/include/drm/drm_sman.h
+deleted file mode 100644
+index 08ecf83..0000000
+--- a/include/drm/drm_sman.h
++++ /dev/null
+@@ -1,176 +0,0 @@
+-/**************************************************************************
+- *
+- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+- * All Rights Reserved.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the
+- * "Software"), to deal in the Software without restriction, including
+- * without limitation the rights to use, copy, modify, merge, publish,
+- * distribute, sub license, and/or sell copies of the Software, and to
+- * permit persons to whom the Software is furnished to do so, subject to
+- * the following conditions:
+- *
+- * The above copyright notice and this permission notice (including the
+- * next paragraph) shall be included in all copies or substantial portions
+- * of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+- *
+- *
+- **************************************************************************/
+-/*
+- * Simple memory MANager interface that keeps track on allocate regions on a
+- * per "owner" basis. All regions associated with an "owner" can be released
+- * with a simple call. Typically if the "owner" exists. The owner is any
+- * "unsigned long" identifier. Can typically be a pointer to a file private
+- * struct or a context identifier.
+- *
+- * Authors:
+- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+- */
+-
+-#ifndef DRM_SMAN_H
+-#define DRM_SMAN_H
+-
+-#include "drmP.h"
+-#include "drm_hashtab.h"
+-
+-/*
+- * A class that is an abstration of a simple memory allocator.
+- * The sman implementation provides a default such allocator
+- * using the drm_mm.c implementation. But the user can replace it.
+- * See the SiS implementation, which may use the SiS FB kernel module
+- * for memory management.
+- */
+-
+-struct drm_sman_mm {
+-	/* private info. If allocated, needs to be destroyed by the destroy
+-	   function */
+-	void *private;
+-
+-	/* Allocate a memory block with given size and alignment.
+-	   Return an opaque reference to the memory block */
+-
+-	void *(*allocate) (void *private, unsigned long size,
+-			   unsigned alignment);
+-
+-	/* Free a memory block. "ref" is the opaque reference that we got from
+-	   the "alloc" function */
+-
+-	void (*free) (void *private, void *ref);
+-
+-	/* Free all resources associated with this allocator */
+-
+-	void (*destroy) (void *private);
+-
+-	/* Return a memory offset from the opaque reference returned from the
+-	   "alloc" function */
+-
+-	unsigned long (*offset) (void *private, void *ref);
+-};
+-
+-struct drm_memblock_item {
+-	struct list_head owner_list;
+-	struct drm_hash_item user_hash;
+-	void *mm_info;
+-	struct drm_sman_mm *mm;
+-	struct drm_sman *sman;
+-};
+-
+-struct drm_sman {
+-	struct drm_sman_mm *mm;
+-	int num_managers;
+-	struct drm_open_hash owner_hash_tab;
+-	struct drm_open_hash user_hash_tab;
+-	struct list_head owner_items;
+-};
+-
+-/*
+- * Take down a memory manager. This function should only be called after a
+- * successful init and after a call to drm_sman_cleanup.
+- */
+-
+-extern void drm_sman_takedown(struct drm_sman * sman);
+-
+-/*
+- * Allocate structures for a manager.
+- * num_managers are the number of memory pools to manage. (VRAM, AGP, ....)
+- * user_order is the log2 of the number of buckets in the user hash table.
+- *	    set this to approximately log2 of the max number of memory regions
+- *	    that will be allocated for _all_ pools together.
+- * owner_order is the log2 of the number of buckets in the owner hash table.
+- *	    set this to approximately log2 of
+- *	    the number of client file connections that will
+- *	    be using the manager.
+- *
+- */
+-
+-extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
+-			 unsigned int user_order, unsigned int owner_order);
+-
+-/*
+- * Initialize a drm_mm.c allocator. Should be called only once for each
+- * manager unless a customized allogator is used.
+- */
+-
+-extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
+-			      unsigned long start, unsigned long size);
+-
+-/*
+- * Initialize a customized allocator for one of the managers.
+- * (See the SiS module). The object pointed to by "allocator" is copied,
+- * so it can be destroyed after this call.
+- */
+-
+-extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger,
+-				struct drm_sman_mm * allocator);
+-
+-/*
+- * Allocate a memory block. Aligment is not implemented yet.
+- */
+-
+-extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
+-						unsigned int manager,
+-						unsigned long size,
+-						unsigned alignment,
+-						unsigned long owner);
+-/*
+- * Free a memory block identified by its user hash key.
+- */
+-
+-extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
+-
+-/*
+- * returns 1 iff there are no stale memory blocks associated with this owner.
+- * Typically called to determine if we need to idle the hardware and call
+- * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
+- * resources associated with owner.
+- */
+-
+-extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
+-
+-/*
+- * Frees all stale memory blocks associated with this owner. Note that this
+- * requires that the hardware is finished with all blocks, so the graphics engine
+- * should be idled before this call is made. This function also frees
+- * any resources associated with "owner" and should be called when owner
+- * is not going to be referenced anymore.
+- */
+-
+-extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
+-
+-/*
+- * Frees all stale memory blocks associated with the memory manager.
+- * See idling above.
+- */
+-
+-extern void drm_sman_cleanup(struct drm_sman * sman);
+-
+-#endif
+diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
+index 1205043..e478de4 100644
+--- a/include/drm/exynos_drm.h
++++ b/include/drm/exynos_drm.h
+@@ -74,9 +74,38 @@ struct drm_exynos_gem_mmap {
+ 	uint64_t mapped;
+ };
+ 
++/**
++ * A structure for user connection request of virtual display.
++ *
++ * @connection: indicate whether doing connetion or not by user.
++ * @extensions: if this value is 1 then the vidi driver would need additional
++ *	128bytes edid data.
++ * @edid: the edid data pointer from user side.
++ */
++struct drm_exynos_vidi_connection {
++	unsigned int connection;
++	unsigned int extensions;
++	uint64_t edid;
++};
++
++struct drm_exynos_plane_set_zpos {
++	__u32 plane_id;
++	__s32 zpos;
++};
++
++/* memory type definitions. */
++enum e_drm_exynos_gem_mem_type {
++	/* Physically Non-Continuous memory. */
++	EXYNOS_BO_NONCONTIG	= 1 << 0,
++	EXYNOS_BO_MASK		= EXYNOS_BO_NONCONTIG
++};
++
+ #define DRM_EXYNOS_GEM_CREATE		0x00
+ #define DRM_EXYNOS_GEM_MAP_OFFSET	0x01
+ #define DRM_EXYNOS_GEM_MMAP		0x02
++/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
++#define DRM_EXYNOS_PLANE_SET_ZPOS	0x06
++#define DRM_EXYNOS_VIDI_CONNECTION	0x07
+ 
+ #define DRM_IOCTL_EXYNOS_GEM_CREATE		DRM_IOWR(DRM_COMMAND_BASE + \
+ 		DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
+@@ -87,19 +116,70 @@ struct drm_exynos_gem_mmap {
+ #define DRM_IOCTL_EXYNOS_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + \
+ 		DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap)
+ 
++#define DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS	DRM_IOWR(DRM_COMMAND_BASE + \
++		DRM_EXYNOS_PLANE_SET_ZPOS, struct drm_exynos_plane_set_zpos)
++
++#define DRM_IOCTL_EXYNOS_VIDI_CONNECTION	DRM_IOWR(DRM_COMMAND_BASE + \
++		DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection)
++
++#ifdef __KERNEL__
++
+ /**
+- * Platform Specific Structure for DRM based FIMD.
++ * A structure for lcd panel information.
+  *
+  * @timing: default video mode for initializing
++ * @width_mm: physical size of lcd width.
++ * @height_mm: physical size of lcd height.
++ */
++struct exynos_drm_panel_info {
++	struct fb_videomode timing;
++	u32 width_mm;
++	u32 height_mm;
++};
++
++/**
++ * Platform Specific Structure for DRM based FIMD.
++ *
++ * @panel: default panel info for initializing
+  * @default_win: default window layer number to be used for UI.
+  * @bpp: default bit per pixel.
+  */
+ struct exynos_drm_fimd_pdata {
+-	struct fb_videomode		timing;
++	struct exynos_drm_panel_info panel;
+ 	u32				vidcon0;
+ 	u32				vidcon1;
+ 	unsigned int			default_win;
+ 	unsigned int			bpp;
+ };
+ 
+-#endif
++/**
++ * Platform Specific Structure for DRM based HDMI.
++ *
++ * @hdmi_dev: device point to specific hdmi driver.
++ * @mixer_dev: device point to specific mixer driver.
++ *
++ * this structure is used for common hdmi driver and each device object
++ * would be used to access specific device driver(hdmi or mixer driver)
++ */
++struct exynos_drm_common_hdmi_pd {
++	struct device *hdmi_dev;
++	struct device *mixer_dev;
++};
++
++/**
++ * Platform Specific Structure for DRM based HDMI core.
++ *
++ * @timing: default video mode for initializing
++ * @default_win: default window layer number to be used for UI.
++ * @bpp: default bit per pixel.
++ * @is_v13: set if hdmi version 13 is.
++ */
++struct exynos_drm_hdmi_pdata {
++	struct fb_videomode		timing;
++	unsigned int			default_win;
++	unsigned int			bpp;
++	unsigned int			is_v13:1;
++};
++
++#endif	/* __KERNEL__ */
++#endif	/* _EXYNOS_DRM_H_ */
+diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h
+new file mode 100644
+index 0000000..884613e
+--- /dev/null
++++ b/include/drm/gma_drm.h
+@@ -0,0 +1,91 @@
++/**************************************************************************
++ * Copyright (c) 2007-2011, Intel Corporation.
++ * All Rights Reserved.
++ * Copyright (c) 2008, Tungsten Graphics Inc.  Cedar Park, TX., USA.
++ * All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ **************************************************************************/
++
++#ifndef _PSB_DRM_H_
++#define _PSB_DRM_H_
++
++/*
++ *	Manage the LUT for an output
++ */
++struct drm_psb_dpst_lut_arg {
++	uint8_t lut[256];
++	int output_id;
++};
++
++/*
++ *	Validate modes
++ */
++struct drm_psb_mode_operation_arg {
++	u32 obj_id;
++	u16 operation;
++	struct drm_mode_modeinfo mode;
++	u64 data;
++};
++
++/*
++ *	Query the stolen memory for smarter management of
++ *	memory by the server
++ */
++struct drm_psb_stolen_memory_arg {
++	u32 base;
++	u32 size;
++};
++
++struct drm_psb_get_pipe_from_crtc_id_arg {
++	/** ID of CRTC being requested **/
++	u32 crtc_id;
++	/** pipe of requested CRTC **/
++	u32 pipe;
++};
++
++struct drm_psb_gem_create {
++	__u64 size;
++	__u32 handle;
++	__u32 flags;
++#define GMA_GEM_CREATE_STOLEN		1	/* Stolen memory can be used */
++};
++
++struct drm_psb_gem_mmap {
++	__u32 handle;
++	__u32 pad;
++	/**
++	 * Fake offset to use for subsequent mmap call
++	 *
++	 * This is a fixed-size type for 32/64 compatibility.
++	 */
++	__u64 offset;
++};
++
++/* Controlling the kernel modesetting buffers */
++
++#define DRM_GMA_GEM_CREATE	0x00		/* Create a GEM object */
++#define DRM_GMA_GEM_MMAP	0x01		/* Map GEM memory */
++#define DRM_GMA_STOLEN_MEMORY	0x02		/* Report stolen memory */
++#define DRM_GMA_2D_OP		0x03		/* Will be merged later */
++#define DRM_GMA_GAMMA		0x04		/* Set gamma table */
++#define DRM_GMA_ADB		0x05		/* Get backlight */
++#define DRM_GMA_DPST_BL		0x06		/* Set backlight */
++#define DRM_GMA_MODE_OPERATION	0x07		/* Mode validation/DC set */
++#define 	PSB_MODE_OPERATION_MODE_VALID	0x01
++#define DRM_GMA_GET_PIPE_FROM_CRTC_ID	0x08	/* CRTC to physical pipe# */
++
++
++#endif
+diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
+index 28c0d11..da929bb 100644
+--- a/include/drm/i915_drm.h
++++ b/include/drm/i915_drm.h
+@@ -198,6 +198,8 @@ typedef struct _drm_i915_sarea {
+ #define DRM_I915_OVERLAY_PUT_IMAGE	0x27
+ #define DRM_I915_OVERLAY_ATTRS	0x28
+ #define DRM_I915_GEM_EXECBUFFER2	0x29
++#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
++#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
+ 
+ #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
+ #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
+@@ -239,6 +241,8 @@ typedef struct _drm_i915_sarea {
+ #define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
+ #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
+ #define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
++#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
++#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+ 
+ /* Allow drivers to submit batchbuffers directly to hardware, relying
+  * on the security mechanisms provided by hardware.
+@@ -291,6 +295,8 @@ typedef struct drm_i915_irq_wait {
+ #define I915_PARAM_HAS_COHERENT_RINGS	 13
+ #define I915_PARAM_HAS_EXEC_CONSTANTS	 14
+ #define I915_PARAM_HAS_RELAXED_DELTA	 15
++#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
++#define I915_PARAM_HAS_LLC     	 17
+ 
+ typedef struct drm_i915_getparam {
+ 	int param;
+@@ -653,6 +659,9 @@ struct drm_i915_gem_execbuffer2 {
+ 	__u64 rsvd2;
+ };
+ 
++/** Resets the SO write offset registers for transform feedback on gen7. */
++#define I915_EXEC_GEN7_SOL_RESET	(1<<8)
++
+ struct drm_i915_gem_pin {
+ 	/** Handle of the buffer to be pinned. */
+ 	__u32 handle;
+@@ -844,4 +853,36 @@ struct drm_intel_overlay_attrs {
+ 	__u32 gamma5;
+ };
+ 
++/*
++ * Intel sprite handling
++ *
++ * Color keying works with a min/mask/max tuple.  Both source and destination
++ * color keying is allowed.
++ *
++ * Source keying:
++ * Sprite pixels within the min & max values, masked against the color channels
++ * specified in the mask field, will be transparent.  All other pixels will
++ * be displayed on top of the primary plane.  For RGB surfaces, only the min
++ * and mask fields will be used; ranged compares are not allowed.
++ *
++ * Destination keying:
++ * Primary plane pixels that match the min value, masked against the color
++ * channels specified in the mask field, will be replaced by corresponding
++ * pixels from the sprite plane.
++ *
++ * Note that source & destination keying are exclusive; only one can be
++ * active on a given plane.
++ */
++
++#define I915_SET_COLORKEY_NONE		(1<<0) /* disable color key matching */
++#define I915_SET_COLORKEY_DESTINATION	(1<<1)
++#define I915_SET_COLORKEY_SOURCE	(1<<2)
++struct drm_intel_sprite_colorkey {
++	__u32 plane_id;
++	__u32 min_value;
++	__u32 channel_mask;
++	__u32 max_value;
++	__u32 flags;
++};
++
+ #endif				/* _I915_DRM_H_ */
+diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
+index b174620..923afb5 100644
+--- a/include/drm/intel-gtt.h
++++ b/include/drm/intel-gtt.h
+@@ -15,6 +15,10 @@ const struct intel_gtt {
+ 	unsigned int needs_dmar : 1;
+ 	/* Whether we idle the gpu before mapping/unmapping */
+ 	unsigned int do_idle_maps : 1;
++	/* Share the scratch page dma with ppgtts. */
++	dma_addr_t scratch_page_dma;
++	/* for ppgtt PDE access */
++	u32 __iomem *gtt;
+ } *intel_gtt_get(void);
+ 
+ void intel_gtt_chipset_flush(void);
+@@ -40,4 +44,8 @@ void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
+ /* flag for GFDT type */
+ #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+ 
++#ifdef CONFIG_INTEL_IOMMU
++extern int intel_iommu_gfx_mapped;
++#endif
++
+ #endif
+diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
+index be94be6..7c491b4 100644
+--- a/include/drm/radeon_drm.h
++++ b/include/drm/radeon_drm.h
+@@ -509,6 +509,7 @@ typedef struct {
+ #define DRM_RADEON_GEM_SET_TILING	0x28
+ #define DRM_RADEON_GEM_GET_TILING	0x29
+ #define DRM_RADEON_GEM_BUSY		0x2a
++#define DRM_RADEON_GEM_VA		0x2b
+ 
+ #define DRM_IOCTL_RADEON_CP_INIT    DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
+ #define DRM_IOCTL_RADEON_CP_START   DRM_IO(  DRM_COMMAND_BASE + DRM_RADEON_CP_START)
+@@ -550,6 +551,7 @@ typedef struct {
+ #define DRM_IOCTL_RADEON_GEM_SET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
+ #define DRM_IOCTL_RADEON_GEM_GET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
+ #define DRM_IOCTL_RADEON_GEM_BUSY	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
++#define DRM_IOCTL_RADEON_GEM_VA		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
+ 
+ typedef struct drm_radeon_init {
+ 	enum {
+@@ -802,13 +804,23 @@ struct drm_radeon_gem_create {
+ 	uint32_t	flags;
+ };
+ 
+-#define RADEON_TILING_MACRO       0x1
+-#define RADEON_TILING_MICRO       0x2
+-#define RADEON_TILING_SWAP_16BIT  0x4
+-#define RADEON_TILING_SWAP_32BIT  0x8
+-#define RADEON_TILING_SURFACE     0x10 /* this object requires a surface
+-					* when mapped - i.e. front buffer */
+-#define RADEON_TILING_MICRO_SQUARE 0x20
++#define RADEON_TILING_MACRO				0x1
++#define RADEON_TILING_MICRO				0x2
++#define RADEON_TILING_SWAP_16BIT			0x4
++#define RADEON_TILING_SWAP_32BIT			0x8
++/* this object requires a surface when mapped - i.e. front buffer */
++#define RADEON_TILING_SURFACE				0x10
++#define RADEON_TILING_MICRO_SQUARE			0x20
++#define RADEON_TILING_EG_BANKW_SHIFT			8
++#define RADEON_TILING_EG_BANKW_MASK			0xf
++#define RADEON_TILING_EG_BANKH_SHIFT			12
++#define RADEON_TILING_EG_BANKH_MASK			0xf
++#define RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT	16
++#define RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK		0xf
++#define RADEON_TILING_EG_TILE_SPLIT_SHIFT		24
++#define RADEON_TILING_EG_TILE_SPLIT_MASK		0xf
++#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT	28
++#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK	0xf
+ 
+ struct drm_radeon_gem_set_tiling {
+ 	uint32_t	handle;
+@@ -872,12 +884,40 @@ struct drm_radeon_gem_pwrite {
+ 	uint64_t data_ptr;
+ };
+ 
++#define RADEON_VA_MAP			1
++#define RADEON_VA_UNMAP			2
++
++#define RADEON_VA_RESULT_OK		0
++#define RADEON_VA_RESULT_ERROR		1
++#define RADEON_VA_RESULT_VA_EXIST	2
++
++#define RADEON_VM_PAGE_VALID		(1 << 0)
++#define RADEON_VM_PAGE_READABLE		(1 << 1)
++#define RADEON_VM_PAGE_WRITEABLE	(1 << 2)
++#define RADEON_VM_PAGE_SYSTEM		(1 << 3)
++#define RADEON_VM_PAGE_SNOOPED		(1 << 4)
++
++struct drm_radeon_gem_va {
++	uint32_t		handle;
++	uint32_t		operation;
++	uint32_t		vm_id;
++	uint32_t		flags;
++	uint64_t		offset;
++};
++
+ #define RADEON_CHUNK_ID_RELOCS	0x01
+ #define RADEON_CHUNK_ID_IB	0x02
+ #define RADEON_CHUNK_ID_FLAGS	0x03
++#define RADEON_CHUNK_ID_CONST_IB	0x04
+ 
+ /* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
+ #define RADEON_CS_KEEP_TILING_FLAGS 0x01
++#define RADEON_CS_USE_VM            0x02
++/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
++#define RADEON_CS_RING_GFX          0
++#define RADEON_CS_RING_COMPUTE      1
++/* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
++/* 0 = normal, + = higher priority, - = lower priority */
+ 
+ struct drm_radeon_cs_chunk {
+ 	uint32_t		chunk_id;
+@@ -885,6 +925,9 @@ struct drm_radeon_cs_chunk {
+ 	uint64_t		chunk_data;
+ };
+ 
++/* drm_radeon_cs_reloc.flags */
++#define RADEON_RELOC_DONT_SYNC		0x01
++
+ struct drm_radeon_cs_reloc {
+ 	uint32_t		handle;
+ 	uint32_t		read_domains;
+@@ -916,6 +959,12 @@ struct drm_radeon_cs {
+ #define RADEON_INFO_NUM_TILE_PIPES	0x0b /* tile pipes for r600+ */
+ #define RADEON_INFO_FUSION_GART_WORKING	0x0c /* fusion writes to GTT were broken before this */
+ #define RADEON_INFO_BACKEND_MAP		0x0d /* pipe to backend map, needed by mesa */
++/* virtual address start, va < start are reserved by the kernel */
++#define RADEON_INFO_VA_START		0x0e
++/* maximum size of ib using the virtual memory cs */
++#define RADEON_INFO_IB_VM_MAX_SIZE	0x0f
++/* max pipes - needed for compute shaders */
++#define RADEON_INFO_MAX_PIPES		0x10
+ 
+ struct drm_radeon_info {
+ 	uint32_t		request;
+diff --git a/include/drm/sis_drm.h b/include/drm/sis_drm.h
+index 30f7b38..035b804 100644
+--- a/include/drm/sis_drm.h
++++ b/include/drm/sis_drm.h
+@@ -64,4 +64,8 @@ typedef struct {
+ 	unsigned int offset, size;
+ } drm_sis_fb_t;
+ 
++struct sis_file_private {
++	struct list_head obj_list;
++};
++
+ #endif				/* __SIS_DRM_H__ */
+diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
+index 42e3469..974c8f8 100644
+--- a/include/drm/ttm/ttm_bo_api.h
++++ b/include/drm/ttm/ttm_bo_api.h
+@@ -122,17 +122,12 @@ struct ttm_mem_reg {
+  * be mmapped by user space. Each of these bos occupy a slot in the
+  * device address space, that can be used for normal vm operations.
+  *
+- * @ttm_bo_type_user: These are user-space memory areas that are made
+- * available to the GPU by mapping the buffer pages into the GPU aperture
+- * space. These buffers cannot be mmaped from the device address space.
+- *
+  * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
+  * but they cannot be accessed from user-space. For kernel-only use.
+  */
+ 
+ enum ttm_bo_type {
+ 	ttm_bo_type_device,
+-	ttm_bo_type_user,
+ 	ttm_bo_type_kernel
+ };
+ 
+@@ -434,9 +429,9 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
+  * -EBUSY if the buffer is busy and no_wait is true.
+  * -ERESTARTSYS if interrupted by a signal.
+  */
+-
+ extern int
+ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
++
+ /**
+  * ttm_bo_synccpu_write_release:
+  *
+@@ -447,6 +442,22 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
+ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
+ 
+ /**
++ * ttm_bo_acc_size
++ *
++ * @bdev: Pointer to a ttm_bo_device struct.
++ * @bo_size: size of the buffer object in byte.
++ * @struct_size: size of the structure holding buffer object datas
++ *
++ * Returns size to account for a buffer object
++ */
++size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
++		       unsigned long bo_size,
++		       unsigned struct_size);
++size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
++			   unsigned long bo_size,
++			   unsigned struct_size);
++
++/**
+  * ttm_bo_init
+  *
+  * @bdev: Pointer to a ttm_bo_device struct.
+@@ -493,6 +504,7 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
+ 			struct file *persistent_swap_storage,
+ 			size_t acc_size,
+ 			void (*destroy) (struct ttm_buffer_object *));
++
+ /**
+  * ttm_bo_synccpu_object_init
+  *
+diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
+index 94eb143..d43e892 100644
+--- a/include/drm/ttm/ttm_bo_driver.h
++++ b/include/drm/ttm/ttm_bo_driver.h
+@@ -43,36 +43,9 @@ struct ttm_backend;
+ 
+ struct ttm_backend_func {
+ 	/**
+-	 * struct ttm_backend_func member populate
+-	 *
+-	 * @backend: Pointer to a struct ttm_backend.
+-	 * @num_pages: Number of pages to populate.
+-	 * @pages: Array of pointers to ttm pages.
+-	 * @dummy_read_page: Page to be used instead of NULL pages in the
+-	 * array @pages.
+-	 * @dma_addrs: Array of DMA (bus) address of the ttm pages.
+-	 *
+-	 * Populate the backend with ttm pages. Depending on the backend,
+-	 * it may or may not copy the @pages array.
+-	 */
+-	int (*populate) (struct ttm_backend *backend,
+-			 unsigned long num_pages, struct page **pages,
+-			 struct page *dummy_read_page,
+-			 dma_addr_t *dma_addrs);
+-	/**
+-	 * struct ttm_backend_func member clear
+-	 *
+-	 * @backend: Pointer to a struct ttm_backend.
+-	 *
+-	 * This is an "unpopulate" function. Release all resources
+-	 * allocated with populate.
+-	 */
+-	void (*clear) (struct ttm_backend *backend);
+-
+-	/**
+ 	 * struct ttm_backend_func member bind
+ 	 *
+-	 * @backend: Pointer to a struct ttm_backend.
++	 * @ttm: Pointer to a struct ttm_tt.
+ 	 * @bo_mem: Pointer to a struct ttm_mem_reg describing the
+ 	 * memory type and location for binding.
+ 	 *
+@@ -80,46 +53,29 @@ struct ttm_backend_func {
+ 	 * indicated by @bo_mem. This function should be able to handle
+ 	 * differences between aperture and system page sizes.
+ 	 */
+-	int (*bind) (struct ttm_backend *backend, struct ttm_mem_reg *bo_mem);
++	int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+ 
+ 	/**
+ 	 * struct ttm_backend_func member unbind
+ 	 *
+-	 * @backend: Pointer to a struct ttm_backend.
++	 * @ttm: Pointer to a struct ttm_tt.
+ 	 *
+ 	 * Unbind previously bound backend pages. This function should be
+ 	 * able to handle differences between aperture and system page sizes.
+ 	 */
+-	int (*unbind) (struct ttm_backend *backend);
++	int (*unbind) (struct ttm_tt *ttm);
+ 
+ 	/**
+ 	 * struct ttm_backend_func member destroy
+ 	 *
+-	 * @backend: Pointer to a struct ttm_backend.
++	 * @ttm: Pointer to a struct ttm_tt.
+ 	 *
+-	 * Destroy the backend.
++	 * Destroy the backend. This will be call back from ttm_tt_destroy so
++	 * don't call ttm_tt_destroy from the callback or infinite loop.
+ 	 */
+-	void (*destroy) (struct ttm_backend *backend);
+-};
+-
+-/**
+- * struct ttm_backend
+- *
+- * @bdev: Pointer to a struct ttm_bo_device.
+- * @flags: For driver use.
+- * @func: Pointer to a struct ttm_backend_func that describes
+- * the backend methods.
+- *
+- */
+-
+-struct ttm_backend {
+-	struct ttm_bo_device *bdev;
+-	uint32_t flags;
+-	struct ttm_backend_func *func;
++	void (*destroy) (struct ttm_tt *ttm);
+ };
+ 
+-#define TTM_PAGE_FLAG_USER            (1 << 1)
+-#define TTM_PAGE_FLAG_USER_DIRTY      (1 << 2)
+ #define TTM_PAGE_FLAG_WRITE           (1 << 3)
+ #define TTM_PAGE_FLAG_SWAPPED         (1 << 4)
+ #define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
+@@ -135,23 +91,18 @@ enum ttm_caching_state {
+ /**
+  * struct ttm_tt
+  *
++ * @bdev: Pointer to a struct ttm_bo_device.
++ * @func: Pointer to a struct ttm_backend_func that describes
++ * the backend methods.
+  * @dummy_read_page: Page to map where the ttm_tt page array contains a NULL
+  * pointer.
+  * @pages: Array of pages backing the data.
+- * @first_himem_page: Himem pages are put last in the page array, which
+- * enables us to run caching attribute changes on only the first part
+- * of the page array containing lomem pages. This is the index of the
+- * first himem page.
+- * @last_lomem_page: Index of the last lomem page in the page array.
+  * @num_pages: Number of pages in the page array.
+  * @bdev: Pointer to the current struct ttm_bo_device.
+  * @be: Pointer to the ttm backend.
+- * @tsk: The task for user ttm.
+- * @start: virtual address for user ttm.
+  * @swap_storage: Pointer to shmem struct file for swap storage.
+  * @caching_state: The current caching state of the pages.
+  * @state: The current binding state of the pages.
+- * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
+  *
+  * This is a structure holding the pages, caching- and aperture binding
+  * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+@@ -159,16 +110,14 @@ enum ttm_caching_state {
+  */
+ 
+ struct ttm_tt {
++	struct ttm_bo_device *bdev;
++	struct ttm_backend_func *func;
+ 	struct page *dummy_read_page;
+ 	struct page **pages;
+-	long first_himem_page;
+-	long last_lomem_page;
+ 	uint32_t page_flags;
+ 	unsigned long num_pages;
+ 	struct ttm_bo_global *glob;
+ 	struct ttm_backend *be;
+-	struct task_struct *tsk;
+-	unsigned long start;
+ 	struct file *swap_storage;
+ 	enum ttm_caching_state caching_state;
+ 	enum {
+@@ -176,7 +125,23 @@ struct ttm_tt {
+ 		tt_unbound,
+ 		tt_unpopulated,
+ 	} state;
++};
++
++/**
++ * struct ttm_dma_tt
++ *
++ * @ttm: Base ttm_tt struct.
++ * @dma_address: The DMA (bus) addresses of the pages
++ * @pages_list: used by some page allocation backend
++ *
++ * This is a structure holding the pages, caching- and aperture binding
++ * status for a buffer object that isn't backed by fixed (VRAM / AGP)
++ * memory.
++ */
++struct ttm_dma_tt {
++	struct ttm_tt ttm;
+ 	dma_addr_t *dma_address;
++	struct list_head pages_list;
+ };
+ 
+ #define TTM_MEMTYPE_FLAG_FIXED         (1 << 0)	/* Fixed (on-card) PCI memory */
+@@ -351,15 +316,42 @@ struct ttm_mem_type_manager {
+ 
+ struct ttm_bo_driver {
+ 	/**
+-	 * struct ttm_bo_driver member create_ttm_backend_entry
++	 * ttm_tt_create
+ 	 *
+-	 * @bdev: The buffer object device.
++	 * @bdev: pointer to a struct ttm_bo_device:
++	 * @size: Size of the data needed backing.
++	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
++	 * @dummy_read_page: See struct ttm_bo_device.
+ 	 *
+-	 * Create a driver specific struct ttm_backend.
++	 * Create a struct ttm_tt to back data with system memory pages.
++	 * No pages are actually allocated.
++	 * Returns:
++	 * NULL: Out of memory.
+ 	 */
++	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
++					unsigned long size,
++					uint32_t page_flags,
++					struct page *dummy_read_page);
+ 
+-	struct ttm_backend *(*create_ttm_backend_entry)
+-	 (struct ttm_bo_device *bdev);
++	/**
++	 * ttm_tt_populate
++	 *
++	 * @ttm: The struct ttm_tt to contain the backing pages.
++	 *
++	 * Allocate all backing pages
++	 * Returns:
++	 * -ENOMEM: Out of memory.
++	 */
++	int (*ttm_tt_populate)(struct ttm_tt *ttm);
++
++	/**
++	 * ttm_tt_unpopulate
++	 *
++	 * @ttm: The struct ttm_tt to contain the backing pages.
++	 *
++	 * Free all backing page
++	 */
++	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
+ 
+ 	/**
+ 	 * struct ttm_bo_driver member invalidate_caches
+@@ -477,9 +469,6 @@ struct ttm_bo_global_ref {
+  * @dummy_read_page: Pointer to a dummy page used for mapping requests
+  * of unpopulated pages.
+  * @shrink: A shrink callback object used for buffer object swap.
+- * @ttm_bo_extra_size: Extra size (sizeof(struct ttm_buffer_object) excluded)
+- * used by a buffer object. This is excluding page arrays and backing pages.
+- * @ttm_bo_size: This is @ttm_bo_extra_size + sizeof(struct ttm_buffer_object).
+  * @device_list_mutex: Mutex protecting the device list.
+  * This mutex is held while traversing the device list for pm options.
+  * @lru_lock: Spinlock protecting the bo subsystem lru lists.
+@@ -497,8 +486,6 @@ struct ttm_bo_global {
+ 	struct ttm_mem_global *mem_glob;
+ 	struct page *dummy_read_page;
+ 	struct ttm_mem_shrink shrink;
+-	size_t ttm_bo_extra_size;
+-	size_t ttm_bo_size;
+ 	struct mutex device_list_mutex;
+ 	spinlock_t lru_lock;
+ 
+@@ -600,8 +587,9 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
+ }
+ 
+ /**
+- * ttm_tt_create
++ * ttm_tt_init
+  *
++ * @ttm: The struct ttm_tt.
+  * @bdev: pointer to a struct ttm_bo_device:
+  * @size: Size of the data needed backing.
+  * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+@@ -612,28 +600,22 @@ ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
+  * Returns:
+  * NULL: Out of memory.
+  */
+-extern struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev,
+-				    unsigned long size,
+-				    uint32_t page_flags,
+-				    struct page *dummy_read_page);
++extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
++			unsigned long size, uint32_t page_flags,
++			struct page *dummy_read_page);
++extern int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
++			   unsigned long size, uint32_t page_flags,
++			   struct page *dummy_read_page);
+ 
+ /**
+- * ttm_tt_set_user:
++ * ttm_tt_fini
+  *
+- * @ttm: The struct ttm_tt to populate.
+- * @tsk: A struct task_struct for which @start is a valid user-space address.
+- * @start: A valid user-space address.
+- * @num_pages: Size in pages of the user memory area.
++ * @ttm: the ttm_tt structure.
+  *
+- * Populate a struct ttm_tt with a user-space memory area after first pinning
+- * the pages backing it.
+- * Returns:
+- * !0: Error.
++ * Free memory of ttm_tt structure
+  */
+-
+-extern int ttm_tt_set_user(struct ttm_tt *ttm,
+-			   struct task_struct *tsk,
+-			   unsigned long start, unsigned long num_pages);
++extern void ttm_tt_fini(struct ttm_tt *ttm);
++extern void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
+ 
+ /**
+  * ttm_ttm_bind:
+@@ -646,20 +628,11 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm,
+ extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+ 
+ /**
+- * ttm_tt_populate:
+- *
+- * @ttm: The struct ttm_tt to contain the backing pages.
+- *
+- * Add backing pages to all of @ttm
+- */
+-extern int ttm_tt_populate(struct ttm_tt *ttm);
+-
+-/**
+  * ttm_ttm_destroy:
+  *
+  * @ttm: The struct ttm_tt.
+  *
+- * Unbind, unpopulate and destroy a struct ttm_tt.
++ * Unbind, unpopulate and destroy common struct ttm_tt.
+  */
+ extern void ttm_tt_destroy(struct ttm_tt *ttm);
+ 
+@@ -673,19 +646,13 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm);
+ extern void ttm_tt_unbind(struct ttm_tt *ttm);
+ 
+ /**
+- * ttm_ttm_destroy:
++ * ttm_tt_swapin:
+  *
+  * @ttm: The struct ttm_tt.
+- * @index: Index of the desired page.
+- *
+- * Return a pointer to the struct page backing @ttm at page
+- * index @index. If the page is unpopulated, one will be allocated to
+- * populate that index.
+  *
+- * Returns:
+- * NULL on OOM.
++ * Swap in a previously swap out ttm_tt.
+  */
+-extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
++extern int ttm_tt_swapin(struct ttm_tt *ttm);
+ 
+ /**
+  * ttm_tt_cache_flush:
+@@ -1046,17 +1013,25 @@ extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
+ #include <linux/agp_backend.h>
+ 
+ /**
+- * ttm_agp_backend_init
++ * ttm_agp_tt_create
+  *
+  * @bdev: Pointer to a struct ttm_bo_device.
+  * @bridge: The agp bridge this device is sitting on.
++ * @size: Size of the data needed backing.
++ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
++ * @dummy_read_page: See struct ttm_bo_device.
++ *
+  *
+  * Create a TTM backend that uses the indicated AGP bridge as an aperture
+  * for TT memory. This function uses the linux agpgart interface to
+  * bind and unbind memory backing a ttm_tt.
+  */
+-extern struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
+-						struct agp_bridge_data *bridge);
++extern struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
++					struct agp_bridge_data *bridge,
++					unsigned long size, uint32_t page_flags,
++					struct page *dummy_read_page);
++int ttm_agp_tt_populate(struct ttm_tt *ttm);
++void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
+ #endif
+ 
+ #endif
+diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
+index 26c1f78..d6d1da4 100644
+--- a/include/drm/ttm/ttm_memory.h
++++ b/include/drm/ttm/ttm_memory.h
+@@ -30,6 +30,7 @@
+ 
+ #include <linux/workqueue.h>
+ #include <linux/spinlock.h>
++#include <linux/bug.h>
+ #include <linux/wait.h>
+ #include <linux/errno.h>
+ #include <linux/kobject.h>
+diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
+index 129de12..5fe2740 100644
+--- a/include/drm/ttm/ttm_page_alloc.h
++++ b/include/drm/ttm/ttm_page_alloc.h
+@@ -30,45 +30,70 @@
+ #include "ttm_memory.h"
+ 
+ /**
+- * Get count number of pages from pool to pages list.
++ * Initialize pool allocator.
++ */
++int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
++/**
++ * Free pool allocator.
++ */
++void ttm_page_alloc_fini(void);
++
++/**
++ * ttm_pool_populate:
++ *
++ * @ttm: The struct ttm_tt to contain the backing pages.
+  *
+- * @pages: head of empty linked list where pages are filled.
+- * @flags: ttm flags for page allocation.
+- * @cstate: ttm caching state for the page.
+- * @count: number of pages to allocate.
+- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
++ * Add backing pages to all of @ttm
+  */
+-int ttm_get_pages(struct list_head *pages,
+-		  int flags,
+-		  enum ttm_caching_state cstate,
+-		  unsigned count,
+-		  dma_addr_t *dma_address);
++extern int ttm_pool_populate(struct ttm_tt *ttm);
++
+ /**
+- * Put linked list of pages to pool.
++ * ttm_pool_unpopulate:
++ *
++ * @ttm: The struct ttm_tt which to free backing pages.
+  *
+- * @pages: list of pages to free.
+- * @page_count: number of pages in the list. Zero can be passed for unknown
+- * count.
+- * @flags: ttm flags for page allocation.
+- * @cstate: ttm caching state.
+- * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
++ * Free all pages of @ttm
+  */
+-void ttm_put_pages(struct list_head *pages,
+-		   unsigned page_count,
+-		   int flags,
+-		   enum ttm_caching_state cstate,
+-		   dma_addr_t *dma_address);
++extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
++
++/**
++ * Output the state of pools to debugfs file
++ */
++extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
++
++
++#ifdef CONFIG_SWIOTLB
+ /**
+  * Initialize pool allocator.
+  */
+-int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
++int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
++
+ /**
+  * Free pool allocator.
+  */
+-void ttm_page_alloc_fini(void);
++void ttm_dma_page_alloc_fini(void);
+ 
+ /**
+  * Output the state of pools to debugfs file
+  */
+-extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
++extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
++
++extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
++extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
++
++#else
++static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
++					  unsigned max_pages)
++{
++	return -ENODEV;
++}
++
++static inline void ttm_dma_page_alloc_fini(void) { return; }
++
++static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
++{
++	return 0;
++}
++#endif
++
+ #endif
+diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h
+index fd11a5b..79b3b6e 100644
+--- a/include/drm/via_drm.h
++++ b/include/drm/via_drm.h
+@@ -274,4 +274,8 @@ typedef struct drm_via_dmablit {
+ 	drm_via_blitsync_t sync;
+ } drm_via_dmablit_t;
+ 
++struct via_file_private {
++	struct list_head obj_list;
++};
++
+ #endif				/* _VIA_DRM_H_ */

Added: dists/sid/linux/debian/patches/features/all/drm/drm-i915-revert-switch-to-simple_open.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/features/all/drm/drm-i915-revert-switch-to-simple_open.patch	Wed Feb  6 03:52:57 2013	(r19786)
@@ -0,0 +1,59 @@
+From eb5a32214c4f95bbceaf481a53289086274b7275 Mon Sep 17 00:00:00 2001
+From: Julien Cristau <jcristau at debian.org>
+Date: Wed, 22 Aug 2012 18:54:53 +0200
+Subject: [PATCH 1/7] drm/i915: revert switch to simple_open
+
+This reverts part of commit 234e340582901211f40d8c732afc49f0630ecf05
+---
+ drivers/gpu/drm/i915/i915_debugfs.c |   14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index e6162a1..8747da0 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -1505,6 +1505,14 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
+ 	return 0;
+ }
+ 
++static int
++i915_debugfs_common_open(struct inode *inode,
++			 struct file *filp)
++{
++	filp->private_data = inode->i_private;
++	return 0;
++}
++
+ static ssize_t
+ i915_wedged_read(struct file *filp,
+ 		 char __user *ubuf,
+@@ -1555,7 +1563,7 @@ i915_wedged_write(struct file *filp,
+ 
+ static const struct file_operations i915_wedged_fops = {
+ 	.owner = THIS_MODULE,
+-	.open = simple_open,
++	.open = i915_debugfs_common_open,
+ 	.read = i915_wedged_read,
+ 	.write = i915_wedged_write,
+ 	.llseek = default_llseek,
+@@ -1617,7 +1625,7 @@ i915_max_freq_write(struct file *filp,
+ 
+ static const struct file_operations i915_max_freq_fops = {
+ 	.owner = THIS_MODULE,
+-	.open = simple_open,
++	.open = i915_debugfs_common_open,
+ 	.read = i915_max_freq_read,
+ 	.write = i915_max_freq_write,
+ 	.llseek = default_llseek,
+@@ -1688,7 +1696,7 @@ i915_cache_sharing_write(struct file *filp,
+ 
+ static const struct file_operations i915_cache_sharing_fops = {
+ 	.owner = THIS_MODULE,
+-	.open = simple_open,
++	.open = i915_debugfs_common_open,
+ 	.read = i915_cache_sharing_read,
+ 	.write = i915_cache_sharing_write,
+ 	.llseek = default_llseek,
+-- 
+1.7.10.4
+

Added: dists/sid/linux/debian/patches/features/all/drm/drm-revert-part-of-2c9ede55ecec58099b72e4bb8eab719f3.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/features/all/drm/drm-revert-part-of-2c9ede55ecec58099b72e4bb8eab719f3.patch	Wed Feb  6 03:52:57 2013	(r19786)
@@ -0,0 +1,26 @@
+From fb11bb3d75a8643ef017b94c904d3691b5dc5a4c Mon Sep 17 00:00:00 2001
+From: Julien Cristau <jcristau at debian.org>
+Date: Sun, 24 Jun 2012 12:02:01 +0200
+Subject: [PATCH 5/7] drm: revert part of
+ 2c9ede55ecec58099b72e4bb8eab719f32f72c31
+
+---
+ drivers/gpu/drm/drm_sysfs.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
+index 5a7bd51..e7101be 100644
+--- a/drivers/gpu/drm/drm_sysfs.c
++++ b/drivers/gpu/drm/drm_sysfs.c
+@@ -72,7 +72,7 @@ static int drm_class_resume(struct device *dev)
+ 	return 0;
+ }
+ 
+-static char *drm_devnode(struct device *dev, umode_t *mode)
++static char *drm_devnode(struct device *dev, mode_t *mode)
+ {
+ 	return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
+ }
+-- 
+1.7.10.4
+

Added: dists/sid/linux/debian/patches/features/all/drm/genpatch.py
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/features/all/drm/genpatch.py	Wed Feb  6 03:52:57 2013	(r19786)
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+
+import sys
+sys.path.append("debian/lib/python")
+
+import os.path, re, subprocess
+
+from debian_linux.debian import Changelog, VersionLinux
+
+def main(repo, drm_version):
+    changelog = Changelog(version=VersionLinux)[0]
+
+    args = ['git', 'diff',
+            'v' + changelog.version.linux_upstream_full, 'v' + drm_version,
+            '--', 'drivers/char/agp', 'drivers/gpu/drm', 'include/drm']
+    with open('debian/patches/features/all/drm/drm-3.4.patch', 'w') as patch:
+        subprocess.check_call(args, cwd=repo, stdout=patch)
+
+if __name__ == '__main__':
+    if len(sys.argv) != 3:
+        print >>sys.stderr, "Usage: %s REPO DRM-VERSION" % sys.argv[0]
+        sys.exit(2)
+    main(sys.argv[1], sys.argv[2])

Added: dists/sid/linux/debian/patches/features/all/drm/i2c-export-bit-banging-algo-functions.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/features/all/drm/i2c-export-bit-banging-algo-functions.patch	Wed Feb  6 03:52:57 2013	(r19786)
@@ -0,0 +1,64 @@
+From: Daniel Vetter <daniel.vetter at ffwll.ch>
+Date: Tue, 28 Feb 2012 00:39:39 +0100
+Subject: [PATCH 3/7] i2c: export bit-banging algo functions
+
+commit b0209b39951868069710c1e39ca14add9fa77ada upstream.
+
+i915 has a hw i2c controller (gmbus) but for a bunch of stupid reasons
+we need to be able to fall back to the bit-banging algo on gpio pins.
+
+The current code sets up a 2nd i2c controller for the same i2c bus using
+the bit-banging algo. This has a bunch of issues, the major one being
+that userspace can directly access this fallback i2c adaptor behind
+the drivers back.
+
+But we need to frob a few registers before and after using fallback
+gpio bit-banging, so this horribly fails.
+
+The new plan is to only set up one i2c adaptor and transparently fall
+back to bit-banging by directly calling the xfer function of the bit-
+banging algo in the i2c core.
+
+To make that possible, export the 2 i2c algo functions.
+
+v2: As suggested by Jean Delvare, simply export the i2c_bit_algo
+vtable instead of the individual functions.
+
+Acked-by: Jean Delvare <khali at linux-fr.org>
+Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
+---
+ drivers/i2c/algos/i2c-algo-bit.c |    3 ++-
+ include/linux/i2c-algo-bit.h     |    1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
+index 24f94f4..acba1c6 100644
+--- a/drivers/i2c/algos/i2c-algo-bit.c
++++ b/drivers/i2c/algos/i2c-algo-bit.c
+@@ -616,10 +616,11 @@ static u32 bit_func(struct i2c_adapter *adap)
+ 
+ /* -----exported algorithm data: -------------------------------------	*/
+ 
+-static const struct i2c_algorithm i2c_bit_algo = {
++const struct i2c_algorithm i2c_bit_algo = {
+ 	.master_xfer	= bit_xfer,
+ 	.functionality	= bit_func,
+ };
++EXPORT_SYMBOL(i2c_bit_algo);
+ 
+ /*
+  * registering functions to load algorithms at runtime
+diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h
+index 4f98148..584ffa0 100644
+--- a/include/linux/i2c-algo-bit.h
++++ b/include/linux/i2c-algo-bit.h
+@@ -49,5 +49,6 @@ struct i2c_algo_bit_data {
+ 
+ int i2c_bit_add_bus(struct i2c_adapter *);
+ int i2c_bit_add_numbered_bus(struct i2c_adapter *);
++extern const struct i2c_algorithm i2c_bit_algo;
+ 
+ #endif /* _LINUX_I2C_ALGO_BIT_H */
+-- 
+1.7.10.4
+

Added: dists/sid/linux/debian/patches/features/all/drm/revert-vm-add-vm_munmap-helper-function.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/features/all/drm/revert-vm-add-vm_munmap-helper-function.patch	Wed Feb  6 03:52:57 2013	(r19786)
@@ -0,0 +1,24 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Subject: Revert "VM: add vm_munmap() helper function" in i810
+Date: Sun, 02 Dec 2012 23:49:49 +0000
+
+This reverts commit a46ef99d80817a167477ed1c8b4d90ee0c2e726f 'VM: add
+"vm_munmap()" helper function' and commit
+bfce281c287a427d0841fadf5d59242757b4e620 'kill mm argument of
+vm_munmap()' in drivers/gpu/drm/i810/i810_dma.c
+
+--- a/drivers/gpu/drm/i810/i810_dma.c
++++ b/drivers/gpu/drm/i810/i810_dma.c
+@@ -157,8 +157,11 @@
+ 	if (buf_priv->currently_mapped != I810_BUF_MAPPED)
+ 		return -EINVAL;
+ 
+-	retcode = vm_munmap((unsigned long)buf_priv->virtual,
++	down_write(&current->mm->mmap_sem);
++	retcode = do_munmap(current->mm,
++			    (unsigned long)buf_priv->virtual,
+ 			    (size_t) buf->total);
++	up_write(&current->mm->mmap_sem);
+ 
+ 	buf_priv->currently_mapped = I810_BUF_UNMAPPED;
+ 	buf_priv->virtual = NULL;

Added: dists/sid/linux/debian/patches/features/all/drm/slab-introduce-kmalloc_array.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/features/all/drm/slab-introduce-kmalloc_array.patch	Wed Feb  6 03:52:57 2013	(r19786)
@@ -0,0 +1,58 @@
+From: Xi Wang <xi.wang at gmail.com>
+Date: Mon, 5 Mar 2012 15:14:41 -0800
+Subject: slab: introduce kmalloc_array()
+
+commit a8203725dfded5c1f79dca3368a4a273e24b59bb upstream.
+
+Introduce a kmalloc_array() wrapper that performs integer overflow
+checking without zeroing the memory.
+
+Suggested-by: Andrew Morton <akpm at linux-foundation.org>
+Suggested-by: Jens Axboe <axboe at kernel.dk>
+Signed-off-by: Xi Wang <xi.wang at gmail.com>
+Cc: Dan Carpenter <dan.carpenter at oracle.com>
+Acked-by: David Rientjes <rientjes at google.com>
+Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
+Signed-off-by: Pekka Enberg <penberg at kernel.org>
+---
+ include/linux/slab.h |   17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/slab.h b/include/linux/slab.h
+index 573c809..a595dce 100644
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -190,7 +190,7 @@ size_t ksize(const void *);
+ #endif
+ 
+ /**
+- * kcalloc - allocate memory for an array. The memory is set to zero.
++ * kmalloc_array - allocate memory for an array.
+  * @n: number of elements.
+  * @size: element size.
+  * @flags: the type of memory to allocate.
+@@ -240,11 +240,22 @@ size_t ksize(const void *);
+  * for general use, and so are not documented here. For a full list of
+  * potential flags, always refer to linux/gfp.h.
+  */
+-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
++static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+ {
+ 	if (size != 0 && n > ULONG_MAX / size)
+ 		return NULL;
+-	return __kmalloc(n * size, flags | __GFP_ZERO);
++	return __kmalloc(n * size, flags);
++}
++
++/**
++ * kcalloc - allocate memory for an array. The memory is set to zero.
++ * @n: number of elements.
++ * @size: element size.
++ * @flags: the type of memory to allocate (see kmalloc).
++ */
++static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
++{
++	return kmalloc_array(n, size, flags | __GFP_ZERO);
+ }
+ 
+ #if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)

Added: dists/sid/linux/debian/patches/features/all/drm/swiotlb-Expose-swiotlb_nr_tlb-function-to-modules.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux/debian/patches/features/all/drm/swiotlb-Expose-swiotlb_nr_tlb-function-to-modules.patch	Wed Feb  6 03:52:57 2013	(r19786)
@@ -0,0 +1,74 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk at oracle.com>
+Date: Thu, 11 Aug 2011 16:50:56 -0400
+Subject: [PATCH 4/7] swiotlb: Expose swiotlb_nr_tlb function to modules
+
+commit f21ffe9f6da6d3a69c518b7345c198d48d941c34 upstream.
+
+As a mechanism to detect whether SWIOTLB is enabled or not.
+We also fix the spelling - it was swioltb instead of
+swiotlb.
+
+CC: FUJITA Tomonori <fujita.tomonori at lab.ntt.co.jp>
+[v1: Ripped out swiotlb_enabled]
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk at oracle.com>
+---
+ drivers/xen/swiotlb-xen.c |    2 +-
+ include/linux/swiotlb.h   |    2 +-
+ lib/swiotlb.c             |    5 +++--
+ 3 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 284798a..19e6a20 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -153,7 +153,7 @@ void __init xen_swiotlb_init(int verbose)
+ 	char *m = NULL;
+ 	unsigned int repeat = 3;
+ 
+-	nr_tbl = swioltb_nr_tbl();
++	nr_tbl = swiotlb_nr_tbl();
+ 	if (nr_tbl)
+ 		xen_io_tlb_nslabs = nr_tbl;
+ 	else {
+diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
+index 445702c..e872526 100644
+--- a/include/linux/swiotlb.h
++++ b/include/linux/swiotlb.h
+@@ -24,7 +24,7 @@ extern int swiotlb_force;
+ 
+ extern void swiotlb_init(int verbose);
+ extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
+-extern unsigned long swioltb_nr_tbl(void);
++extern unsigned long swiotlb_nr_tbl(void);
+ 
+ /*
+  * Enumeration for sync targets
+diff --git a/lib/swiotlb.c b/lib/swiotlb.c
+index 99093b3..058935e 100644
+--- a/lib/swiotlb.c
++++ b/lib/swiotlb.c
+@@ -110,11 +110,11 @@ setup_io_tlb_npages(char *str)
+ __setup("swiotlb=", setup_io_tlb_npages);
+ /* make io_tlb_overflow tunable too? */
+ 
+-unsigned long swioltb_nr_tbl(void)
++unsigned long swiotlb_nr_tbl(void)
+ {
+ 	return io_tlb_nslabs;
+ }
+-
++EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
+ /* Note that this doesn't work with highmem page */
+ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
+ 				      volatile void *address)
+@@ -321,6 +321,7 @@ void __init swiotlb_free(void)
+ 		free_bootmem_late(__pa(io_tlb_start),
+ 				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+ 	}
++	io_tlb_nslabs = 0;
+ }
+ 
+ static int is_swiotlb_buffer(phys_addr_t paddr)
+-- 
+1.7.10.4
+

Modified: dists/sid/linux/debian/patches/series
==============================================================================
--- dists/sid/linux/debian/patches/series	Sat Feb  2 01:19:29 2013	(r19785)
+++ dists/sid/linux/debian/patches/series	Wed Feb  6 03:52:57 2013	(r19786)
@@ -36,7 +36,6 @@
 debian/sched-autogroup-disabled.patch
 features/all/cgroups-Allow-memory-cgroup-support-to-be-included-b.patch
 debian/cgroups-Document-the-Debian-memory-resource-controll.patch
-bugfix/ia64/nouveau-ACPI-support-is-dependent-on-X86.patch
 features/x86/x86-Add-amilo-rfkill-driver-for-some-Fujitsu-Siemens.patch
 
 bugfix/arm/ixp4xx_iobe.patch
@@ -355,17 +354,6 @@
 features/arm/ARM-fix-Kconfig-warning-for-HAVE_BPF_JIT.patch
 features/arm/net-drop-NET-dependency-from-HAVE_BPF_JIT.patch
 
-# nouveau update to support Fermi (NVC0+) acceleration
-features/all/fermi-accel/drm-nouveau-ttm-always-do-buffer-moves-on-kernel-cha.patch
-features/all/fermi-accel/drm-nouveau-remove-subchannel-names-from-places-wher.patch
-features/all/fermi-accel/drm-nouveau-move-fence-sequence-check-to-start-of-lo.patch
-features/all/fermi-accel/drm-nvc0-disp-reimplement-flip-completion-method-as-.patch
-features/all/fermi-accel/drm-nouveau-remove-m2mf-creation-on-userspace-channe.patch
-features/all/fermi-accel/drm-nouveau-inform-userspace-of-relaxed-kernel-subch.patch
-features/all/fermi-accel/drm-nouveau-oops-increase-channel-dispc_vma-to-4.patch
-features/all/fermi-accel/drm-nvd0-disp-ignore-clock-set-if-no-pclk.patch
-features/all/fermi-accel/drm-nouveau-bump-version-to-1.0.0.patch
-
 bugfix/all/net-e100-ucode-is-optional-in-some-cases.patch
 
 features/all/debugfs-add-mode-uid-and-gid-options.patch
@@ -409,7 +397,6 @@
 bugfix/all/usb-Add-quirk-detection-based-on-interface-informati.patch
 bugfix/all/usb-Add-USB_QUIRK_RESET_RESUME-for-all-Logitech-UVC-.patch
 bugfix/alpha/alpha-use-large-data-model.diff
-bugfix/x86/drm-i915-i8xx-interrupt-handler.patch
 features/arm/ahci-Add-JMicron-362-device-IDs.patch
 bugfix/all/speakup-lower-default-software-speech-rate.patch
 debian/perf-hide-abi-change-in-3.2.30.patch
@@ -418,10 +405,8 @@
 debian/xfrm-avoid-ABI-change-in-3.2.31.patch
 debian/fs-writeback-avoid-ABI-change-in-3.2.32.patch
 bugfix/x86/asus-laptop-Do-not-call-HWRS-on-init.patch
-bugfix/x86/drm-i915-Only-kick-out-vesafb-if-we-takeover-the-fbc.patch
 
 features/all/xen/microcode.patch
-debian/radeon-firmware-is-required-for-drm-and-kms-on-r600-onward.patch
 debian/ALSA-avoid-ABI-change-in-3.2.34.patch
 debian/rtnetlink-avoid-ABI-change-in-3.2.34.patch
 debian/mm-avoid-ABI-change-in-3.2.33.patch
@@ -482,6 +467,24 @@
 features/all/alx/remove-atl1c-devices-from-alx.patch
 features/all/alx/mark-as-staging.patch
 features/arm/rtc-s35390a-wakealarm.patch
+
+# drm backport from 3.4
+features/all/drm/i2c-export-bit-banging-algo-functions.patch
+features/all/drm/swiotlb-Expose-swiotlb_nr_tlb-function-to-modules.patch
+features/all/drm/slab-introduce-kmalloc_array.patch
+features/all/drm/drm-3.4.patch
+features/all/drm/drm-i915-revert-switch-to-simple_open.patch
+features/all/drm/Revert-VM-add-vm_mmap-helper-function.patch
+features/all/drm/drm-revert-part-of-2c9ede55ecec58099b72e4bb8eab719f3.patch
+features/all/drm/Remove-gma500-driver-from-staging.patch
+features/all/drm/Revert-drm-base-prime-dma-buf-support-v5.patch
+features/all/drm/revert-vm-add-vm_munmap-helper-function.patch
+
+bugfix/x86/drm-i915-kick-any-firmware-framebuffers-before-claim.patch
+bugfix/x86/drm-i915-Only-kick-out-vesafb-if-we-takeover-the-fbc.patch
+bugfix/ia64/nouveau-ACPI-support-is-dependent-on-X86.patch
+bugfix/x86/drm-i915-i8xx-interrupt-handler.patch
+debian/radeon-firmware-is-required-for-drm-and-kms-on-r600-onward.patch
 bugfix/x86/drm-i915-panel-invert-brightness-via-parameter.patch
 bugfix/x86/drm-i915-panel-invert-brightness-via-quirk.patch
 bugfix/x86/drm-i915-panel-invert-brightness-acer-aspire-5734z.patch



More information about the Kernel-svn-changes mailing list